Compare commits


1 Commit

Author:  Gitea Actions Bot
SHA1:    cdbc4f9e2b
Date:    2025-11-07 14:35:22 +00:00
Message: Auto-update README with current k8s applications

    Generated by CI/CD workflow on 2025-11-07 14:35:22

    This PR updates the README.md file with the current list of applications found in the k8s/ directory structure.

Checks:  All checks were successful (Terraform / Terraform (pull_request) successful in 44s)

23 changed files with 190 additions and 870 deletions
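The README table in the first hunk below is regenerated rather than hand-edited. A minimal sketch of the kind of step such a workflow could run is shown here; the k8s/apps/ layout and the badge/link URL pattern are taken from this diff, while the script itself, its output file, and any README markers are assumptions:

#!/usr/bin/env bash
# Hypothetical regeneration step: rebuild the application table from k8s/apps/.
set -euo pipefail
{
  echo '| Application | Status |'
  echo '| --- | --- |'
  for dir in k8s/apps/*/; do
    app=$(basename "$dir")
    echo "| **${app}** | [![${app}](https://ag.hexor.cy/api/badge?name=${app}&revision=true)](https://ag.hexor.cy/applications/argocd/${app}) |"
  done
} > /tmp/apps-table.md
# A real workflow would splice /tmp/apps-table.md into README.md and commit the result.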

View File

@@ -40,7 +40,6 @@ ArgoCD homelab project
| **greece-notifier** | [![greece-notifier](https://ag.hexor.cy/api/badge?name=greece-notifier&revision=true)](https://ag.hexor.cy/applications/argocd/greece-notifier) |
| **hexound** | [![hexound](https://ag.hexor.cy/api/badge?name=hexound&revision=true)](https://ag.hexor.cy/applications/argocd/hexound) |
| **immich** | [![immich](https://ag.hexor.cy/api/badge?name=immich&revision=true)](https://ag.hexor.cy/applications/argocd/immich) |
| **iperf3** | [![iperf3](https://ag.hexor.cy/api/badge?name=iperf3&revision=true)](https://ag.hexor.cy/applications/argocd/iperf3) |
| **jellyfin** | [![jellyfin](https://ag.hexor.cy/api/badge?name=jellyfin&revision=true)](https://ag.hexor.cy/applications/argocd/jellyfin) |
| **k8s-secrets** | [![k8s-secrets](https://ag.hexor.cy/api/badge?name=k8s-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
| **khm** | [![khm](https://ag.hexor.cy/api/badge?name=khm&revision=true)](https://ag.hexor.cy/applications/argocd/khm) |

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: iperf3
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: iperf3
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/iperf3
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,92 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: iperf3-server
spec:
  selector:
    matchLabels:
      app: iperf3-server
  template:
    metadata:
      labels:
        app: iperf3-server
    spec:
      serviceAccountName: iperf3-server
      subdomain: iperf3
      initContainers:
        - name: create-service
          image: bitnami/kubectl:latest
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          command:
            - /bin/bash
            - -c
            - |
              # Clean node name for service name
              NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
              SERVICE_NAME="iperf3-${NODE_CLEAN}"
              # Create service for this pod
              kubectl apply -f - <<EOF
              apiVersion: v1
              kind: Service
              metadata:
                name: ${SERVICE_NAME}
                namespace: iperf3
                labels:
                  app: iperf3-node-service
                  target-node: "${NODE_NAME}"
              spec:
                type: ClusterIP
                ports:
                  - name: iperf3
                    port: 5201
                    protocol: TCP
              ---
              apiVersion: v1
              kind: Endpoints
              metadata:
                name: ${SERVICE_NAME}
                namespace: iperf3
                labels:
                  app: iperf3-node-service
                  target-node: "${NODE_NAME}"
              subsets:
                - addresses:
                    - ip: ${POD_IP}
                  ports:
                    - name: iperf3
                      port: 5201
                      protocol: TCP
              EOF
      containers:
        - name: iperf3-server
          image: networkstatic/iperf3:latest
          args: ["-s"]
          ports:
            - containerPort: 5201
              protocol: TCP
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          resources:
            requests:
              memory: "64Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
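The init container gives every node its own selector-less Service (iperf3-<node>) backed by an Endpoints object that points straight at the pod IP, so any single node's iperf3 server can be addressed by name. A quick manual check, using iperf3-ch from the ServiceMonitor below as an example target (the throwaway client pod is an assumption, not part of these manifests):

# List the per-node Services and Endpoints created by the init container.
kubectl -n iperf3 get svc,endpoints -l app=iperf3-node-service
# Run a one-off iperf3 client against a specific node's server.
kubectl -n iperf3 run iperf3-client --rm -it --restart=Never \
  --image=networkstatic/iperf3:latest --command -- \
  iperf3 -c iperf3-ch.iperf3.svc.cluster.local -p 5201 -t 10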

View File

@@ -1,92 +0,0 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: iperf3-exporter
  labels:
    app: iperf3-exporter
spec:
  selector:
    matchLabels:
      app: iperf3-exporter
  template:
    metadata:
      labels:
        app: iperf3-exporter
    spec:
      serviceAccountName: iperf3-server
      initContainers:
        - name: create-exporter-service
          image: bitnami/kubectl:latest
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          command:
            - /bin/bash
            - -c
            - |
              NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
              SERVICE_NAME="iperf3-exporter-${NODE_CLEAN}"
              kubectl apply -f - <<EOF
              apiVersion: v1
              kind: Service
              metadata:
                name: ${SERVICE_NAME}
                namespace: iperf3
                labels:
                  app: iperf3-exporter-service
                  target-node: "${NODE_NAME}"
              spec:
                type: ClusterIP
                ports:
                  - name: metrics
                    port: 9579
                    protocol: TCP
              ---
              apiVersion: v1
              kind: Endpoints
              metadata:
                name: ${SERVICE_NAME}
                namespace: iperf3
                labels:
                  app: iperf3-exporter-service
                  target-node: "${NODE_NAME}"
              subsets:
                - addresses:
                    - ip: ${POD_IP}
                  ports:
                    - name: metrics
                      port: 9579
                      protocol: TCP
              EOF
      containers:
        - name: iperf3-exporter
          image: ghcr.io/edgard/iperf3_exporter:1.2.2
          ports:
            - containerPort: 9579
              name: metrics
              protocol: TCP
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "200m"
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists

View File

@@ -1,15 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: iperf3-exporter
  labels:
    app: iperf3-exporter
spec:
  selector:
    app: iperf3-exporter
  ports:
    - name: metrics
      protocol: TCP
      port: 9579
      targetPort: 9579

View File

@@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- rbac.yaml
- daemonset.yaml
- service-headless.yaml
- iperf3-exporter-daemonset.yaml
- iperf3-exporter-service.yaml
- servicemonitor.yaml
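Because the Application above points at k8s/apps/iperf3, the whole stack listed in this kustomization can be rendered locally before a change like this removal; a simple sanity check from a checkout of the repository root (kubectl's built-in kustomize support is assumed to be recent enough):

# Render the manifests without applying them.
kubectl kustomize k8s/apps/iperf3 | less
# Or validate against the cluster without persisting anything.
kubectl apply --dry-run=server -k k8s/apps/iperf3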

View File

@@ -1,36 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: iperf3-server
  namespace: iperf3
  labels:
    app: iperf3-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: iperf3-service-manager
  namespace: iperf3
  labels:
    app: iperf3-server
rules:
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: iperf3-service-manager
  namespace: iperf3
  labels:
    app: iperf3-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: iperf3-service-manager
subjects:
  - kind: ServiceAccount
    name: iperf3-server
    namespace: iperf3

View File

@@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: iperf3
spec:
  clusterIP: None
  selector:
    app: iperf3-server
  ports:
    - name: iperf3
      protocol: TCP
      port: 5201
      targetPort: 5201

View File

@@ -1,122 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: iperf3-exporter
  labels:
    app: iperf3-exporter
    release: prometheus
spec:
  selector:
    matchLabels:
      app: iperf3-exporter
  endpoints:
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target: ['iperf3-ch.iperf3.svc.cluster.local:5201']
        period: ['10s']
        streams: ['4']
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: ch.tail2fe2d.ts.net
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target: ['iperf3-us.iperf3.svc.cluster.local:5201']
        period: ['10s']
        streams: ['4']
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: us.tail2fe2d.ts.net
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target: ['iperf3-iris.iperf3.svc.cluster.local:5201']
        period: ['10s']
        streams: ['4']
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: iris.khv
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target: ['iperf3-home.iperf3.svc.cluster.local:5201']
        period: ['10s']
        streams: ['4']
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: home.homenet
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target: ['iperf3-master.iperf3.svc.cluster.local:5201']
        period: ['10s']
        streams: ['4']
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: de.hexor.cy
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target: ['iperf3-it.iperf3.svc.cluster.local:5201']
        period: ['10s']
        streams: ['4']
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: it.hexor.cy
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target: ['iperf3-nas.iperf3.svc.cluster.local:5201']
        period: ['10s']
        streams: ['4']
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: nas.homenet
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target: ['iperf3-spb.iperf3.svc.cluster.local:5201']
        period: ['10s']
        streams: ['4']
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: spb.tail2fe2d.ts.net
      metricRelabelings:
        - sourceLabels: [__name__]
          regex: iperf3_(.+)
          targetLabel: __name__
          replacement: network_${1}
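Each endpoint makes Prometheus call the exporter's /probe handler with one per-node service as the target, and the final metricRelabelings rule renames the resulting iperf3_* series to network_*. The same probe can be issued by hand when debugging a single path; the port-forward is just one way to reach the exporter, and the query parameters mirror the params blocks above:

# Forward one exporter pod locally and trigger a probe against the ch node.
kubectl -n iperf3 port-forward ds/iperf3-exporter 9579:9579 &
curl 'http://127.0.0.1:9579/probe?target=iperf3-ch.iperf3.svc.cluster.local:5201&period=10s&streams=4'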

View File

@@ -1,33 +0,0 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: oidc-secret
spec:
  target:
    name: oidc-secret
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        OAUTH_CLIENT_SECRET: |-
          {{ .OAUTH_CLIENT_SECRET }}
        OAUTH_CLIENT_ID: |-
          {{ .OAUTH_CLIENT_ID }}
  data:
    - secretKey: OAUTH_CLIENT_SECRET
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
        property: fields[0].value
    - secretKey: OAUTH_CLIENT_ID
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
        property: fields[1].value
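The two data entries map individual custom fields of a single Vaultwarden item (referenced by its UUID) onto keys of the generated oidc-secret. After the operator reconciles it, the result can be checked in-cluster; the ollama namespace is assumed here because the kustomization below consumed this file:

# Check sync status and confirm the rendered Secret contains the client ID.
kubectl -n ollama get externalsecret oidc-secret
kubectl -n ollama get secret oidc-secret -o jsonpath='{.data.OAUTH_CLIENT_ID}' | base64 -d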

View File

@@ -1,9 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - external-secrets.yaml
helmCharts:
  - name: ollama
    repo: https://otwld.github.io/ollama-helm/
@@ -12,10 +9,3 @@ helmCharts:
    namespace: ollama
    valuesFile: ollama-values.yaml
    includeCRDs: true
  - name: open-webui
    repo: https://helm.openwebui.com/
    version: 8.14.0
    releaseName: openweb-ui
    namespace: ollama
    valuesFile: openweb-ui-values.yaml
    includeCRDs: true

View File

@@ -1,52 +0,0 @@
clusterDomain: ai.hexor.cy
extraEnvVars:
  GLOBAL_LOG_LEVEL: debug
  OAUTH_PROVIDER_NAME: authentik
  OPENID_PROVIDER_URL: https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration
  OPENID_REDIRECT_URI: https://ai.hexor.cy/oauth/oidc/callback
  WEBUI_URL: https://ai.hexor.cy
  # Allows auto-creation of new users using OAuth. Must be paired with ENABLE_LOGIN_FORM=false.
  ENABLE_OAUTH_SIGNUP: true
  # Disables user/password login form. Required when ENABLE_OAUTH_SIGNUP=true.
  ENABLE_LOGIN_FORM: false
  OAUTH_MERGE_ACCOUNTS_BY_EMAIL: true
extraEnvFrom:
  - secretRef:
      name: oidc-secret
nodeSelector:
  kubernetes.io/hostname: master.tail2fe2d.ts.net
ollamaUrls:
  - http://ollama.ollama.svc:11434
ollama:
  enabled: false
  ollama:
    gpu:
      enabled: false
    models:
      pull:
        - qwen3-vl:8b
      run:
        - qwen3-vl:8b
pipelines:
  enabled: true
tika:
  enabled: true
websocket:
  enabled: true
ingress:
  enabled: true
  class: traefik
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
  host: "ai.hexor.cy"
  tls:
    - hosts:
        - '*.hexor.cy'
      secretName: ollama-tls
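With ENABLE_OAUTH_SIGNUP=true and ENABLE_LOGIN_FORM=false every login goes through authentik, so the discovery document named in OPENID_PROVIDER_URL must be reachable and must advertise endpoints consistent with OPENID_REDIRECT_URI. A quick external check (jq is only used for readability and is an assumption):

# Fetch the OIDC discovery document the chart values point at.
curl -s https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration \
  | jq '{issuer, authorization_endpoint, token_endpoint}'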

View File

@@ -1,264 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: pasarguard-scripts
  labels:
    app: pasarguard-node
data:
  init-uuid.sh: |
    #!/bin/bash
    set -e
    echo "Started"
    # NODE_NAME is already set via environment variable
    NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
    # Get DNS name from node label xray-node-address
    DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
    if [ -z "${DNS_NAME}" ]; then
      echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
      exit 1
    fi
    echo "Node: ${NODE_NAME}"
    echo "DNS Name from label: ${DNS_NAME}"
    # Use DNS name for ConfigMap name to ensure uniqueness
    CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
    echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
    # Check if ConfigMap exists and get UUID
    if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
      echo "ConfigMap exists, reading UUID..."
      API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
      if [ -z "${API_KEY}" ]; then
        echo "UUID not found in ConfigMap, generating new one..."
        API_KEY=$(cat /proc/sys/kernel/random/uuid)
        kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
      else
        echo "Using existing UUID from ConfigMap"
      fi
    else
      echo "ConfigMap does not exist, creating new one..."
      API_KEY=$(cat /proc/sys/kernel/random/uuid)
      kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
        --from-literal=API_KEY="${API_KEY}" \
        --from-literal=NODE_NAME="${NODE_NAME}"
    fi
    # Save UUID and node info to shared volume for the main container
    echo -n "${API_KEY}" > /shared/api-key
    echo -n "${NODE_NAME}" > /shared/node-name
    echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
    echo "UUID initialized: ${API_KEY}"
    echo "Node name: ${NODE_NAME}"
    echo "ConfigMap: ${CONFIGMAP_NAME}"
    # Create Certificate for this node using DNS name from label
    CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
    echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
    # Check if Certificate already exists
    if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
      echo "Certificate does not exist, creating..."
      cat <<EOF | kubectl apply -f -
    apiVersion: cert-manager.io/v1
    kind: Certificate
    metadata:
      name: ${CERT_NAME}
      namespace: ${NAMESPACE}
    spec:
      secretName: ${CERT_NAME}-tls
      issuerRef:
        name: letsencrypt
        kind: ClusterIssuer
      dnsNames:
        - ${DNS_NAME}
    EOF
    else
      echo "Certificate already exists"
    fi
    # Wait for certificate to be ready
    echo "Waiting for certificate to be ready..."
    for i in {1..600}; do
      if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
        echo "Certificate secret is ready!"
        break
      fi
      echo "Waiting for certificate... ($i/600)"
      sleep 1
    done
    if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
      echo "WARNING: Certificate secret not ready after 600 seconds"
    else
      # Extract certificate and key from secret to shared volume
      echo "Extracting certificate and key..."
      kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
      kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
      echo "Certificate and key extracted successfully."
      cat /shared/tls.crt
    fi
    # Create individual Service and Endpoints for this node
    # Take only first part of node name before first dot
    NODE_SHORT_NAME="${NODE_NAME%%.*}"
    SERVICE_NAME="${NODE_SHORT_NAME}"
    # Get node internal IP (take only first IP if multiple)
    NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
    echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
    # Create Service without selector
    cat <<EOF | kubectl apply -f -
    apiVersion: v1
    kind: Service
    metadata:
      name: ${SERVICE_NAME}
      namespace: ${NAMESPACE}
      labels:
        app: pasarguard-node
        node: ${NODE_NAME}
    spec:
      clusterIP: None
      ports:
        - name: api
          port: 62050
          protocol: TCP
          targetPort: 62050
        - name: metrics
          port: 9550
          protocol: TCP
          targetPort: 9550
    ---
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: ${SERVICE_NAME}
      namespace: ${NAMESPACE}
      labels:
        app: pasarguard-node
        node: ${NODE_NAME}
    subsets:
      - addresses:
          - ip: ${NODE_IP}
            nodeName: ${NODE_NAME}
        ports:
          - name: api
            port: 62050
            protocol: TCP
          - name: metrics
            port: 9550
            protocol: TCP
    EOF
    echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
  exporter-start.sh: |
    #!/bin/sh
    # Install required tools
    apk add --no-cache wget curl iproute2-ss bash
    # Download v2ray-exporter
    echo "Downloading v2ray-exporter..."
    ARCH=$(uname -m)
    case $ARCH in
      x86_64)
        BINARY_ARCH="amd64"
        ;;
      aarch64|arm64)
        BINARY_ARCH="arm64"
        ;;
      *)
        echo "Unsupported architecture: $ARCH"
        exit 1
        ;;
    esac
    echo "Detected architecture: $ARCH, using binary: v2ray-exporter_linux_$BINARY_ARCH"
    wget -L -O /tmp/v2ray-exporter "https://github.com/wi1dcard/v2ray-exporter/releases/download/v0.6.0/v2ray-exporter_linux_$BINARY_ARCH"
    mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
    chmod +x /usr/local/bin/v2ray-exporter
    # Wait for initial API port file
    echo "Waiting for initial xray API port file..."
    while [ ! -f /shared/xray-api-port ]; do
      echo "Waiting for API port file..."
      sleep 2
    done
    # Main loop - restart exporter if it crashes or port changes
    while true; do
      if [ -f /shared/xray-api-port ]; then
        API_PORT=$(cat /shared/xray-api-port)
        if [ -n "$API_PORT" ]; then
          echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
          /usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550" &
          EXPORTER_PID=$!
          # Wait for exporter to exit or port file to change
          while kill -0 $EXPORTER_PID 2>/dev/null; do
            if [ -f /shared/xray-api-port ]; then
              NEW_PORT=$(cat /shared/xray-api-port)
              if [ "$NEW_PORT" != "$API_PORT" ]; then
                echo "API port changed from $API_PORT to $NEW_PORT, restarting exporter"
                kill $EXPORTER_PID 2>/dev/null
                wait $EXPORTER_PID 2>/dev/null
                break
              fi
            fi
            sleep 5
          done
          echo "Exporter stopped, restarting..."
          wait $EXPORTER_PID 2>/dev/null
        fi
      fi
      sleep 2
    done
  pasarguard-start.sh: |
    #!/bin/sh
    # Read API_KEY from shared volume created by init container
    if [ -f /shared/api-key ]; then
      export API_KEY=$(cat /shared/api-key)
      echo "Loaded API_KEY from shared volume"
    else
      echo "WARNING: API_KEY file not found, using default"
    fi
    cd /app
    # Start main process in background
    ./main &
    MAIN_PID=$!
    # Start continuous port monitoring in background
    {
      sleep 10 # Wait for xray to start initially
      LAST_PORT=""
      while true; do
        API_PORT=$(netstat -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
        if [ -n "$API_PORT" ] && [ "$API_PORT" != "$LAST_PORT" ]; then
          echo "Found xray API port: $API_PORT"
          echo -n "$API_PORT" > /shared/xray-api-port
          LAST_PORT="$API_PORT"
        fi
        sleep 5 # Check every 5 seconds
      done
    } &
    PORT_MONITOR_PID=$!
    # Wait for main process to finish
    wait $MAIN_PID
    # Clean up port monitor
    kill $PORT_MONITOR_PID 2>/dev/null
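Per node, init-uuid.sh persists an API key in a node-uuid-<dns-name> ConfigMap, requests a cert-manager Certificate for the node's xray-node-address label, and publishes a selector-less Service plus Endpoints that resolve to the node IP on ports 62050 and 9550. After a rollout, the generated objects can be inspected like this (the pasarguard namespace is taken from the ServiceMonitor further down; exact object names depend on each node's label):

# ConfigMaps holding the per-node API keys.
kubectl -n pasarguard get configmaps | grep node-uuid-
# Per-node certificates, their TLS secrets, and the generated Services/Endpoints.
kubectl -n pasarguard get certificates,secrets
kubectl -n pasarguard get svc,endpoints -l app=pasarguard-node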

View File

@@ -105,58 +105,174 @@ spec:
                  fieldPath: spec.nodeName
          command:
            - /bin/bash
            - /scripts/init-uuid.sh
            - -c
            - |
              set -e
              echo "Started"
              # NODE_NAME is already set via environment variable
              NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
              # Get DNS name from node label xray-node-address
              DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
              if [ -z "${DNS_NAME}" ]; then
                echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
                exit 1
              fi
              echo "Node: ${NODE_NAME}"
              echo "DNS Name from label: ${DNS_NAME}"
              # Use DNS name for ConfigMap name to ensure uniqueness
              CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
              echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
              # Check if ConfigMap exists and get UUID
              if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
                echo "ConfigMap exists, reading UUID..."
                API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
                if [ -z "${API_KEY}" ]; then
                  echo "UUID not found in ConfigMap, generating new one..."
                  API_KEY=$(cat /proc/sys/kernel/random/uuid)
                  kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
                else
                  echo "Using existing UUID from ConfigMap"
                fi
              else
                echo "ConfigMap does not exist, creating new one..."
                API_KEY=$(cat /proc/sys/kernel/random/uuid)
                kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
                  --from-literal=API_KEY="${API_KEY}" \
                  --from-literal=NODE_NAME="${NODE_NAME}"
              fi
              # Save UUID and node info to shared volume for the main container
              echo -n "${API_KEY}" > /shared/api-key
              echo -n "${NODE_NAME}" > /shared/node-name
              echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
              echo "UUID initialized: ${API_KEY}"
              echo "Node name: ${NODE_NAME}"
              echo "ConfigMap: ${CONFIGMAP_NAME}"
              # Create Certificate for this node using DNS name from label
              CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
              echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
              # Check if Certificate already exists
              if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
                echo "Certificate does not exist, creating..."
                cat <<EOF | kubectl apply -f -
              apiVersion: cert-manager.io/v1
              kind: Certificate
              metadata:
                name: ${CERT_NAME}
                namespace: ${NAMESPACE}
              spec:
                secretName: ${CERT_NAME}-tls
                issuerRef:
                  name: letsencrypt
                  kind: ClusterIssuer
                dnsNames:
                  - ${DNS_NAME}
              EOF
              else
                echo "Certificate already exists"
              fi
              # Wait for certificate to be ready
              echo "Waiting for certificate to be ready..."
              for i in {1..600}; do
                if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
                  echo "Certificate secret is ready!"
                  break
                fi
                echo "Waiting for certificate... ($i/600)"
                sleep 1
              done
              if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
                echo "WARNING: Certificate secret not ready after 600 seconds"
              else
                # Extract certificate and key from secret to shared volume
                echo "Extracting certificate and key..."
                kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
                kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
                echo "Certificate and key extracted successfully."
                cat /shared/tls.crt
              fi
              # Create individual Service and Endpoints for this node
              # Take only first part of node name before first dot
              NODE_SHORT_NAME="${NODE_NAME%%.*}"
              SERVICE_NAME="${NODE_SHORT_NAME}"
              # Get node internal IP (take only first IP if multiple)
              NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
              echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
              # Create Service without selector
              cat <<EOF | kubectl apply -f -
              apiVersion: v1
              kind: Service
              metadata:
                name: ${SERVICE_NAME}
                namespace: ${NAMESPACE}
                labels:
                  app: pasarguard-node
                  node: ${NODE_NAME}
              spec:
                clusterIP: None
                ports:
                  - name: api
                    port: 62050
                    protocol: TCP
                    targetPort: 62050
              ---
              apiVersion: v1
              kind: Endpoints
              metadata:
                name: ${SERVICE_NAME}
                namespace: ${NAMESPACE}
                labels:
                  app: pasarguard-node
                  node: ${NODE_NAME}
              subsets:
                - addresses:
                    - ip: ${NODE_IP}
                      nodeName: ${NODE_NAME}
                  ports:
                    - name: api
                      port: 62050
                      protocol: TCP
              EOF
              echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
          volumeMounts:
            - name: shared-data
              mountPath: /shared
            - name: scripts
              mountPath: /scripts
      containers:
        - name: xray-exporter
          image: alpine:3.18
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - /scripts/exporter-start.sh
          ports:
            - name: metrics
              containerPort: 9550
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /scrape
              port: metrics
            initialDelaySeconds: 60
            periodSeconds: 30
            timeoutSeconds: 10
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /scrape
              port: metrics
            initialDelaySeconds: 45
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "100m"
          volumeMounts:
            - name: shared-data
              mountPath: /shared
              readOnly: true
            - name: scripts
              mountPath: /scripts
        - name: pasarguard-node
          image: 'pasarguard/node:v0.1.1'
          imagePullPolicy: Always
          command:
            - /bin/sh
            - /scripts/pasarguard-start.sh
            - -c
            - |
              # Read API_KEY from shared volume created by init container
              if [ -f /shared/api-key ]; then
                export API_KEY=$(cat /shared/api-key)
                echo "Loaded API_KEY from shared volume"
              else
                echo "WARNING: API_KEY file not found, using default"
              fi
              cd /app
              exec ./main
          ports:
            - name: api
              containerPort: 62050
@@ -208,13 +324,7 @@ spec:
          volumeMounts:
            - name: shared-data
              mountPath: /shared
              readOnly: false
            - name: scripts
              mountPath: /scripts
              readOnly: true
      volumes:
        - name: shared-data
          emptyDir: {}
        - name: scripts
          configMap:
            name: pasarguard-scripts
            defaultMode: 0755

View File

@@ -34,7 +34,7 @@ spec:
            mountPath: /templates/subscription
      containers:
        - name: pasarguard-web
          image: 'pasarguard/panel:v1.7.2'
          image: 'pasarguard/panel:v1.4.1'
          imagePullPolicy: Always
          envFrom:
            - secretRef:

View File

@@ -7,5 +7,5 @@ resources:
- ./deployment.yaml
- ./daemonset.yaml
- ./certificate.yaml
- ./configmap-scripts.yaml
- ./servicemonitor.yaml

View File

@@ -1,21 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: pasarguard-node-metrics
  labels:
    app: pasarguard-node
    release: prometheus
spec:
  selector:
    matchLabels:
      app: pasarguard-node
  endpoints:
    - port: metrics
      path: /scrape
      interval: 30s
      scrapeTimeout: 10s
      honorLabels: true
  namespaceSelector:
    matchNames:
      - pasarguard

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
  - name: authentik
    repo: https://charts.goauthentik.io
    version: 2025.10.1
    version: 2025.8.1
    releaseName: authentik
    namespace: authentik
    valuesFile: values.yaml

View File

@@ -1,6 +1,6 @@
global:
  image:
    tag: "2025.10.1"
    tag: "2025.8.1"
  nodeSelector:
    kubernetes.io/hostname: master.tail2fe2d.ts.net
@@ -47,6 +47,7 @@ server:
    - minecraft.hexor.cy # Minecraft UI and server
    - pass.hexor.cy # k8s-secret for openai
    - ps.hexor.cy # pasarguard UI
    - ai.hexor.cy # ollama API
  tls:
    - secretName: idm-tls
      hosts:

View File

@@ -8,7 +8,7 @@
# BW_HOST: base64(url)
# BW_USERNAME: base64(name)
# BW_PASSWORD: base64(pass)
# Vaultwarden bot - 81212111-6350-4069-8bcf-19a67d3964a5
# 81212111-6350-4069-8bcf-19a67d3964a5
---
apiVersion: apps/v1
kind: Deployment

View File

@@ -4,16 +4,16 @@ prometheus:
  prometheusSpec:
    enableRemoteWriteReceiver: true
    additionalScrapeConfigs:
      - job_name: xray_vpn
        metrics_path: /scrape
      - job_name: outline_vpn
        static_configs:
          - targets: ['cy.tail2fe2d.ts.net:9550']
            labels: {job: cy}
          - targets: ['x86.tail2fe2d.ts.net:9550']
            labels: {job: am}
          - targets: ['jp.tail2fe2d.ts.net:9550']
            labels: {job: jp}
          - targets: ['100.117.24.104:9095']
            labels: {instance: cy}
          - targets: ['100.117.24.104:9096']
            labels: {instance: am}
          - targets: ['100.117.24.104:9097']
            labels: {instance: jp}
          - targets: ['100.117.24.104:9098']
            labels: {instance: bg}
      - job_name: cs_16_server
        static_configs:
          - targets: ['prom-a2s-exporter.counter-strike.svc:9841']

View File

@@ -167,26 +167,5 @@ oauth_applications = {
    create_group = true
    signing_key = "1b1b5bec-034a-4d96-871a-133f11322360"
  }
  "openwebui" = {
    name = "OpenWeb UI"
    slug = "openwebui"
    group = "Tools"
    meta_description = "OpenWeb UI"
    meta_icon = "https://ollama.com/public/ollama.png"
    redirect_uris = [
      "https://ai.hexor.cy/oauth/oidc/callback",
    ]
    meta_launch_url = "https://ai.hexor.cy"
    client_type = "confidential"
    include_claims_in_id_token = true
    access_code_validity = "minutes=1"
    access_token_validity = "minutes=5"
    refresh_token_validity = "days=30"
    scope_mappings = ["openid", "profile", "email"]
    access_groups = ["admins"]
    create_group = true
    signing_key = "1b1b5bec-034a-4d96-871a-133f11322360"
  }
}

View File

@@ -206,5 +206,19 @@ EOT
    create_group = true
    access_groups = ["admins"]
  }
  "ollama" = {
    name = "Ollama API"
    slug = "ollama"
    group = "Tools"
    external_host = "https://ai.hexor.cy"
    internal_host = "http://ollama.ollama.svc:11434"
    internal_host_ssl_validation = false
    meta_description = "Ollama API"
    meta_icon = "https://ollama.com/public/ollama.png"
    mode = "proxy"
    outpost = "kubernetes-outpost"
    create_group = true
    access_groups = ["admins"]
  }
}