Compare commits
auto-updat ... auto-updat

48 Commits

| SHA1 |
|---|
| 5b1ceb1740 |
| b433373725 |
| 3026e53746 |
| 63669c69ff |
| fa98e553cd |
| 055ef8aa77 |
| 22b359a7ee |
| 611e3e31dd |
| ddbd53e476 |
| f8a9d91932 |
| 262fea115d |
| 1e1a015dc0 |
| e76ebdd8c3 |
| 0c2ce55a41 |
| 6e9de5addf |
| 887a9a2306 |
| 776109d795 |
| c998426b44 |
| 536be6a61f |
| 713481c726 |
| f6411b7b65 |
| 3af6d98be8 |
| a45af9d4bc |
| 76937930ce |
| d4ff8d4665 |
| e0cf9371ae |
| 1126cb25bc |
| 44250dc937 |
| d9db73e078 |
| 71ce9f15ef |
| 6b855294af |
| 8dd16e24e6 |
| 3df95f46a5 |
| c0151eb2c9 |
| 6d7e365058 |
| 0b5361323a |
| 56352fef4b |
| 7a1f792391 |
| defe0cbdf5 |
| 7285c62b37 |
| 60f8d86fca |
| 2387653edd |
| 78a639162b |
| 90b197bcbe |
| 156d26aaf9 |
| 700b9cf5ff |
| 84bd1fc05a |
| cb3defd28c |
@@ -40,9 +40,11 @@ ArgoCD homelab project
| **greece-notifier** | [](https://ag.hexor.cy/applications/argocd/greece-notifier) |
| **hexound** | [](https://ag.hexor.cy/applications/argocd/hexound) |
| **immich** | [](https://ag.hexor.cy/applications/argocd/immich) |
| **iperf3** | [](https://ag.hexor.cy/applications/argocd/iperf3) |
| **jellyfin** | [](https://ag.hexor.cy/applications/argocd/jellyfin) |
| **k8s-secrets** | [](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
| **khm** | [](https://ag.hexor.cy/applications/argocd/khm) |
| **ollama** | [](https://ag.hexor.cy/applications/argocd/ollama) |
| **paperless** | [](https://ag.hexor.cy/applications/argocd/paperless) |
| **pasarguard** | [](https://ag.hexor.cy/applications/argocd/pasarguard) |
| **qbittorent-nas** | [](https://ag.hexor.cy/applications/argocd/qbittorent-nas) |
21  k8s/apps/iperf3/app.yaml  Normal file
@@ -0,0 +1,21 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: iperf3
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: iperf3
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/iperf3
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true
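Once this lands, a quick way to confirm Argo CD registers the new Application and reaches a healthy state (a sketch assuming `kubectl` access to the `argocd` namespace; the resource name comes from the manifest above):

```sh
# Watch the new Application until it reports Synced / Healthy
kubectl -n argocd get application iperf3 -w

# Or print just the sync and health fields
kubectl -n argocd get application iperf3 \
  -o jsonpath='{.status.sync.status}{"\t"}{.status.health.status}{"\n"}'
```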
56  k8s/apps/iperf3/daemonset.yaml  Normal file
@@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: iperf3-server
spec:
  selector:
    matchLabels:
      app: iperf3-server
  template:
    metadata:
      labels:
        app: iperf3-server
    spec:
      subdomain: iperf3
      initContainers:
        - name: set-hostname
          image: busybox:1.35
          command: ['sh', '-c']
          args:
            - |
              NODE_NAME=$(echo $NODE_NAME | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]')
              echo "iperf3-${NODE_NAME}" > /etc/hostname
              hostname "iperf3-${NODE_NAME}"
          securityContext:
            privileged: true
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
      containers:
        - name: iperf3-server
          image: networkstatic/iperf3:latest
          args: ["-s"]
          ports:
            - containerPort: 5201
              protocol: TCP
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HOSTNAME
              value: $(NODE_NAME)
          resources:
            requests:
              memory: "64Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
40  k8s/apps/iperf3/iperf3-exporter-daemonset.yaml  Normal file
@@ -0,0 +1,40 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: iperf3-exporter
  labels:
    app: iperf3-exporter
spec:
  selector:
    matchLabels:
      app: iperf3-exporter
  template:
    metadata:
      labels:
        app: iperf3-exporter
    spec:
      containers:
        - name: iperf3-exporter
          image: ghcr.io/edgard/iperf3_exporter:1.2.2
          ports:
            - containerPort: 9579
              name: metrics
              protocol: TCP
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "200m"
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
15  k8s/apps/iperf3/iperf3-exporter-service.yaml  Normal file
@@ -0,0 +1,15 @@
---
apiVersion: v1
kind: Service
metadata:
  name: iperf3-exporter
  labels:
    app: iperf3-exporter
spec:
  selector:
    app: iperf3-exporter
  ports:
    - name: metrics
      protocol: TCP
      port: 9579
      targetPort: 9579
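To exercise the exporter end to end before Prometheus does, its `/probe` endpoint can be hit by hand. A sketch assuming the manifests land in the `iperf3` namespace set by the Application above (the ServiceMonitor later in this diff points at `default`, so adjust the namespace and target host to wherever things actually resolve); the `target`, `duration`, and `streams` parameters mirror the ServiceMonitor configuration:

```sh
# Forward the exporter Service locally, then ask it to probe an iperf3 server;
# the target host is a placeholder built from the naming scheme in this diff
kubectl -n iperf3 port-forward svc/iperf3-exporter 9579:9579 &
curl 'http://localhost:9579/probe?target=iperf3.iperf3.svc.cluster.local:5201&duration=5&streams=2'
```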
10  k8s/apps/iperf3/kustomization.yaml  Normal file
@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - daemonset.yaml
  - service-headless.yaml
  - iperf3-exporter-daemonset.yaml
  - iperf3-exporter-service.yaml
  - servicemonitor.yaml
14  k8s/apps/iperf3/service-headless.yaml  Normal file
@@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
  name: iperf3
spec:
  clusterIP: None
  selector:
    app: iperf3-server
  ports:
    - name: iperf3
      protocol: TCP
      port: 5201
      targetPort: 5201
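The headless Service plus `subdomain: iperf3` is what gives the server pods a stable in-cluster DNS name. For an ad-hoc throughput test against whatever that name resolves to, something like the following should work (namespace assumed from the Application destination; not part of this diff):

```sh
# One-off client pod; the networkstatic/iperf3 image's entrypoint is the iperf3 binary
kubectl -n iperf3 run iperf3-client --rm -it --restart=Never \
  --image=networkstatic/iperf3 -- -c iperf3.iperf3.svc.cluster.local -t 10
```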
37  k8s/apps/iperf3/servicemonitor.yaml  Normal file
@@ -0,0 +1,37 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: iperf3-exporter
  namespace: default
  labels:
    app: iperf3-exporter
    release: prometheus
spec:
  selector:
    matchLabels:
      app: iperf3-exporter
  endpoints:
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        duration:
          - "10"
        streams:
          - "4"
      relabelings:
        - sourceLabels: [__address__]
          targetLabel: __param_target
          regex: (.+):9579
          replacement: iperf3-${1}.iperf3.default.svc.cluster.local:5201
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: iperf3-exporter.default.svc.cluster.local:9579
      metricRelabelings:
        - sourceLabels: [__name__]
          regex: iperf3_(.+)
          targetLabel: __name__
          replacement: network_${1}
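With the `metricRelabelings` above, whatever the exporter emits under `iperf3_*` should land in Prometheus under a `network_*` prefix. A quick way to confirm that after the first 5-minute scrape interval (the Prometheus URL is a placeholder):

```sh
# List series that were renamed by the metricRelabelings above
curl -sG 'http://prometheus.example/api/v1/series' \
  --data-urlencode 'match[]={__name__=~"network_.+"}' | jq .
```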
33  k8s/apps/ollama/external-secrets.yaml  Normal file
@@ -0,0 +1,33 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: oidc-secret
spec:
  target:
    name: oidc-secret
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        OAUTH_CLIENT_SECRET: |-
          {{ .OAUTH_CLIENT_SECRET }}
        OAUTH_CLIENT_ID: |-
          {{ .OAUTH_CLIENT_ID }}
  data:
    - secretKey: OAUTH_CLIENT_SECRET
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
        property: fields[0].value
    - secretKey: OAUTH_CLIENT_ID
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
        property: fields[1].value
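Once External Secrets reconciles this, the target Secret should exist with both OAuth keys. A quick check, assuming the resources end up in the `ollama` namespace used by the Helm releases below:

```sh
# READY should report True once the ClusterSecretStore lookup succeeds
kubectl -n ollama get externalsecret oidc-secret

# Confirm the templated keys were rendered (prints the client ID only)
kubectl -n ollama get secret oidc-secret -o jsonpath='{.data.OAUTH_CLIENT_ID}' | base64 -d
```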
@@ -1,6 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

+resources:
+  - external-secrets.yaml
+
helmCharts:
  - name: ollama
    repo: https://otwld.github.io/ollama-helm/
@@ -8,4 +11,11 @@ helmCharts:
    releaseName: ollama
    namespace: ollama
    valuesFile: ollama-values.yaml
    includeCRDs: true
+  - name: open-webui
+    repo: https://helm.openwebui.com/
+    version: 8.14.0
+    releaseName: openweb-ui
+    namespace: ollama
+    valuesFile: openweb-ui-values.yaml
+    includeCRDs: true
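Because this kustomization inflates Helm charts, rendering it locally needs kustomize's Helm integration flag. A minimal sketch, assuming it is run from the repository root:

```sh
# Render the ollama overlay (both charts) without touching the cluster
kustomize build --enable-helm k8s/apps/ollama | less
```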
52  k8s/apps/ollama/openweb-ui-values.yaml  Normal file
@@ -0,0 +1,52 @@
clusterDomain: ai.hexor.cy

extraEnvVars:
  GLOBAL_LOG_LEVEL: debug
  OAUTH_PROVIDER_NAME: authentik
  OPENID_PROVIDER_URL: https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration
  OPENID_REDIRECT_URI: https://ai.hexor.cy/oauth/oidc/callback
  WEBUI_URL: https://ai.hexor.cy
  # Allows auto-creation of new users using OAuth. Must be paired with ENABLE_LOGIN_FORM=false.
  ENABLE_OAUTH_SIGNUP: true
  # Disables user/password login form. Required when ENABLE_OAUTH_SIGNUP=true.
  ENABLE_LOGIN_FORM: false
  OAUTH_MERGE_ACCOUNTS_BY_EMAIL: true

extraEnvFrom:
  - secretRef:
      name: oidc-secret
nodeSelector:
  kubernetes.io/hostname: master.tail2fe2d.ts.net
ollamaUrls:
  - http://ollama.ollama.svc:11434
ollama:
  enabled: false
ollama:
  gpu:
    enabled: false
  models:
    pull:
      - qwen3-vl:8b
    run:
      - qwen3-vl:8b

pipelines:
  enabled: true

tika:
  enabled: true

websocket:
  enabled: true

ingress:
  enabled: true
  class: traefik
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
  host: "ai.hexor.cy"
  tls:
    - hosts:
        - '*.hexor.cy'
      secretName: ollama-tls
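The OAuth wiring above hinges on the discovery document URL being reachable. Checking it by hand is cheap (the endpoint is taken from the values file; the jq filter is only illustrative):

```sh
# The issuer and endpoints reported here are what open-webui will use for the OIDC flow
curl -s https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration \
  | jq '{issuer, authorization_endpoint, token_endpoint}'
```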
264  k8s/apps/pasarguard/configmap-scripts.yaml  Normal file
@@ -0,0 +1,264 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: pasarguard-scripts
  labels:
    app: pasarguard-node
data:
  init-uuid.sh: |
    #!/bin/bash
    set -e
    echo "Started"
    # NODE_NAME is already set via environment variable
    NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)

    # Get DNS name from node label xray-node-address
    DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')

    if [ -z "${DNS_NAME}" ]; then
      echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
      exit 1
    fi

    echo "Node: ${NODE_NAME}"
    echo "DNS Name from label: ${DNS_NAME}"

    # Use DNS name for ConfigMap name to ensure uniqueness
    CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"

    echo "Checking ConfigMap: ${CONFIGMAP_NAME}"

    # Check if ConfigMap exists and get UUID
    if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
      echo "ConfigMap exists, reading UUID..."
      API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')

      if [ -z "${API_KEY}" ]; then
        echo "UUID not found in ConfigMap, generating new one..."
        API_KEY=$(cat /proc/sys/kernel/random/uuid)
        kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
      else
        echo "Using existing UUID from ConfigMap"
      fi
    else
      echo "ConfigMap does not exist, creating new one..."
      API_KEY=$(cat /proc/sys/kernel/random/uuid)
      kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
        --from-literal=API_KEY="${API_KEY}" \
        --from-literal=NODE_NAME="${NODE_NAME}"
    fi

    # Save UUID and node info to shared volume for the main container
    echo -n "${API_KEY}" > /shared/api-key
    echo -n "${NODE_NAME}" > /shared/node-name
    echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
    echo "UUID initialized: ${API_KEY}"
    echo "Node name: ${NODE_NAME}"
    echo "ConfigMap: ${CONFIGMAP_NAME}"

    # Create Certificate for this node using DNS name from label
    CERT_NAME="pasarguard-node-${DNS_NAME//./-}"

    echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"

    # Check if Certificate already exists
    if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
      echo "Certificate does not exist, creating..."
      cat <<EOF | kubectl apply -f -
    apiVersion: cert-manager.io/v1
    kind: Certificate
    metadata:
      name: ${CERT_NAME}
      namespace: ${NAMESPACE}
    spec:
      secretName: ${CERT_NAME}-tls
      issuerRef:
        name: letsencrypt
        kind: ClusterIssuer
      dnsNames:
        - ${DNS_NAME}
    EOF
    else
      echo "Certificate already exists"
    fi

    # Wait for certificate to be ready

    echo "Waiting for certificate to be ready..."
    for i in {1..600}; do
      if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
        echo "Certificate secret is ready!"
        break
      fi
      echo "Waiting for certificate... ($i/600)"
      sleep 1
    done

    if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
      echo "WARNING: Certificate secret not ready after 600 seconds"
    else
      # Extract certificate and key from secret to shared volume
      echo "Extracting certificate and key..."
      kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
      kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
      echo "Certificate and key extracted successfully."
      cat /shared/tls.crt
    fi

    # Create individual Service and Endpoints for this node
    # Take only first part of node name before first dot
    NODE_SHORT_NAME="${NODE_NAME%%.*}"
    SERVICE_NAME="${NODE_SHORT_NAME}"

    # Get node internal IP (take only first IP if multiple)
    NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')

    echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"

    # Create Service without selector
    cat <<EOF | kubectl apply -f -
    apiVersion: v1
    kind: Service
    metadata:
      name: ${SERVICE_NAME}
      namespace: ${NAMESPACE}
      labels:
        app: pasarguard-node
        node: ${NODE_NAME}
    spec:
      clusterIP: None
      ports:
        - name: api
          port: 62050
          protocol: TCP
          targetPort: 62050
        - name: metrics
          port: 9550
          protocol: TCP
          targetPort: 9550
    ---
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: ${SERVICE_NAME}
      namespace: ${NAMESPACE}
      labels:
        app: pasarguard-node
        node: ${NODE_NAME}
    subsets:
      - addresses:
          - ip: ${NODE_IP}
            nodeName: ${NODE_NAME}
        ports:
          - name: api
            port: 62050
            protocol: TCP
          - name: metrics
            port: 9550
            protocol: TCP
    EOF

    echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"

  exporter-start.sh: |
    #!/bin/sh
    # Install required tools
    apk add --no-cache wget curl iproute2-ss bash

    # Download v2ray-exporter
    echo "Downloading v2ray-exporter..."
    ARCH=$(uname -m)
    case $ARCH in
      x86_64)
        BINARY_ARCH="amd64"
        ;;
      aarch64|arm64)
        BINARY_ARCH="arm64"
        ;;
      *)
        echo "Unsupported architecture: $ARCH"
        exit 1
        ;;
    esac

    echo "Detected architecture: $ARCH, using binary: v2ray-exporter_linux_$BINARY_ARCH"
    wget -L -O /tmp/v2ray-exporter "https://github.com/wi1dcard/v2ray-exporter/releases/download/v0.6.0/v2ray-exporter_linux_$BINARY_ARCH"
    mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
    chmod +x /usr/local/bin/v2ray-exporter

    # Wait for initial API port file
    echo "Waiting for initial xray API port file..."
    while [ ! -f /shared/xray-api-port ]; do
      echo "Waiting for API port file..."
      sleep 2
    done

    # Main loop - restart exporter if it crashes or port changes
    while true; do
      if [ -f /shared/xray-api-port ]; then
        API_PORT=$(cat /shared/xray-api-port)
        if [ -n "$API_PORT" ]; then
          echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
          /usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550" &
          EXPORTER_PID=$!

          # Wait for exporter to exit or port file to change
          while kill -0 $EXPORTER_PID 2>/dev/null; do
            if [ -f /shared/xray-api-port ]; then
              NEW_PORT=$(cat /shared/xray-api-port)
              if [ "$NEW_PORT" != "$API_PORT" ]; then
                echo "API port changed from $API_PORT to $NEW_PORT, restarting exporter"
                kill $EXPORTER_PID 2>/dev/null
                wait $EXPORTER_PID 2>/dev/null
                break
              fi
            fi
            sleep 5
          done

          echo "Exporter stopped, restarting..."
          wait $EXPORTER_PID 2>/dev/null
        fi
      fi
      sleep 2
    done

  pasarguard-start.sh: |
    #!/bin/sh
    # Read API_KEY from shared volume created by init container
    if [ -f /shared/api-key ]; then
      export API_KEY=$(cat /shared/api-key)
      echo "Loaded API_KEY from shared volume"
    else
      echo "WARNING: API_KEY file not found, using default"
    fi

    cd /app

    # Start main process in background
    ./main &
    MAIN_PID=$!

    # Start continuous port monitoring in background
    {
      sleep 10  # Wait for xray to start initially
      LAST_PORT=""

      while true; do
        API_PORT=$(netstat -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
        if [ -n "$API_PORT" ] && [ "$API_PORT" != "$LAST_PORT" ]; then
          echo "Found xray API port: $API_PORT"
          echo -n "$API_PORT" > /shared/xray-api-port
          LAST_PORT="$API_PORT"
        fi
        sleep 5  # Check every 5 seconds
      done
    } &
    PORT_MONITOR_PID=$!

    # Wait for main process to finish
    wait $MAIN_PID

    # Clean up port monitor
    kill $PORT_MONITOR_PID 2>/dev/null
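`init-uuid.sh` drives `kubectl` against nodes (cluster-scoped), ConfigMaps, Secrets, Services, Endpoints, and cert-manager Certificates, so the DaemonSet's ServiceAccount needs matching RBAC. That wiring is not part of this hunk; a rough sketch of the minimum it implies (all names, including the ServiceAccount, are hypothetical):

```sh
# Cluster-scoped read on nodes (the script reads node labels and addresses)
kubectl create clusterrole pasarguard-node-reader --verb=get,list --resource=nodes

# Namespaced access for everything the script creates or reads
kubectl -n pasarguard create role pasarguard-node-init \
  --verb=get,create,patch \
  --resource=configmaps,secrets,services,endpoints,certificates.cert-manager.io

# Bind both to whichever ServiceAccount the DaemonSet pods actually run as
kubectl create clusterrolebinding pasarguard-node-reader \
  --clusterrole=pasarguard-node-reader --serviceaccount=pasarguard:default
kubectl -n pasarguard create rolebinding pasarguard-node-init \
  --role=pasarguard-node-init --serviceaccount=pasarguard:default
```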
@@ -105,174 +105,58 @@ spec:
                  fieldPath: spec.nodeName
          command:
            - /bin/bash
            - -c
            - |
              set -e
              echo "Started"
              # NODE_NAME is already set via environment variable
              NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)

              # Get DNS name from node label xray-node-address
              DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')

              if [ -z "${DNS_NAME}" ]; then
                echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
                exit 1
              fi

              echo "Node: ${NODE_NAME}"
              echo "DNS Name from label: ${DNS_NAME}"

              # Use DNS name for ConfigMap name to ensure uniqueness
              CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"

              echo "Checking ConfigMap: ${CONFIGMAP_NAME}"

              # Check if ConfigMap exists and get UUID
              if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
                echo "ConfigMap exists, reading UUID..."
                API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')

                if [ -z "${API_KEY}" ]; then
                  echo "UUID not found in ConfigMap, generating new one..."
                  API_KEY=$(cat /proc/sys/kernel/random/uuid)
                  kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
                else
                  echo "Using existing UUID from ConfigMap"
                fi
              else
                echo "ConfigMap does not exist, creating new one..."
                API_KEY=$(cat /proc/sys/kernel/random/uuid)
                kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
                  --from-literal=API_KEY="${API_KEY}" \
                  --from-literal=NODE_NAME="${NODE_NAME}"
              fi

              # Save UUID and node info to shared volume for the main container
              echo -n "${API_KEY}" > /shared/api-key
              echo -n "${NODE_NAME}" > /shared/node-name
              echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
              echo "UUID initialized: ${API_KEY}"
              echo "Node name: ${NODE_NAME}"
              echo "ConfigMap: ${CONFIGMAP_NAME}"

              # Create Certificate for this node using DNS name from label
              CERT_NAME="pasarguard-node-${DNS_NAME//./-}"

              echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"

              # Check if Certificate already exists
              if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
                echo "Certificate does not exist, creating..."
                cat <<EOF | kubectl apply -f -
              apiVersion: cert-manager.io/v1
              kind: Certificate
              metadata:
                name: ${CERT_NAME}
                namespace: ${NAMESPACE}
              spec:
                secretName: ${CERT_NAME}-tls
                issuerRef:
                  name: letsencrypt
                  kind: ClusterIssuer
                dnsNames:
                  - ${DNS_NAME}
              EOF
              else
                echo "Certificate already exists"
              fi

              # Wait for certificate to be ready

              echo "Waiting for certificate to be ready..."
              for i in {1..600}; do
                if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
                  echo "Certificate secret is ready!"
                  break
                fi
                echo "Waiting for certificate... ($i/600)"
                sleep 1
              done

              if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
                echo "WARNING: Certificate secret not ready after 600 seconds"
              else
                # Extract certificate and key from secret to shared volume
                echo "Extracting certificate and key..."
                kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
                kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
                echo "Certificate and key extracted successfully."
                cat /shared/tls.crt
              fi

              # Create individual Service and Endpoints for this node
              # Take only first part of node name before first dot
              NODE_SHORT_NAME="${NODE_NAME%%.*}"
              SERVICE_NAME="${NODE_SHORT_NAME}"

              # Get node internal IP (take only first IP if multiple)
              NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')

              echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"

              # Create Service without selector
              cat <<EOF | kubectl apply -f -
              apiVersion: v1
              kind: Service
              metadata:
                name: ${SERVICE_NAME}
                namespace: ${NAMESPACE}
                labels:
                  app: pasarguard-node
                  node: ${NODE_NAME}
              spec:
                clusterIP: None
                ports:
                  - name: api
                    port: 62050
                    protocol: TCP
                    targetPort: 62050
              ---
              apiVersion: v1
              kind: Endpoints
              metadata:
                name: ${SERVICE_NAME}
                namespace: ${NAMESPACE}
                labels:
                  app: pasarguard-node
                  node: ${NODE_NAME}
              subsets:
                - addresses:
                    - ip: ${NODE_IP}
                      nodeName: ${NODE_NAME}
                  ports:
                    - name: api
                      port: 62050
                      protocol: TCP
              EOF

              echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
            - /scripts/init-uuid.sh
          volumeMounts:
            - name: shared-data
              mountPath: /shared
            - name: scripts
              mountPath: /scripts
      containers:
        - name: xray-exporter
          image: alpine:3.18
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - /scripts/exporter-start.sh
          ports:
            - name: metrics
              containerPort: 9550
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /scrape
              port: metrics
            initialDelaySeconds: 60
            periodSeconds: 30
            timeoutSeconds: 10
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /scrape
              port: metrics
            initialDelaySeconds: 45
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "100m"
          volumeMounts:
            - name: shared-data
              mountPath: /shared
              readOnly: true
            - name: scripts
              mountPath: /scripts
        - name: pasarguard-node
          image: 'pasarguard/node:v0.1.1'
          imagePullPolicy: Always
          command:
            - /bin/sh
            - -c
            - |
              # Read API_KEY from shared volume created by init container
              if [ -f /shared/api-key ]; then
                export API_KEY=$(cat /shared/api-key)
                echo "Loaded API_KEY from shared volume"
              else
                echo "WARNING: API_KEY file not found, using default"
              fi

              cd /app
              exec ./main
            - /scripts/pasarguard-start.sh
          ports:
            - name: api
              containerPort: 62050
@@ -324,7 +208,13 @@ spec:
          volumeMounts:
            - name: shared-data
              mountPath: /shared
-              readOnly: true
+              readOnly: false
+            - name: scripts
+              mountPath: /scripts
      volumes:
        - name: shared-data
          emptyDir: {}
+        - name: scripts
+          configMap:
+            name: pasarguard-scripts
+            defaultMode: 0755
@@ -34,7 +34,7 @@ spec:
              mountPath: /templates/subscription
      containers:
        - name: pasarguard-web
-          image: 'pasarguard/panel:v1.4.1'
+          image: 'pasarguard/panel:v1.7.2'
          imagePullPolicy: Always
          envFrom:
            - secretRef:
@@ -7,5 +7,5 @@ resources:
  - ./deployment.yaml
  - ./daemonset.yaml
  - ./certificate.yaml
+  - ./configmap-scripts.yaml
+  - ./servicemonitor.yaml
21  k8s/apps/pasarguard/servicemonitor.yaml  Normal file
@@ -0,0 +1,21 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: pasarguard-node-metrics
  labels:
    app: pasarguard-node
    release: prometheus
spec:
  selector:
    matchLabels:
      app: pasarguard-node
  endpoints:
    - port: metrics
      path: /scrape
      interval: 30s
      scrapeTimeout: 10s
      honorLabels: true
  namespaceSelector:
    matchNames:
      - pasarguard
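The selector above matches the per-node Services that `init-uuid.sh` creates (they carry `app: pasarguard-node`). To confirm the Services and their hand-written Endpoints are there for the ServiceMonitor to discover (namespace taken from the `namespaceSelector`):

```sh
# Services and endpoints the ServiceMonitor should pick up
kubectl -n pasarguard get svc,endpoints -l app=pasarguard-node
```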
@@ -10,7 +10,7 @@ resources:
helmCharts:
  - name: authentik
    repo: https://charts.goauthentik.io
-    version: 2025.8.1
+    version: 2025.10.1
    releaseName: authentik
    namespace: authentik
    valuesFile: values.yaml
@@ -1,6 +1,6 @@
global:
  image:
-    tag: "2025.8.1"
+    tag: "2025.10.1"
  nodeSelector:
    kubernetes.io/hostname: master.tail2fe2d.ts.net
@@ -47,7 +47,6 @@ server:
      - minecraft.hexor.cy # Minecraft UI and server
      - pass.hexor.cy # k8s-secret for openai
      - ps.hexor.cy # pasarguard UI
-      - ai.hexor.cy # ollama API
    tls:
      - secretName: idm-tls
        hosts:
@@ -8,7 +8,7 @@
# BW_HOST: base64(url)
# BW_USERNAME: base64(name)
# BW_PASSWORD: base64(pass)
-# 81212111-6350-4069-8bcf-19a67d3964a5
+# Vaultwarden bot - 81212111-6350-4069-8bcf-19a67d3964a5
---
apiVersion: apps/v1
kind: Deployment
@@ -4,16 +4,16 @@ prometheus:
  prometheusSpec:
    enableRemoteWriteReceiver: true
    additionalScrapeConfigs:
-      - job_name: outline_vpn
+      - job_name: xray_vpn
        metrics_path: /scrape
        static_configs:
-          - targets: ['100.117.24.104:9095']
-            labels: {instance: cy}
-          - targets: ['100.117.24.104:9096']
-            labels: {instance: am}
-          - targets: ['100.117.24.104:9097']
-            labels: {instance: jp}
-          - targets: ['100.117.24.104:9098']
-            labels: {instance: bg}
+          - targets: ['cy.tail2fe2d.ts.net:9550']
+            labels: {job: cy}
+          - targets: ['x86.tail2fe2d.ts.net:9550']
+            labels: {job: am}
+          - targets: ['jp.tail2fe2d.ts.net:9550']
+            labels: {job: jp}

      - job_name: cs_16_server
        static_configs:
          - targets: ['prom-a2s-exporter.counter-strike.svc:9841']
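After Prometheus reloads the additional scrape config, the renamed job should appear as its own scrape pool. A quick check via the HTTP API (the Prometheus URL is a placeholder; the per-target `labels: {job: ...}` above override the job label, so filter on the pool name rather than the label):

```sh
# List health of the targets in the renamed scrape pool
curl -s 'http://prometheus.example/api/v1/targets' \
  | jq '.data.activeTargets[] | select(.scrapePool=="xray_vpn") | {url: .scrapeUrl, health}'
```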
@@ -167,5 +167,26 @@ oauth_applications = {
    create_group = true
    signing_key = "1b1b5bec-034a-4d96-871a-133f11322360"
  }
+
+  "openwebui" = {
+    name = "OpenWeb UI"
+    slug = "openwebui"
+    group = "Tools"
+    meta_description = "OpenWeb UI"
+    meta_icon = "https://ollama.com/public/ollama.png"
+    redirect_uris = [
+      "https://ai.hexor.cy/oauth/oidc/callback",
+    ]
+    meta_launch_url = "https://ai.hexor.cy"
+    client_type = "confidential"
+    include_claims_in_id_token = true
+    access_code_validity = "minutes=1"
+    access_token_validity = "minutes=5"
+    refresh_token_validity = "days=30"
+    scope_mappings = ["openid", "profile", "email"]
+    access_groups = ["admins"]
+    create_group = true
+    signing_key = "1b1b5bec-034a-4d96-871a-133f11322360"
+  }
}
@@ -206,19 +206,5 @@ EOT
    create_group = true
    access_groups = ["admins"]
  }
-  "ollama" = {
-    name = "Ollama API"
-    slug = "ollama"
-    group = "Tools"
-    external_host = "https://ai.hexor.cy"
-    internal_host = "http://ollama.ollama.svc:11434"
-    internal_host_ssl_validation = false
-    meta_description = "Ollama API"
-    meta_icon = "https://ollama.com/public/ollama.png"
-    mode = "proxy"
-    outpost = "kubernetes-outpost"
-    create_group = true
-    access_groups = ["admins"]
-  }
}