---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: pasarguard-node
  labels:
    app: pasarguard-node
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pasarguard-node-configmap
  labels:
    app: pasarguard-node
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "create", "update", "patch"]
  - apiGroups: ["cert-manager.io"]
    resources: ["certificates"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: pasarguard-node-configmap
  labels:
    app: pasarguard-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: pasarguard-node-configmap
subjects:
  - kind: ServiceAccount
    name: pasarguard-node
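# Cluster-scoped read access to Nodes is needed by the init container below,
# which reads the node's xray-node-address label and its InternalIP.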
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: pasarguard-node-reader
  labels:
    app: pasarguard-node
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: pasarguard-node-reader
  labels:
    app: pasarguard-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: pasarguard-node-reader
subjects:
  - kind: ServiceAccount
    name: pasarguard-node
    namespace: pasarguard
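# The namespaced resources in this file are assumed to be applied in the
# "pasarguard" namespace, matching the ClusterRoleBinding subject above.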
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: pasarguard-node
  labels:
    app: pasarguard-node
spec:
  selector:
    matchLabels:
      app: pasarguard-node
  revisionHistoryLimit: 3
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: pasarguard-node
    spec:
      serviceAccountName: pasarguard-node
      hostNetwork: true
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: xray-node-address
                    operator: Exists
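      # Pods are scheduled only on nodes carrying the xray-node-address label,
      # whose value must be the node's public DNS name, e.g. (hypothetical node
      # name and hostname):
      #   kubectl label node worker-1 xray-node-address=node1.example.com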
      initContainers:
        - name: init-uuid
          image: bitnami/kubectl:latest
          env:
            - name: GODEBUG
              value: "x509sha1=1"
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          command:
            - /bin/bash
            - -c
            - |
              set -e
              echo "Started"
              # NODE_NAME is already set via environment variable
              NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
              # Get DNS name from node label xray-node-address
              DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
              if [ -z "${DNS_NAME}" ]; then
                echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
                exit 1
              fi
              echo "Node: ${NODE_NAME}"
              echo "DNS Name from label: ${DNS_NAME}"
              # Use DNS name for ConfigMap name to ensure uniqueness
              CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
              echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
              # Check if ConfigMap exists and get UUID
              if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
                echo "ConfigMap exists, reading UUID..."
                API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
                if [ -z "${API_KEY}" ]; then
                  echo "UUID not found in ConfigMap, generating new one..."
                  API_KEY=$(cat /proc/sys/kernel/random/uuid)
                  kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
                else
                  echo "Using existing UUID from ConfigMap"
                fi
              else
                echo "ConfigMap does not exist, creating new one..."
                API_KEY=$(cat /proc/sys/kernel/random/uuid)
                kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
                  --from-literal=API_KEY="${API_KEY}" \
                  --from-literal=NODE_NAME="${NODE_NAME}"
              fi
              # Save UUID and node info to shared volume for the main container
              echo -n "${API_KEY}" > /shared/api-key
              echo -n "${NODE_NAME}" > /shared/node-name
              echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
              echo "UUID initialized: ${API_KEY}"
              echo "Node name: ${NODE_NAME}"
              echo "ConfigMap: ${CONFIGMAP_NAME}"
              # Create Certificate for this node using DNS name from label
              CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
              echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
              # Check if Certificate already exists
              if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
                echo "Certificate does not exist, creating..."
                cat <<EOF | kubectl apply -f -
              apiVersion: cert-manager.io/v1
              kind: Certificate
              metadata:
                name: ${CERT_NAME}
                namespace: ${NAMESPACE}
              spec:
                secretName: ${CERT_NAME}-tls
                issuerRef:
                  name: letsencrypt
                  kind: ClusterIssuer
                dnsNames:
                  - ${DNS_NAME}
              EOF
              else
                echo "Certificate already exists"
              fi
              # Wait for certificate to be ready
              echo "Waiting for certificate to be ready..."
              for i in {1..600}; do
                if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
                  echo "Certificate secret is ready!"
                  break
                fi
                echo "Waiting for certificate... ($i/600)"
                sleep 1
              done
              if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
                echo "WARNING: Certificate secret not ready after 600 seconds"
              else
                # Extract certificate and key from secret to shared volume
                echo "Extracting certificate and key..."
                kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
                kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
                echo "Certificate and key extracted successfully."
                cat /shared/tls.crt
              fi
              # Create individual Service and Endpoints for this node
              # Take only first part of node name before first dot
              NODE_SHORT_NAME="${NODE_NAME%%.*}"
              SERVICE_NAME="${NODE_SHORT_NAME}"
              # Get node internal IP (take only first IP if multiple)
              NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
              echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
              # Create Service without selector
              cat <<EOF | kubectl apply -f -
              apiVersion: v1
              kind: Service
              metadata:
                name: ${SERVICE_NAME}
                namespace: ${NAMESPACE}
                labels:
                  app: pasarguard-node
                  node: ${NODE_NAME}
              spec:
                clusterIP: None
                ports:
                  - name: api
                    port: 62050
                    protocol: TCP
                    targetPort: 62050
                  - name: metrics
                    port: 9550
                    protocol: TCP
                    targetPort: 9550
              ---
              apiVersion: v1
              kind: Endpoints
              metadata:
                name: ${SERVICE_NAME}
                namespace: ${NAMESPACE}
                labels:
                  app: pasarguard-node
                  node: ${NODE_NAME}
              subsets:
                - addresses:
                    - ip: ${NODE_IP}
                      nodeName: ${NODE_NAME}
                  ports:
                    - name: api
                      port: 62050
                      protocol: TCP
                    - name: metrics
                      port: 9550
                      protocol: TCP
              EOF
              echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
          volumeMounts:
            - name: shared-data
              mountPath: /shared
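      # Sidecar: probes the local Xray stats API and exposes Prometheus metrics on :9550
      # via wi1dcard/v2ray-exporter (downloaded at start-up, so the node needs egress to GitHub).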
      containers:
        - name: xray-exporter
          image: alpine:3.18
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            - |
              # Install required tools
              apk add --no-cache wget curl iproute2-ss bash
              # Download v2ray-exporter
              echo "Downloading v2ray-exporter..."
              wget -O /tmp/v2ray-exporter https://github.com/wi1dcard/v2ray-exporter/releases/latest/download/v2ray-exporter_linux_amd64
              mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
              chmod +x /usr/local/bin/v2ray-exporter
              # Wait for xray to start
              echo "Waiting for xray process to start..."
              sleep 10
              # Find xray API port
              while true; do
                echo "Looking for xray API port..."
                API_PORT=$(ss -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
                if [ -n "$API_PORT" ]; then
                  echo "Found potential API port: $API_PORT"
                  # Verify it's the correct port
                  if curl -s -o /dev/null -w "%{http_code}" "127.0.0.1:$API_PORT" 2>&1 | grep -q "Received HTTP/0.9"; then
                    echo "Verified API port: $API_PORT"
                    break
                  fi
                fi
                echo "API port not found or not verified, retrying in 5 seconds..."
                sleep 5
              done
              # Start exporter
              echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
              exec /usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550"
          ports:
            - name: metrics
              containerPort: 9550
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /scrape
              port: metrics
            initialDelaySeconds: 60
            periodSeconds: 30
            timeoutSeconds: 10
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /scrape
              port: metrics
            initialDelaySeconds: 45
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "100m"
        - name: pasarguard-node
          image: 'pasarguard/node:v0.1.1'
          imagePullPolicy: Always
          command:
            - /bin/sh
            - -c
            - |
              # Read API_KEY from shared volume created by init container
              if [ -f /shared/api-key ]; then
                export API_KEY=$(cat /shared/api-key)
                echo "Loaded API_KEY from shared volume"
              else
                echo "WARNING: API_KEY file not found, using default"
              fi
              cd /app
              exec ./main
          ports:
            - name: api
              containerPort: 62050
              protocol: TCP
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: NODE_HOST
              value: "0.0.0.0"
            - name: SERVICE_PORT
              value: "62050"
            - name: SERVICE_PROTOCOL
              value: "grpc"
            - name: DEBUG
              value: "true"
            - name: SSL_CERT_FILE
              value: "/shared/tls.crt"
            - name: SSL_KEY_FILE
              value: "/shared/tls.key"
            - name: XRAY_EXECUTABLE_PATH
              value: "/usr/local/bin/xray"
            - name: XRAY_ASSETS_PATH
              value: "/usr/local/share/xray"
            - name: API_KEY
              value: "change-this-to-a-secure-uuid"
          livenessProbe:
            tcpSocket:
              port: 62050
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            tcpSocket:
              port: 62050
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 3
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          volumeMounts:
            - name: shared-data
              mountPath: /shared
              readOnly: true
      volumes:
        - name: shared-data
          emptyDir: {}