# homelab/k8s/apps/pasarguard/daemonset.yaml
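# Runs one pasarguard/node agent per labelled node. An init container
# bootstraps a stable per-node API key (stored in a ConfigMap), a cert-manager
# TLS certificate for the node's DNS name, and a headless Service pointing at
# the node's InternalIP; the main container serves the gRPC API on port 62050
# over the host network.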
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: pasarguard-node
labels:
app: pasarguard-node
---
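# Namespaced permissions for the init container: manage the per-node UUID
# ConfigMaps, cert-manager Certificates and per-node Services/Endpoints, and
# read the issued TLS Secrets.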
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: pasarguard-node-configmap
labels:
app: pasarguard-node
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "create", "update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: pasarguard-node-configmap
labels:
app: pasarguard-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pasarguard-node-configmap
subjects:
- kind: ServiceAccount
  name: pasarguard-node
  namespace: pasarguard
---
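# Cluster-scoped read access to Node objects, needed to resolve the
# xray-node-address label and the node's InternalIP.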
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: pasarguard-node-reader
labels:
app: pasarguard-node
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: pasarguard-node-reader
labels:
app: pasarguard-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: pasarguard-node-reader
subjects:
- kind: ServiceAccount
name: pasarguard-node
namespace: pasarguard
---
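# One pod per matching node, running on the host network so port 62050 is
# exposed directly on the node.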
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: pasarguard-node
labels:
app: pasarguard-node
spec:
selector:
matchLabels:
app: pasarguard-node
revisionHistoryLimit: 3
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: pasarguard-node
spec:
serviceAccountName: pasarguard-node
hostNetwork: true
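# Schedule only onto nodes carrying the xray-node-address label; its value is
# the DNS name used for the node's certificate and per-node ConfigMap naming.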
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: xray-node-address
operator: Exists
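# The init container derives the node's DNS name from the xray-node-address
# label, ensures a per-node API-key ConfigMap and a cert-manager Certificate
# exist (this assumes a ClusterIssuer named "letsencrypt" is installed), waits
# for the TLS Secret, and publishes a headless Service/Endpoints resolving to
# the node's InternalIP. Results are handed over via the /shared emptyDir.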
initContainers:
- name: init-uuid
image: rancher/kubectl:v1.28.2
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /bin/bash
- -c
- |
set -e
echo "Started"
# NODE_NAME is already set via environment variable
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
# Get DNS name from node label xray-node-address
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
if [ -z "${DNS_NAME}" ]; then
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
exit 1
fi
echo "Node: ${NODE_NAME}"
echo "DNS Name from label: ${DNS_NAME}"
# Use DNS name for ConfigMap name to ensure uniqueness
CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
# Check if ConfigMap exists and get UUID
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "ConfigMap exists, reading UUID..."
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
if [ -z "${API_KEY}" ]; then
echo "UUID not found in ConfigMap, generating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
else
echo "Using existing UUID from ConfigMap"
fi
else
echo "ConfigMap does not exist, creating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
--from-literal=API_KEY="${API_KEY}" \
--from-literal=NODE_NAME="${NODE_NAME}"
fi
# Save UUID and node info to shared volume for the main container
echo -n "${API_KEY}" > /shared/api-key
echo -n "${NODE_NAME}" > /shared/node-name
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
echo "UUID initialized: ${API_KEY}"
echo "Node name: ${NODE_NAME}"
echo "ConfigMap: ${CONFIGMAP_NAME}"
# Create Certificate for this node using DNS name from label
CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
# Check if Certificate already exists
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate does not exist, creating..."
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ${CERT_NAME}
namespace: ${NAMESPACE}
spec:
secretName: ${CERT_NAME}-tls
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- ${DNS_NAME}
EOF
else
echo "Certificate already exists"
fi
# Wait for certificate to be ready
echo "Waiting for certificate to be ready..."
for i in {1..600}; do
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate secret is ready!"
break
fi
echo "Waiting for certificate... ($i/600)"
sleep 1
done
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "WARNING: Certificate secret not ready after 600 seconds"
else
# Extract certificate and key from secret to shared volume
echo "Extracting certificate and key..."
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
echo "Certificate and key extracted successfully."
cat /shared/tls.crt
fi
# Create individual Service and Endpoints for this node
# Take only first part of node name before first dot
NODE_SHORT_NAME="${NODE_NAME%%.*}"
SERVICE_NAME="${NODE_SHORT_NAME}"
# Get node internal IP (take only first IP if multiple)
NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
# Create Service without selector
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
spec:
clusterIP: None
ports:
- name: api
port: 62050
protocol: TCP
targetPort: 62050
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
subsets:
- addresses:
- ip: ${NODE_IP}
nodeName: ${NODE_NAME}
ports:
- name: api
port: 62050
protocol: TCP
EOF
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
volumeMounts:
- name: shared-data
mountPath: /shared
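# Main container: loads the API key and TLS files produced by the init
# container from /shared, then starts the node binary.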
containers:
- name: pasarguard-node
image: pasarguard/node:v0.1.1
imagePullPolicy: Always
command:
- /bin/sh
- -c
- |
# Read API_KEY from shared volume created by init container
if [ -f /shared/api-key ]; then
export API_KEY=$(cat /shared/api-key)
echo "Loaded API_KEY from shared volume"
else
echo "WARNING: API_KEY file not found, using default"
fi
cd /app
exec ./main
ports:
- name: api
containerPort: 62050
protocol: TCP
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NODE_HOST
value: "0.0.0.0"
- name: SERVICE_PORT
value: "62050"
- name: SERVICE_PROTOCOL
value: "grpc"
- name: DEBUG
value: "true"
- name: SSL_CERT_FILE
value: "/shared/tls.crt"
- name: SSL_KEY_FILE
value: "/shared/tls.key"
- name: XRAY_EXECUTABLE_PATH
value: "/usr/local/bin/xray"
- name: XRAY_ASSETS_PATH
value: "/usr/local/share/xray"
- name: API_KEY
value: "change-this-to-a-secure-uuid"
livenessProbe:
tcpSocket:
port: 62050
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
tcpSocket:
port: 62050
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: true
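# emptyDir shared between the init container (writes api-key, tls.crt, tls.key)
# and the main container (mounts it read-only above).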
volumes:
- name: shared-data
emptyDir: {}
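# Usage sketch (hypothetical node and DNS names): label a node so the DaemonSet
# schedules onto it and the init container can derive its certificate DNS name,
# then apply the manifest into the pasarguard namespace, e.g.:
#   kubectl label node worker-1 xray-node-address=node1.example.com
#   kubectl apply -n pasarguard -f daemonset.yaml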