diff --git a/k8s/apps/pasarguard/daemonset.yaml b/k8s/apps/pasarguard/daemonset.yaml
new file mode 100644
index 0000000..4fc6e94
--- /dev/null
+++ b/k8s/apps/pasarguard/daemonset.yaml
@@ -0,0 +1,328 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: pasarguard-node
+  labels:
+    app: pasarguard-node
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: pasarguard-node-configmap
+  labels:
+    app: pasarguard-node
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "update", "patch"]
+  - apiGroups: ["cert-manager.io"]
+    resources: ["certificates"]
+    verbs: ["get", "list", "create", "update", "patch", "delete"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["services", "endpoints"]
+    verbs: ["get", "list", "create", "update", "patch", "delete"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: pasarguard-node-configmap
+  labels:
+    app: pasarguard-node
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: pasarguard-node-configmap
+subjects:
+  - kind: ServiceAccount
+    name: pasarguard-node
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: pasarguard-node-reader
+  labels:
+    app: pasarguard-node
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: pasarguard-node-reader
+  labels:
+    app: pasarguard-node
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: pasarguard-node-reader
+subjects:
+  - kind: ServiceAccount
+    name: pasarguard-node
+    namespace: pasarguard
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: pasarguard-node
+  labels:
+    app: pasarguard-node
+spec:
+  selector:
+    matchLabels:
+      app: pasarguard-node
+  revisionHistoryLimit: 3
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: pasarguard-node
+    spec:
+      serviceAccountName: pasarguard-node
+      hostNetwork: true
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: xray-node-address
+                    operator: Exists
+      initContainers:
+        - name: init-uuid
+          image: bitnami/kubectl:latest
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          command:
+            - /bin/bash
+            - -c
+            - |
+              set -e
+
+              # NODE_NAME is already set via environment variable
+              NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
+
+              # Get DNS name from node label xray-node-address
+              DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
+
+              if [ -z "${DNS_NAME}" ]; then
+                echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
+                exit 1
+              fi
+
+              echo "Node: ${NODE_NAME}"
+              echo "DNS Name from label: ${DNS_NAME}"
+
+              # Use DNS name for ConfigMap name to ensure uniqueness
+              CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
+
+              echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
+
+              # Check if ConfigMap exists and get UUID
+              if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
+                echo "ConfigMap exists, reading UUID..."
+                API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
+
+                if [ -z "${API_KEY}" ]; then
+                  echo "UUID not found in ConfigMap, generating new one..."
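+                  # Mint a fresh UUID from the kernel's random source and patch it
+                  # into the existing ConfigMap so later restarts reuse the same key.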
+                  API_KEY=$(cat /proc/sys/kernel/random/uuid)
+                  kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
+                else
+                  echo "Using existing UUID from ConfigMap"
+                fi
+              else
+                echo "ConfigMap does not exist, creating new one..."
+                API_KEY=$(cat /proc/sys/kernel/random/uuid)
+                kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
+                  --from-literal=API_KEY="${API_KEY}" \
+                  --from-literal=NODE_NAME="${NODE_NAME}"
+              fi
+
+              # Save UUID and node info to shared volume for the main container
+              echo -n "${API_KEY}" > /shared/api-key
+              echo -n "${NODE_NAME}" > /shared/node-name
+              echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
+              echo "UUID initialized: ${API_KEY}"
+              echo "Node name: ${NODE_NAME}"
+              echo "ConfigMap: ${CONFIGMAP_NAME}"
+
+              # Create Certificate for this node using DNS name from label
+              CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
+
+              echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
+
+              # Check if Certificate already exists
+              if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
+                echo "Certificate does not exist, creating..."
+                cat <<EOF | kubectl apply -f -
+              apiVersion: cert-manager.io/v1
+              kind: Certificate
+              metadata:
+                name: ${CERT_NAME}
+                namespace: ${NAMESPACE}
+              spec:
+                secretName: ${CERT_NAME}-tls
+                dnsNames:
+                  - ${DNS_NAME}
+                issuerRef:
+                  # placeholder issuer; adjust to the cert-manager issuer used in this cluster
+                  name: pasarguard-ca-issuer
+                  kind: ClusterIssuer
+              EOF
+              else
+                echo "Certificate already exists"
+              fi
+
+              # Wait for cert-manager to issue the certificate secret
+              for i in $(seq 1 60); do
+                if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
+                  echo "Certificate secret is ready!"
+                  break
+                fi
+                echo "Waiting for certificate... ($i/60)"
+                sleep 1
+              done
+
+              if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
+                echo "WARNING: Certificate secret not ready after 60 seconds"
+              else
+                # Extract certificate and key from secret to shared volume
+                echo "Extracting certificate and key..."
+                kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
+                kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
+                echo "Certificate and key extracted successfully"
+              fi
+
+              # Create individual Service and Endpoints for this node
+              # Take only first part of node name before first dot
+              NODE_SHORT_NAME="${NODE_NAME%%.*}"
+              SERVICE_NAME="${NODE_SHORT_NAME}"
+
+              # Get node internal IP (take only first IP if multiple)
+              NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
+
+              echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
+
+              # Create Service without selector
+              cat <<EOF | kubectl apply -f -
+              apiVersion: v1
+              kind: Service
+              metadata:
+                name: ${SERVICE_NAME}
+                namespace: ${NAMESPACE}
+                labels:
+                  app: pasarguard-node
+              spec:
+                ports:
+                  - name: api
+                    port: 62050
+                    targetPort: 62050
+                    protocol: TCP
+              ---
+              apiVersion: v1
+              kind: Endpoints
+              metadata:
+                name: ${SERVICE_NAME}
+                namespace: ${NAMESPACE}
+                labels:
+                  app: pasarguard-node
+              subsets:
+                - addresses:
+                    - ip: ${NODE_IP}
+                  ports:
+                    - name: api
+                      port: 62050
+                      protocol: TCP
+              EOF
+              echo "Service created: ${SERVICE_NAME} -> ${NODE_IP}:62050"
+          volumeMounts:
+            - name: shared-data
+              mountPath: /shared
+      containers:
+        - name: pasarguard-node
+          image: 'pasarguard/node:v0.1.1'
+          imagePullPolicy: Always
+          command:
+            - /bin/sh
+            - -c
+            - |
+              # Read API_KEY from shared volume created by init container
+              if [ -f /shared/api-key ]; then
+                export API_KEY=$(cat /shared/api-key)
+                echo "Loaded API_KEY from shared volume"
+              else
+                echo "WARNING: API_KEY file not found, using default"
+              fi
+
+              cd /app
+              exec ./main
+          ports:
+            - name: api
+              containerPort: 62050
+              protocol: TCP
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: NODE_HOST
+              value: "0.0.0.0"
+            - name: SERVICE_PORT
+              value: "62050"
+            - name: SERVICE_PROTOCOL
+              value: "grpc"
+            - name: DEBUG
+              value: "false"
+            - name: SSL_CERT_FILE
+              value: "/shared/tls.crt"
+            - name: SSL_KEY_FILE
+              value: "/shared/tls.key"
+            - name: XRAY_EXECUTABLE_PATH
+              value: "/usr/local/bin/xray"
+            - name: XRAY_ASSETS_PATH
+              value: "/usr/local/share/xray"
+            # Fallback only; the startup script overrides this with the UUID
+            # read from /shared/api-key when the file exists.
+            - name: API_KEY
+              value: "change-this-to-a-secure-uuid"
+          livenessProbe:
+            httpGet:
+              port: 62050
+              path: /health
+            initialDelaySeconds: 30
+            periodSeconds: 10
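+            # restart the container after 3 consecutive probe failures (5s timeout each)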
+            timeoutSeconds: 5
+            failureThreshold: 3
+          readinessProbe:
+            httpGet:
+              port: 62050
+              path: /health
+            initialDelaySeconds: 10
+            periodSeconds: 5
+            timeoutSeconds: 3
+            failureThreshold: 3
+          resources:
+            requests:
+              memory: "128Mi"
+              cpu: "100m"
+            limits:
+              memory: "512Mi"
+              cpu: "500m"
+          volumeMounts:
+            - name: shared-data
+              mountPath: /shared
+              readOnly: true
+      volumes:
+        - name: shared-data
+          emptyDir: {}
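
For reference, the DaemonSet only schedules onto nodes carrying the xray-node-address label, and the init container uses that label's value as the node's DNS name for the per-node ConfigMap, Certificate, and Service. A minimal sketch of enrolling a node (node name and DNS value are placeholders):

    kubectl label node worker-1 xray-node-address=node1.example.com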