Added xray exporter
This commit is contained in:
233
k8s/apps/pasarguard/configmap-scripts.yaml
Normal file
233
k8s/apps/pasarguard/configmap-scripts.yaml
Normal file
@@ -0,0 +1,233 @@
|
|||||||
|
---
# ConfigMap holding the bootstrap/entrypoint scripts that the
# pasarguard-node pods mount at /scripts (see the DaemonSet in this app).
apiVersion: v1
kind: ConfigMap
metadata:
  name: pasarguard-scripts
  labels:
    app: pasarguard-node
data:
|
init-uuid.sh: |
|
||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
echo "Started"
|
||||||
|
# NODE_NAME is already set via environment variable
|
||||||
|
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
|
||||||
|
|
||||||
|
# Get DNS name from node label xray-node-address
|
||||||
|
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
|
||||||
|
|
||||||
|
if [ -z "${DNS_NAME}" ]; then
|
||||||
|
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Node: ${NODE_NAME}"
|
||||||
|
echo "DNS Name from label: ${DNS_NAME}"
|
||||||
|
|
||||||
|
# Use DNS name for ConfigMap name to ensure uniqueness
|
||||||
|
CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
|
||||||
|
|
||||||
|
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
|
||||||
|
|
||||||
|
# Check if ConfigMap exists and get UUID
|
||||||
|
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
|
||||||
|
echo "ConfigMap exists, reading UUID..."
|
||||||
|
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
|
||||||
|
|
||||||
|
if [ -z "${API_KEY}" ]; then
|
||||||
|
echo "UUID not found in ConfigMap, generating new one..."
|
||||||
|
API_KEY=$(cat /proc/sys/kernel/random/uuid)
|
||||||
|
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
|
||||||
|
else
|
||||||
|
echo "Using existing UUID from ConfigMap"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "ConfigMap does not exist, creating new one..."
|
||||||
|
API_KEY=$(cat /proc/sys/kernel/random/uuid)
|
||||||
|
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
|
||||||
|
--from-literal=API_KEY="${API_KEY}" \
|
||||||
|
--from-literal=NODE_NAME="${NODE_NAME}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Save UUID and node info to shared volume for the main container
|
||||||
|
echo -n "${API_KEY}" > /shared/api-key
|
||||||
|
echo -n "${NODE_NAME}" > /shared/node-name
|
||||||
|
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
|
||||||
|
echo "UUID initialized: ${API_KEY}"
|
||||||
|
echo "Node name: ${NODE_NAME}"
|
||||||
|
echo "ConfigMap: ${CONFIGMAP_NAME}"
|
||||||
|
|
||||||
|
# Create Certificate for this node using DNS name from label
|
||||||
|
CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
|
||||||
|
|
||||||
|
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
|
||||||
|
|
||||||
|
# Check if Certificate already exists
|
||||||
|
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
|
||||||
|
echo "Certificate does not exist, creating..."
|
||||||
|
cat <<EOF | kubectl apply -f -
|
||||||
|
apiVersion: cert-manager.io/v1
|
||||||
|
kind: Certificate
|
||||||
|
metadata:
|
||||||
|
name: ${CERT_NAME}
|
||||||
|
namespace: ${NAMESPACE}
|
||||||
|
spec:
|
||||||
|
secretName: ${CERT_NAME}-tls
|
||||||
|
issuerRef:
|
||||||
|
name: letsencrypt
|
||||||
|
kind: ClusterIssuer
|
||||||
|
dnsNames:
|
||||||
|
- ${DNS_NAME}
|
||||||
|
EOF
|
||||||
|
else
|
||||||
|
echo "Certificate already exists"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Wait for certificate to be ready
|
||||||
|
|
||||||
|
echo "Waiting for certificate to be ready..."
|
||||||
|
for i in {1..600}; do
|
||||||
|
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
|
||||||
|
echo "Certificate secret is ready!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "Waiting for certificate... ($i/600)"
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
|
||||||
|
echo "WARNING: Certificate secret not ready after 600 seconds"
|
||||||
|
else
|
||||||
|
# Extract certificate and key from secret to shared volume
|
||||||
|
echo "Extracting certificate and key..."
|
||||||
|
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
|
||||||
|
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
|
||||||
|
echo "Certificate and key extracted successfully."
|
||||||
|
cat /shared/tls.crt
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create individual Service and Endpoints for this node
|
||||||
|
# Take only first part of node name before first dot
|
||||||
|
NODE_SHORT_NAME="${NODE_NAME%%.*}"
|
||||||
|
SERVICE_NAME="${NODE_SHORT_NAME}"
|
||||||
|
|
||||||
|
# Get node internal IP (take only first IP if multiple)
|
||||||
|
NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
|
||||||
|
|
||||||
|
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
|
||||||
|
|
||||||
|
# Create Service without selector
|
||||||
|
cat <<EOF | kubectl apply -f -
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: ${SERVICE_NAME}
|
||||||
|
namespace: ${NAMESPACE}
|
||||||
|
labels:
|
||||||
|
app: pasarguard-node
|
||||||
|
node: ${NODE_NAME}
|
||||||
|
spec:
|
||||||
|
clusterIP: None
|
||||||
|
ports:
|
||||||
|
- name: api
|
||||||
|
port: 62050
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 62050
|
||||||
|
- name: metrics
|
||||||
|
port: 9550
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 9550
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Endpoints
|
||||||
|
metadata:
|
||||||
|
name: ${SERVICE_NAME}
|
||||||
|
namespace: ${NAMESPACE}
|
||||||
|
labels:
|
||||||
|
app: pasarguard-node
|
||||||
|
node: ${NODE_NAME}
|
||||||
|
subsets:
|
||||||
|
- addresses:
|
||||||
|
- ip: ${NODE_IP}
|
||||||
|
nodeName: ${NODE_NAME}
|
||||||
|
ports:
|
||||||
|
- name: api
|
||||||
|
port: 62050
|
||||||
|
protocol: TCP
|
||||||
|
- name: metrics
|
||||||
|
port: 9550
|
||||||
|
protocol: TCP
|
||||||
|
EOF
|
||||||
|
|
||||||
|
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
|
||||||
|
|
||||||
|
exporter-start.sh: |
|
||||||
|
#!/bin/sh
|
||||||
|
# Install required tools
|
||||||
|
apk add --no-cache wget curl iproute2-ss bash
|
||||||
|
|
||||||
|
# Download v2ray-exporter
|
||||||
|
echo "Downloading v2ray-exporter..."
|
||||||
|
wget -O /tmp/v2ray-exporter https://github.com/wi1dcard/v2ray-exporter/releases/latest/download/v2ray-exporter_linux_amd64
|
||||||
|
mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
|
||||||
|
chmod +x /usr/local/bin/v2ray-exporter
|
||||||
|
|
||||||
|
# Wait for xray API port file from pasarguard-node container
|
||||||
|
echo "Waiting for xray API port file..."
|
||||||
|
while true; do
|
||||||
|
if [ -f /shared/xray-api-port ]; then
|
||||||
|
API_PORT=$(cat /shared/xray-api-port)
|
||||||
|
if [ -n "$API_PORT" ]; then
|
||||||
|
echo "Got xray API port from shared volume: $API_PORT"
|
||||||
|
|
||||||
|
# Verify the port is working
|
||||||
|
if curl -s -o /dev/null -w "%{http_code}" --max-time 2 "127.0.0.1:$API_PORT" 2>&1 | grep -q "Received HTTP/0.9"; then
|
||||||
|
echo "Verified API port: $API_PORT"
|
||||||
|
break
|
||||||
|
else
|
||||||
|
echo "Port verification failed, waiting..."
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
echo "Waiting for valid xray API port... retrying in 5 seconds"
|
||||||
|
sleep 5
|
||||||
|
done
|
||||||
|
|
||||||
|
# Start exporter
|
||||||
|
echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
|
||||||
|
exec /usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550"
|
||||||
|
|
||||||
|
pasarguard-start.sh: |
|
||||||
|
#!/bin/sh
|
||||||
|
# Read API_KEY from shared volume created by init container
|
||||||
|
if [ -f /shared/api-key ]; then
|
||||||
|
export API_KEY=$(cat /shared/api-key)
|
||||||
|
echo "Loaded API_KEY from shared volume"
|
||||||
|
else
|
||||||
|
echo "WARNING: API_KEY file not found, using default"
|
||||||
|
fi
|
||||||
|
|
||||||
|
cd /app
|
||||||
|
|
||||||
|
# Start main process in background
|
||||||
|
./main &
|
||||||
|
MAIN_PID=$!
|
||||||
|
|
||||||
|
# Wait a bit for xray to start
|
||||||
|
sleep 10
|
||||||
|
|
||||||
|
# Find xray API port and save to shared volume
|
||||||
|
echo "Looking for xray API port..."
|
||||||
|
for i in {1..60}; do
|
||||||
|
API_PORT=$(netstat -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
|
||||||
|
if [ -n "$API_PORT" ]; then
|
||||||
|
echo "Found xray API port: $API_PORT"
|
||||||
|
echo -n "$API_PORT" > /shared/xray-api-port
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "Waiting for xray API port... ($i/60)"
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
# Continue running main process
|
||||||
|
wait $MAIN_PID
|
||||||
@@ -105,204 +105,19 @@ spec:
|
|||||||
fieldPath: spec.nodeName
|
fieldPath: spec.nodeName
|
||||||
command:
|
command:
|
||||||
- /bin/bash
|
- /bin/bash
|
||||||
- -c
|
- /scripts/init-uuid.sh
|
||||||
- |
|
|
||||||
set -e
|
|
||||||
echo "Started"
|
|
||||||
# NODE_NAME is already set via environment variable
|
|
||||||
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
|
|
||||||
|
|
||||||
# Get DNS name from node label xray-node-address
|
|
||||||
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
|
|
||||||
|
|
||||||
if [ -z "${DNS_NAME}" ]; then
|
|
||||||
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Node: ${NODE_NAME}"
|
|
||||||
echo "DNS Name from label: ${DNS_NAME}"
|
|
||||||
|
|
||||||
# Use DNS name for ConfigMap name to ensure uniqueness
|
|
||||||
CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
|
|
||||||
|
|
||||||
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
|
|
||||||
|
|
||||||
# Check if ConfigMap exists and get UUID
|
|
||||||
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
|
|
||||||
echo "ConfigMap exists, reading UUID..."
|
|
||||||
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
|
|
||||||
|
|
||||||
if [ -z "${API_KEY}" ]; then
|
|
||||||
echo "UUID not found in ConfigMap, generating new one..."
|
|
||||||
API_KEY=$(cat /proc/sys/kernel/random/uuid)
|
|
||||||
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
|
|
||||||
else
|
|
||||||
echo "Using existing UUID from ConfigMap"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "ConfigMap does not exist, creating new one..."
|
|
||||||
API_KEY=$(cat /proc/sys/kernel/random/uuid)
|
|
||||||
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
|
|
||||||
--from-literal=API_KEY="${API_KEY}" \
|
|
||||||
--from-literal=NODE_NAME="${NODE_NAME}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Save UUID and node info to shared volume for the main container
|
|
||||||
echo -n "${API_KEY}" > /shared/api-key
|
|
||||||
echo -n "${NODE_NAME}" > /shared/node-name
|
|
||||||
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
|
|
||||||
echo "UUID initialized: ${API_KEY}"
|
|
||||||
echo "Node name: ${NODE_NAME}"
|
|
||||||
echo "ConfigMap: ${CONFIGMAP_NAME}"
|
|
||||||
|
|
||||||
# Create Certificate for this node using DNS name from label
|
|
||||||
CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
|
|
||||||
|
|
||||||
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
|
|
||||||
|
|
||||||
# Check if Certificate already exists
|
|
||||||
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
|
|
||||||
echo "Certificate does not exist, creating..."
|
|
||||||
cat <<EOF | kubectl apply -f -
|
|
||||||
apiVersion: cert-manager.io/v1
|
|
||||||
kind: Certificate
|
|
||||||
metadata:
|
|
||||||
name: ${CERT_NAME}
|
|
||||||
namespace: ${NAMESPACE}
|
|
||||||
spec:
|
|
||||||
secretName: ${CERT_NAME}-tls
|
|
||||||
issuerRef:
|
|
||||||
name: letsencrypt
|
|
||||||
kind: ClusterIssuer
|
|
||||||
dnsNames:
|
|
||||||
- ${DNS_NAME}
|
|
||||||
EOF
|
|
||||||
else
|
|
||||||
echo "Certificate already exists"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Wait for certificate to be ready
|
|
||||||
|
|
||||||
echo "Waiting for certificate to be ready..."
|
|
||||||
for i in {1..600}; do
|
|
||||||
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
|
|
||||||
echo "Certificate secret is ready!"
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
echo "Waiting for certificate... ($i/600)"
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
|
|
||||||
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
|
|
||||||
echo "WARNING: Certificate secret not ready after 600 seconds"
|
|
||||||
else
|
|
||||||
# Extract certificate and key from secret to shared volume
|
|
||||||
echo "Extracting certificate and key..."
|
|
||||||
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
|
|
||||||
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
|
|
||||||
echo "Certificate and key extracted successfully."
|
|
||||||
cat /shared/tls.crt
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Create individual Service and Endpoints for this node
|
|
||||||
# Take only first part of node name before first dot
|
|
||||||
NODE_SHORT_NAME="${NODE_NAME%%.*}"
|
|
||||||
SERVICE_NAME="${NODE_SHORT_NAME}"
|
|
||||||
|
|
||||||
# Get node internal IP (take only first IP if multiple)
|
|
||||||
NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
|
|
||||||
|
|
||||||
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
|
|
||||||
|
|
||||||
# Create Service without selector
|
|
||||||
cat <<EOF | kubectl apply -f -
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: ${SERVICE_NAME}
|
|
||||||
namespace: ${NAMESPACE}
|
|
||||||
labels:
|
|
||||||
app: pasarguard-node
|
|
||||||
node: ${NODE_NAME}
|
|
||||||
spec:
|
|
||||||
clusterIP: None
|
|
||||||
ports:
|
|
||||||
- name: api
|
|
||||||
port: 62050
|
|
||||||
protocol: TCP
|
|
||||||
targetPort: 62050
|
|
||||||
- name: metrics
|
|
||||||
port: 9550
|
|
||||||
protocol: TCP
|
|
||||||
targetPort: 9550
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Endpoints
|
|
||||||
metadata:
|
|
||||||
name: ${SERVICE_NAME}
|
|
||||||
namespace: ${NAMESPACE}
|
|
||||||
labels:
|
|
||||||
app: pasarguard-node
|
|
||||||
node: ${NODE_NAME}
|
|
||||||
subsets:
|
|
||||||
- addresses:
|
|
||||||
- ip: ${NODE_IP}
|
|
||||||
nodeName: ${NODE_NAME}
|
|
||||||
ports:
|
|
||||||
- name: api
|
|
||||||
port: 62050
|
|
||||||
protocol: TCP
|
|
||||||
- name: metrics
|
|
||||||
port: 9550
|
|
||||||
protocol: TCP
|
|
||||||
EOF
|
|
||||||
|
|
||||||
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
|
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: shared-data
|
- name: shared-data
|
||||||
mountPath: /shared
|
mountPath: /shared
|
||||||
|
- name: scripts
|
||||||
|
mountPath: /scripts
|
||||||
containers:
|
containers:
|
||||||
- name: xray-exporter
|
- name: xray-exporter
|
||||||
image: alpine:3.18
|
image: alpine:3.18
|
||||||
imagePullPolicy: IfNotPresent
|
imagePullPolicy: IfNotPresent
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- /scripts/exporter-start.sh
|
||||||
- |
|
|
||||||
# Install required tools
|
|
||||||
apk add --no-cache wget curl iproute2-ss bash
|
|
||||||
|
|
||||||
# Download v2ray-exporter
|
|
||||||
echo "Downloading v2ray-exporter..."
|
|
||||||
wget -O /tmp/v2ray-exporter https://github.com/wi1dcard/v2ray-exporter/releases/latest/download/v2ray-exporter_linux_amd64
|
|
||||||
mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
|
|
||||||
chmod +x /usr/local/bin/v2ray-exporter
|
|
||||||
|
|
||||||
# Wait for xray API port file from pasarguard-node container
|
|
||||||
echo "Waiting for xray API port file..."
|
|
||||||
while true; do
|
|
||||||
if [ -f /shared/xray-api-port ]; then
|
|
||||||
API_PORT=$(cat /shared/xray-api-port)
|
|
||||||
if [ -n "$API_PORT" ]; then
|
|
||||||
echo "Got xray API port from shared volume: $API_PORT"
|
|
||||||
|
|
||||||
# Verify the port is working
|
|
||||||
if curl -s -o /dev/null -w "%{http_code}" --max-time 2 "127.0.0.1:$API_PORT" 2>&1 | grep -q "Received HTTP/0.9"; then
|
|
||||||
echo "Verified API port: $API_PORT"
|
|
||||||
break
|
|
||||||
else
|
|
||||||
echo "Port verification failed, waiting..."
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
echo "Waiting for valid xray API port... retrying in 5 seconds"
|
|
||||||
sleep 5
|
|
||||||
done
|
|
||||||
|
|
||||||
# Start exporter
|
|
||||||
echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
|
|
||||||
exec /usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550"
|
|
||||||
ports:
|
ports:
|
||||||
- name: metrics
|
- name: metrics
|
||||||
containerPort: 9550
|
containerPort: 9550
|
||||||
@@ -334,45 +149,14 @@ spec:
|
|||||||
- name: shared-data
|
- name: shared-data
|
||||||
mountPath: /shared
|
mountPath: /shared
|
||||||
readOnly: true
|
readOnly: true
|
||||||
|
- name: scripts
|
||||||
|
mountPath: /scripts
|
||||||
- name: pasarguard-node
|
- name: pasarguard-node
|
||||||
image: 'pasarguard/node:v0.1.1'
|
image: 'pasarguard/node:v0.1.1'
|
||||||
imagePullPolicy: Always
|
imagePullPolicy: Always
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- /scripts/pasarguard-start.sh
|
||||||
- |
|
|
||||||
# Read API_KEY from shared volume created by init container
|
|
||||||
if [ -f /shared/api-key ]; then
|
|
||||||
export API_KEY=$(cat /shared/api-key)
|
|
||||||
echo "Loaded API_KEY from shared volume"
|
|
||||||
else
|
|
||||||
echo "WARNING: API_KEY file not found, using default"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd /app
|
|
||||||
|
|
||||||
# Start main process in background
|
|
||||||
./main &
|
|
||||||
MAIN_PID=$!
|
|
||||||
|
|
||||||
# Wait a bit for xray to start
|
|
||||||
sleep 10
|
|
||||||
|
|
||||||
# Find xray API port and save to shared volume
|
|
||||||
echo "Looking for xray API port..."
|
|
||||||
for i in {1..60}; do
|
|
||||||
API_PORT=$(ss -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
|
|
||||||
if [ -n "$API_PORT" ]; then
|
|
||||||
echo "Found xray API port: $API_PORT"
|
|
||||||
echo -n "$API_PORT" > /shared/xray-api-port
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
echo "Waiting for xray API port... ($i/60)"
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
|
|
||||||
# Continue running main process
|
|
||||||
wait $MAIN_PID
|
|
||||||
ports:
|
ports:
|
||||||
- name: api
|
- name: api
|
||||||
containerPort: 62050
|
containerPort: 62050
|
||||||
@@ -425,6 +209,12 @@ spec:
|
|||||||
- name: shared-data
|
- name: shared-data
|
||||||
mountPath: /shared
|
mountPath: /shared
|
||||||
readOnly: false
|
readOnly: false
|
||||||
|
- name: scripts
|
||||||
|
mountPath: /scripts
|
||||||
volumes:
|
volumes:
|
||||||
- name: shared-data
|
- name: shared-data
|
||||||
emptyDir: {}
|
emptyDir: {}
|
||||||
|
- name: scripts
|
||||||
|
configMap:
|
||||||
|
name: pasarguard-scripts
|
||||||
|
defaultMode: 0755
|
||||||
|
|||||||
@@ -7,5 +7,6 @@ resources:
|
|||||||
- ./deployment.yaml
|
- ./deployment.yaml
|
||||||
- ./daemonset.yaml
|
- ./daemonset.yaml
|
||||||
- ./certificate.yaml
|
- ./certificate.yaml
|
||||||
|
- ./configmap-scripts.yaml
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user