Compare commits
49 Commits: auto-updat ... auto-updat

| Author | SHA1 | Date |
|---|---|---|
|  | 5cdb11ae39 |  |
|  | 39a27c596f |  |
|  | 353bb877be |  |
|  | e523bb8bea |  |
|  | b433373725 |  |
|  | 3026e53746 |  |
|  | 63669c69ff |  |
|  | fa98e553cd |  |
|  | 055ef8aa77 |  |
|  | 22b359a7ee |  |
|  | 611e3e31dd |  |
|  | ddbd53e476 |  |
|  | f8a9d91932 |  |
|  | 262fea115d |  |
|  | 1e1a015dc0 |  |
|  | e76ebdd8c3 |  |
|  | 0c2ce55a41 |  |
|  | 6e9de5addf |  |
|  | 887a9a2306 |  |
|  | 776109d795 |  |
|  | c998426b44 |  |
|  | 536be6a61f |  |
|  | 713481c726 |  |
|  | f6411b7b65 |  |
|  | 3af6d98be8 |  |
|  | a45af9d4bc |  |
|  | 76937930ce |  |
|  | d4ff8d4665 |  |
|  | e0cf9371ae |  |
|  | 1126cb25bc |  |
|  | 44250dc937 |  |
|  | d9db73e078 |  |
|  | 71ce9f15ef |  |
|  | 6b855294af |  |
|  | 8dd16e24e6 |  |
|  | 3df95f46a5 |  |
|  | c0151eb2c9 |  |
|  | 6d7e365058 |  |
|  | 0b5361323a |  |
|  | 56352fef4b |  |
|  | 7a1f792391 |  |
|  | defe0cbdf5 |  |
|  | 7285c62b37 |  |
|  | 60f8d86fca |  |
|  | 2387653edd |  |
|  | 78a639162b |  |
|  | 90b197bcbe |  |
|  | 156d26aaf9 |  |
|  | 700b9cf5ff |  |
@@ -40,6 +40,7 @@ ArgoCD homelab project
 | **greece-notifier** | [](https://ag.hexor.cy/applications/argocd/greece-notifier) |
 | **hexound** | [](https://ag.hexor.cy/applications/argocd/hexound) |
 | **immich** | [](https://ag.hexor.cy/applications/argocd/immich) |
+| **iperf3** | [](https://ag.hexor.cy/applications/argocd/iperf3) |
 | **jellyfin** | [](https://ag.hexor.cy/applications/argocd/jellyfin) |
 | **k8s-secrets** | [](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
 | **khm** | [](https://ag.hexor.cy/applications/argocd/khm) |
21  k8s/apps/iperf3/app.yaml  Normal file
@@ -0,0 +1,21 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: iperf3
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: iperf3
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/iperf3
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true
92  k8s/apps/iperf3/daemonset.yaml  Normal file
@@ -0,0 +1,92 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: iperf3-server
spec:
  selector:
    matchLabels:
      app: iperf3-server
  template:
    metadata:
      labels:
        app: iperf3-server
    spec:
      serviceAccountName: iperf3-server
      subdomain: iperf3
      initContainers:
        - name: create-service
          image: bitnami/kubectl:latest
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          command:
            - /bin/bash
            - -c
            - |
              # Clean node name for service name
              NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
              SERVICE_NAME="iperf3-${NODE_CLEAN}"

              # Create service for this pod
              kubectl apply -f - <<EOF
              apiVersion: v1
              kind: Service
              metadata:
                name: ${SERVICE_NAME}
                namespace: iperf3
                labels:
                  app: iperf3-node-service
                  target-node: "${NODE_NAME}"
              spec:
                type: ClusterIP
                ports:
                  - name: iperf3
                    port: 5201
                    protocol: TCP
              ---
              apiVersion: v1
              kind: Endpoints
              metadata:
                name: ${SERVICE_NAME}
                namespace: iperf3
                labels:
                  app: iperf3-node-service
                  target-node: "${NODE_NAME}"
              subsets:
                - addresses:
                    - ip: ${POD_IP}
                  ports:
                    - name: iperf3
                      port: 5201
                      protocol: TCP
              EOF
      containers:
        - name: iperf3-server
          image: networkstatic/iperf3:latest
          args: ["-s"]
          ports:
            - containerPort: 5201
              protocol: TCP
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          resources:
            requests:
              memory: "64Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
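The init container gives every node its own per-node Service named after the lowercased short hostname. A minimal end-to-end throughput check is sketched below; `master` is a hypothetical node short name, so substitute one of your own nodes:

```bash
# One-off client pod in the iperf3 namespace; iperf3-master is the per-node
# Service that the init container created for the node whose short hostname
# is "master". The image's entrypoint is iperf3, so only flags are passed.
kubectl run iperf3-client -n iperf3 --rm -it --restart=Never \
  --image=networkstatic/iperf3:latest -- \
  -c iperf3-master.iperf3.svc.cluster.local -p 5201 -t 10
```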
40  k8s/apps/iperf3/iperf3-exporter-daemonset.yaml  Normal file
@@ -0,0 +1,40 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: iperf3-exporter
  labels:
    app: iperf3-exporter
spec:
  selector:
    matchLabels:
      app: iperf3-exporter
  template:
    metadata:
      labels:
        app: iperf3-exporter
    spec:
      containers:
        - name: iperf3-exporter
          image: ghcr.io/edgard/iperf3_exporter:1.2.2
          ports:
            - containerPort: 9579
              name: metrics
              protocol: TCP
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "200m"
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
15  k8s/apps/iperf3/iperf3-exporter-service.yaml  Normal file
@@ -0,0 +1,15 @@
---
apiVersion: v1
kind: Service
metadata:
  name: iperf3-exporter
  labels:
    app: iperf3-exporter
spec:
  selector:
    app: iperf3-exporter
  ports:
    - name: metrics
      protocol: TCP
      port: 9579
      targetPort: 9579
11  k8s/apps/iperf3/kustomization.yaml  Normal file
@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - rbac.yaml
  - daemonset.yaml
  - service-headless.yaml
  - iperf3-exporter-daemonset.yaml
  - iperf3-exporter-service.yaml
  - servicemonitor.yaml
36  k8s/apps/iperf3/rbac.yaml  Normal file
@@ -0,0 +1,36 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: iperf3-server
  namespace: iperf3
  labels:
    app: iperf3-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: iperf3-service-manager
  namespace: iperf3
  labels:
    app: iperf3-server
rules:
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: iperf3-service-manager
  namespace: iperf3
  labels:
    app: iperf3-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: iperf3-service-manager
subjects:
  - kind: ServiceAccount
    name: iperf3-server
    namespace: iperf3
14  k8s/apps/iperf3/service-headless.yaml  Normal file
@@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
  name: iperf3
spec:
  clusterIP: None
  selector:
    app: iperf3-server
  ports:
    - name: iperf3
      protocol: TCP
      port: 5201
      targetPort: 5201
36  k8s/apps/iperf3/servicemonitor.yaml  Normal file
@@ -0,0 +1,36 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: iperf3-exporter
  labels:
    app: iperf3-exporter
    release: prometheus
spec:
  selector:
    matchLabels:
      app: iperf3-exporter
  endpoints:
    - port: metrics
      path: /probe
      interval: 5m
      scrapeTimeout: 30s
      params:
        target:
          - "iperf3.iperf3.svc.cluster.local:5201"
        duration:
          - "10"
        streams:
          - "4"
      relabelings:
        - sourceLabels: [__param_target]
          targetLabel: instance
        - sourceLabels: [__param_target]
          targetLabel: __param_target
        - targetLabel: __address__
          replacement: iperf3-exporter.iperf3.svc.cluster.local:9579
      metricRelabelings:
        - sourceLabels: [__name__]
          regex: iperf3_(.+)
          targetLabel: __name__
          replacement: network_${1}
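The exporter is probe-style, so Prometheus passes the target on every scrape. The same request can be issued by hand to debug a slow or failing probe; the query string below simply mirrors the ServiceMonitor's `params`:

```bash
# Forward the exporter port locally, then trigger one probe manually with the
# same target/duration/streams values the ServiceMonitor uses.
kubectl -n iperf3 port-forward svc/iperf3-exporter 9579:9579 &
curl 'http://127.0.0.1:9579/probe?target=iperf3.iperf3.svc.cluster.local:5201&duration=10&streams=4'
```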
33  k8s/apps/ollama/external-secrets.yaml  Normal file
@@ -0,0 +1,33 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: oidc-secret
spec:
  target:
    name: oidc-secret
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        OAUTH_CLIENT_SECRET: |-
          {{ .OAUTH_CLIENT_SECRET }}
        OAUTH_CLIENT_ID: |-
          {{ .OAUTH_CLIENT_ID }}
  data:
    - secretKey: OAUTH_CLIENT_SECRET
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
        property: fields[0].value
    - secretKey: OAUTH_CLIENT_ID
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
        property: fields[1].value
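A quick sanity check after ArgoCD syncs this (the namespace is assumed to be the app's `ollama` namespace): the ExternalSecret should report Ready and the templated Secret should carry both OAuth keys.

```bash
# Status of the sync from the vaultwarden-login ClusterSecretStore.
kubectl -n ollama get externalsecret oidc-secret
# The rendered Opaque secret should list OAUTH_CLIENT_ID and OAUTH_CLIENT_SECRET.
kubectl -n ollama get secret oidc-secret -o jsonpath='{.data}'
```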
@@ -1,6 +1,9 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 
+resources:
+  - external-secrets.yaml
+
 helmCharts:
   - name: ollama
     repo: https://otwld.github.io/ollama-helm/
52  k8s/apps/ollama/openweb-ui-values.yaml  Normal file
@@ -0,0 +1,52 @@
clusterDomain: ai.hexor.cy

extraEnvVars:
  GLOBAL_LOG_LEVEL: debug
  OAUTH_PROVIDER_NAME: authentik
  OPENID_PROVIDER_URL: https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration
  OPENID_REDIRECT_URI: https://ai.hexor.cy/oauth/oidc/callback
  WEBUI_URL: https://ai.hexor.cy
  # Allows auto-creation of new users using OAuth. Must be paired with ENABLE_LOGIN_FORM=false.
  ENABLE_OAUTH_SIGNUP: true
  # Disables user/password login form. Required when ENABLE_OAUTH_SIGNUP=true.
  ENABLE_LOGIN_FORM: false
  OAUTH_MERGE_ACCOUNTS_BY_EMAIL: true

extraEnvFrom:
  - secretRef:
      name: oidc-secret
nodeSelector:
  kubernetes.io/hostname: master.tail2fe2d.ts.net
ollamaUrls:
  - http://ollama.ollama.svc:11434
ollama:
  enabled: false
ollama:
  gpu:
    enabled: false
  models:
    pull:
      - qwen3-vl:8b
    run:
      - qwen3-vl:8b

pipelines:
  enabled: true

tika:
  enabled: true

websocket:
  enabled: true

ingress:
  enabled: true
  class: traefik
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
  host: "ai.hexor.cy"
  tls:
    - hosts:
        - '*.hexor.cy'
      secretName: ollama-tls
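Since the app is assembled with kustomize's `helmCharts` support, the rendered manifests can be inspected offline before ArgoCD picks them up. This assumes the values file above is wired in through the kustomization's `valuesFile`:

```bash
# Render the ollama/open-webui app exactly as the cluster would receive it;
# helm must be on PATH for kustomize to inflate the charts.
kustomize build --enable-helm k8s/apps/ollama | less
```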
264  k8s/apps/pasarguard/configmap-scripts.yaml  Normal file
@@ -0,0 +1,264 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: pasarguard-scripts
  labels:
    app: pasarguard-node
data:
  init-uuid.sh: |
    #!/bin/bash
    set -e
    echo "Started"
    # NODE_NAME is already set via environment variable
    NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)

    # Get DNS name from node label xray-node-address
    DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')

    if [ -z "${DNS_NAME}" ]; then
      echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
      exit 1
    fi

    echo "Node: ${NODE_NAME}"
    echo "DNS Name from label: ${DNS_NAME}"

    # Use DNS name for ConfigMap name to ensure uniqueness
    CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"

    echo "Checking ConfigMap: ${CONFIGMAP_NAME}"

    # Check if ConfigMap exists and get UUID
    if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
      echo "ConfigMap exists, reading UUID..."
      API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')

      if [ -z "${API_KEY}" ]; then
        echo "UUID not found in ConfigMap, generating new one..."
        API_KEY=$(cat /proc/sys/kernel/random/uuid)
        kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
      else
        echo "Using existing UUID from ConfigMap"
      fi
    else
      echo "ConfigMap does not exist, creating new one..."
      API_KEY=$(cat /proc/sys/kernel/random/uuid)
      kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
        --from-literal=API_KEY="${API_KEY}" \
        --from-literal=NODE_NAME="${NODE_NAME}"
    fi

    # Save UUID and node info to shared volume for the main container
    echo -n "${API_KEY}" > /shared/api-key
    echo -n "${NODE_NAME}" > /shared/node-name
    echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
    echo "UUID initialized: ${API_KEY}"
    echo "Node name: ${NODE_NAME}"
    echo "ConfigMap: ${CONFIGMAP_NAME}"

    # Create Certificate for this node using DNS name from label
    CERT_NAME="pasarguard-node-${DNS_NAME//./-}"

    echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"

    # Check if Certificate already exists
    if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
      echo "Certificate does not exist, creating..."
      cat <<EOF | kubectl apply -f -
    apiVersion: cert-manager.io/v1
    kind: Certificate
    metadata:
      name: ${CERT_NAME}
      namespace: ${NAMESPACE}
    spec:
      secretName: ${CERT_NAME}-tls
      issuerRef:
        name: letsencrypt
        kind: ClusterIssuer
      dnsNames:
        - ${DNS_NAME}
    EOF
    else
      echo "Certificate already exists"
    fi

    # Wait for certificate to be ready
    echo "Waiting for certificate to be ready..."
    for i in {1..600}; do
      if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
        echo "Certificate secret is ready!"
        break
      fi
      echo "Waiting for certificate... ($i/600)"
      sleep 1
    done

    if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
      echo "WARNING: Certificate secret not ready after 600 seconds"
    else
      # Extract certificate and key from secret to shared volume
      echo "Extracting certificate and key..."
      kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
      kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
      echo "Certificate and key extracted successfully."
      cat /shared/tls.crt
    fi

    # Create individual Service and Endpoints for this node
    # Take only first part of node name before first dot
    NODE_SHORT_NAME="${NODE_NAME%%.*}"
    SERVICE_NAME="${NODE_SHORT_NAME}"

    # Get node internal IP (take only first IP if multiple)
    NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')

    echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"

    # Create Service without selector
    cat <<EOF | kubectl apply -f -
    apiVersion: v1
    kind: Service
    metadata:
      name: ${SERVICE_NAME}
      namespace: ${NAMESPACE}
      labels:
        app: pasarguard-node
        node: ${NODE_NAME}
    spec:
      clusterIP: None
      ports:
        - name: api
          port: 62050
          protocol: TCP
          targetPort: 62050
        - name: metrics
          port: 9550
          protocol: TCP
          targetPort: 9550
    ---
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: ${SERVICE_NAME}
      namespace: ${NAMESPACE}
      labels:
        app: pasarguard-node
        node: ${NODE_NAME}
    subsets:
      - addresses:
          - ip: ${NODE_IP}
            nodeName: ${NODE_NAME}
        ports:
          - name: api
            port: 62050
            protocol: TCP
          - name: metrics
            port: 9550
            protocol: TCP
    EOF

    echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"

  exporter-start.sh: |
    #!/bin/sh
    # Install required tools
    apk add --no-cache wget curl iproute2-ss bash

    # Download v2ray-exporter
    echo "Downloading v2ray-exporter..."
    ARCH=$(uname -m)
    case $ARCH in
      x86_64)
        BINARY_ARCH="amd64"
        ;;
      aarch64|arm64)
        BINARY_ARCH="arm64"
        ;;
      *)
        echo "Unsupported architecture: $ARCH"
        exit 1
        ;;
    esac

    echo "Detected architecture: $ARCH, using binary: v2ray-exporter_linux_$BINARY_ARCH"
    wget -L -O /tmp/v2ray-exporter "https://github.com/wi1dcard/v2ray-exporter/releases/download/v0.6.0/v2ray-exporter_linux_$BINARY_ARCH"
    mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
    chmod +x /usr/local/bin/v2ray-exporter

    # Wait for initial API port file
    echo "Waiting for initial xray API port file..."
    while [ ! -f /shared/xray-api-port ]; do
      echo "Waiting for API port file..."
      sleep 2
    done

    # Main loop - restart exporter if it crashes or port changes
    while true; do
      if [ -f /shared/xray-api-port ]; then
        API_PORT=$(cat /shared/xray-api-port)
        if [ -n "$API_PORT" ]; then
          echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
          /usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550" &
          EXPORTER_PID=$!

          # Wait for exporter to exit or port file to change
          while kill -0 $EXPORTER_PID 2>/dev/null; do
            if [ -f /shared/xray-api-port ]; then
              NEW_PORT=$(cat /shared/xray-api-port)
              if [ "$NEW_PORT" != "$API_PORT" ]; then
                echo "API port changed from $API_PORT to $NEW_PORT, restarting exporter"
                kill $EXPORTER_PID 2>/dev/null
                wait $EXPORTER_PID 2>/dev/null
                break
              fi
            fi
            sleep 5
          done

          echo "Exporter stopped, restarting..."
          wait $EXPORTER_PID 2>/dev/null
        fi
      fi
      sleep 2
    done

  pasarguard-start.sh: |
    #!/bin/sh
    # Read API_KEY from shared volume created by init container
    if [ -f /shared/api-key ]; then
      export API_KEY=$(cat /shared/api-key)
      echo "Loaded API_KEY from shared volume"
    else
      echo "WARNING: API_KEY file not found, using default"
    fi

    cd /app

    # Start main process in background
    ./main &
    MAIN_PID=$!

    # Start continuous port monitoring in background
    {
      sleep 10  # Wait for xray to start initially
      LAST_PORT=""

      while true; do
        API_PORT=$(netstat -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
        if [ -n "$API_PORT" ] && [ "$API_PORT" != "$LAST_PORT" ]; then
          echo "Found xray API port: $API_PORT"
          echo -n "$API_PORT" > /shared/xray-api-port
          LAST_PORT="$API_PORT"
        fi
        sleep 5  # Check every 5 seconds
      done
    } &
    PORT_MONITOR_PID=$!

    # Wait for main process to finish
    wait $MAIN_PID

    # Clean up port monitor
    kill $PORT_MONITOR_PID 2>/dev/null
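Once the init script has run on each node, every node should own a `node-uuid-*` ConfigMap, a `pasarguard-node-*-tls` certificate secret, and a headless per-node Service backed by hand-built Endpoints. A quick inventory, assuming the app's `pasarguard` namespace:

```bash
# Per-node UUID ConfigMaps created or patched by init-uuid.sh.
kubectl -n pasarguard get configmaps | grep node-uuid-
# Per-node cert-manager Certificates and their TLS secrets.
kubectl -n pasarguard get certificates,secrets | grep pasarguard-node-
# Per-node Services/Endpoints labeled by the script.
kubectl -n pasarguard get svc,endpoints -l app=pasarguard-node
```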
@@ -105,174 +105,58 @@ spec:
                   fieldPath: spec.nodeName
           command:
             - /bin/bash
-            - -c
-            - |
   [144 lines of inline shell deleted: the init-uuid script, which now ships
   as init-uuid.sh in the pasarguard-scripts ConfigMap above; the ConfigMap
   version additionally exposes the metrics port 9550 on the per-node
   Service and Endpoints]
+            - /scripts/init-uuid.sh
           volumeMounts:
             - name: shared-data
               mountPath: /shared
+            - name: scripts
+              mountPath: /scripts
       containers:
+        - name: xray-exporter
+          image: alpine:3.18
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+            - /scripts/exporter-start.sh
+          ports:
+            - name: metrics
+              containerPort: 9550
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: /scrape
+              port: metrics
+            initialDelaySeconds: 60
+            periodSeconds: 30
+            timeoutSeconds: 10
+            failureThreshold: 3
+          readinessProbe:
+            httpGet:
+              path: /scrape
+              port: metrics
+            initialDelaySeconds: 45
+            periodSeconds: 10
+            timeoutSeconds: 5
+            failureThreshold: 3
+          resources:
+            requests:
+              memory: "64Mi"
+              cpu: "50m"
+            limits:
+              memory: "128Mi"
+              cpu: "100m"
+          volumeMounts:
+            - name: shared-data
+              mountPath: /shared
+              readOnly: true
+            - name: scripts
+              mountPath: /scripts
         - name: pasarguard-node
           image: 'pasarguard/node:v0.1.1'
           imagePullPolicy: Always
           command:
             - /bin/sh
-            - -c
-            - |
-              # Read API_KEY from shared volume created by init container
-              if [ -f /shared/api-key ]; then
-                export API_KEY=$(cat /shared/api-key)
-                echo "Loaded API_KEY from shared volume"
-              else
-                echo "WARNING: API_KEY file not found, using default"
-              fi
-
-              cd /app
-              exec ./main
+            - /scripts/pasarguard-start.sh
           ports:
             - name: api
               containerPort: 62050
@@ -324,7 +208,13 @@ spec:
           volumeMounts:
             - name: shared-data
               mountPath: /shared
-              readOnly: true
+              readOnly: false
+            - name: scripts
+              mountPath: /scripts
       volumes:
         - name: shared-data
           emptyDir: {}
+        - name: scripts
+          configMap:
+            name: pasarguard-scripts
+            defaultMode: 0755
@@ -34,7 +34,7 @@ spec:
           mountPath: /templates/subscription
       containers:
         - name: pasarguard-web
-          image: 'pasarguard/panel:v1.4.1'
+          image: 'pasarguard/panel:v1.7.2'
           imagePullPolicy: Always
           envFrom:
             - secretRef:
@@ -7,5 +7,5 @@ resources:
   - ./deployment.yaml
   - ./daemonset.yaml
   - ./certificate.yaml
+  - ./configmap-scripts.yaml
+  - ./servicemonitor.yaml
21  k8s/apps/pasarguard/servicemonitor.yaml  Normal file
@@ -0,0 +1,21 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: pasarguard-node-metrics
  labels:
    app: pasarguard-node
    release: prometheus
spec:
  selector:
    matchLabels:
      app: pasarguard-node
  endpoints:
    - port: metrics
      path: /scrape
      interval: 30s
      scrapeTimeout: 10s
      honorLabels: true
  namespaceSelector:
    matchNames:
      - pasarguard
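Because the per-node Services are named after each node's short hostname, one exporter can be spot-checked through its Service. `cy` below is illustrative, taken from the scrape targets further down; the path matches the ServiceMonitor's `/scrape` endpoint:

```bash
# Fetch raw v2ray-exporter metrics from one node's headless Service.
kubectl -n pasarguard run metrics-check --rm -it --restart=Never \
  --image=curlimages/curl --command -- \
  curl -s http://cy.pasarguard.svc.cluster.local:9550/scrape
```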
@@ -10,7 +10,7 @@ resources:
 helmCharts:
   - name: authentik
     repo: https://charts.goauthentik.io
-    version: 2025.8.1
+    version: 2025.10.1
     releaseName: authentik
     namespace: authentik
     valuesFile: values.yaml
@@ -1,6 +1,6 @@
 global:
   image:
-    tag: "2025.8.1"
+    tag: "2025.10.1"
   nodeSelector:
     kubernetes.io/hostname: master.tail2fe2d.ts.net
 
@@ -47,7 +47,6 @@ server:
       - minecraft.hexor.cy # Minecraft UI and server
       - pass.hexor.cy # k8s-secret for openai
       - ps.hexor.cy # pasarguard UI
-      - ai.hexor.cy # ollama API
     tls:
       - secretName: idm-tls
         hosts:
@@ -8,7 +8,7 @@
 # BW_HOST: base64(url)
 # BW_USERNAME: base64(name)
 # BW_PASSWORD: base64(pass)
-# 81212111-6350-4069-8bcf-19a67d3964a5
+# Vaultwarden bot - 81212111-6350-4069-8bcf-19a67d3964a5
 ---
 apiVersion: apps/v1
 kind: Deployment
@@ -4,16 +4,16 @@ prometheus:
   prometheusSpec:
     enableRemoteWriteReceiver: true
     additionalScrapeConfigs:
-      - job_name: outline_vpn
+      - job_name: xray_vpn
+        metrics_path: /scrape
         static_configs:
-          - targets: ['100.117.24.104:9095']
-            labels: {instance: cy}
-          - targets: ['100.117.24.104:9096']
-            labels: {instance: am}
-          - targets: ['100.117.24.104:9097']
-            labels: {instance: jp}
-          - targets: ['100.117.24.104:9098']
-            labels: {instance: bg}
+          - targets: ['cy.tail2fe2d.ts.net:9550']
+            labels: {job: cy}
+          - targets: ['x86.tail2fe2d.ts.net:9550']
+            labels: {job: am}
+          - targets: ['jp.tail2fe2d.ts.net:9550']
+            labels: {job: jp}
       - job_name: cs_16_server
         static_configs:
           - targets: ['prom-a2s-exporter.counter-strike.svc:9841']
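After Prometheus reloads, the renamed targets should come up. A hedged check against the Prometheus HTTP API; the `prometheus-operated` service and `monitoring` namespace are assumptions based on a typical kube-prometheus-stack install:

```bash
kubectl -n monitoring port-forward svc/prometheus-operated 9090:9090 &
# The per-target labels above override the job label, so query those values.
curl -s 'http://127.0.0.1:9090/api/v1/query?query=up{job=~"cy|am|jp"}'
```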
@@ -167,5 +167,26 @@ oauth_applications = {
     create_group = true
     signing_key = "1b1b5bec-034a-4d96-871a-133f11322360"
   }
+
+  "openwebui" = {
+    name = "OpenWeb UI"
+    slug = "openwebui"
+    group = "Tools"
+    meta_description = "OpenWeb UI"
+    meta_icon = "https://ollama.com/public/ollama.png"
+    redirect_uris = [
+      "https://ai.hexor.cy/oauth/oidc/callback",
+    ]
+    meta_launch_url = "https://ai.hexor.cy"
+    client_type = "confidential"
+    include_claims_in_id_token = true
+    access_code_validity = "minutes=1"
+    access_token_validity = "minutes=5"
+    refresh_token_validity = "days=30"
+    scope_mappings = ["openid", "profile", "email"]
+    access_groups = ["admins"]
+    create_group = true
+    signing_key = "1b1b5bec-034a-4d96-871a-133f11322360"
+  }
 }
 
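Once Terraform applies the new `openwebui` application, the discovery document that `OPENID_PROVIDER_URL` in openweb-ui-values.yaml points at should answer:

```bash
# Fetch authentik's OIDC discovery document for the openwebui application.
curl -s https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration
```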
@@ -206,19 +206,5 @@ EOT
     create_group = true
     access_groups = ["admins"]
   }
-  "ollama" = {
-    name = "Ollama API"
-    slug = "ollama"
-    group = "Tools"
-    external_host = "https://ai.hexor.cy"
-    internal_host = "http://ollama.ollama.svc:11434"
-    internal_host_ssl_validation = false
-    meta_description = "Ollama API"
-    meta_icon = "https://ollama.com/public/ollama.png"
-    mode = "proxy"
-    outpost = "kubernetes-outpost"
-    create_group = true
-    access_groups = ["admins"]
-  }
 }
 