Compare commits


1 Commit

Author: Gitea Actions Bot
SHA1: c642ac7b51
Date: 2025-11-07 14:11:15 +00:00

Auto-update README with current k8s applications

All checks were successful: Terraform / Terraform (pull_request) passed in 38s.

Generated by CI/CD workflow on 2025-11-07 14:11:15

This PR updates the README.md file with the current list of applications found in the k8s/ directory structure.

114 changed files with 300 additions and 3113 deletions
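The commit message says the README application table is regenerated from the k8s/ directory. A minimal sketch of what such a generator could look like, in shell; the k8s/apps layout and the badge URL pattern are taken from the diffs below, but the script itself is an assumption, not the repo's actual CI step:

#!/usr/bin/env bash
# Sketch: rebuild the README application table from k8s/apps/.
# Assumes one directory per ArgoCD application, as in this repo.
set -euo pipefail
{
  echo "| Application | Status |"
  echo "| --- | --- |"
  for dir in k8s/apps/*/; do
    name=$(basename "$dir")
    echo "| **${name}** | [![${name}](https://ag.hexor.cy/api/badge?name=${name}&revision=true)](https://ag.hexor.cy/applications/argocd/${name}) |"
  done
} > README.generated.md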

View File

@@ -30,29 +30,21 @@ jobs:
cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
- name: Terraform Init
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform init
working-directory: ./terraform/authentik
- name: Terraform Format
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform fmt -check
continue-on-error: true
working-directory: ./terraform/authentik
- name: Terraform Apply
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform apply -var-file proxy-apps.tfvars -var-file oauth2-apps.tfvars -var-file terraform.tfvars -var-file groups.tfvars -input=false -auto-approve -parallelism=100
working-directory: ./terraform/authentik
- name: Generate Wiki Content
if: success()
continue-on-error: true
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: |
echo "📋 Starting Wiki generation..."
cd ./terraform/authentik
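This hunk shows the Terraform job's init/fmt/apply sequence. A sketch for reproducing the apply step locally, assuming the same var-files exist and an Authentik token is available in the shell; -auto-approve is deliberately dropped for interactive use:

cd terraform/authentik
export TF_VAR_authentik_token="${AUTHENTIK_TOKEN}"
terraform init
terraform fmt -check || true   # the workflow continues on fmt failures
terraform apply \
  -var-file proxy-apps.tfvars \
  -var-file oauth2-apps.tfvars \
  -var-file terraform.tfvars \
  -var-file groups.tfvars \
  -input=false -parallelism=100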

View File

@@ -22,13 +22,12 @@ jobs:
- name: Install Python dependencies
run: |
python3 -m venv .venv
.venv/bin/pip install pyyaml
pip install pyyaml
- name: Generate K8s Services Wiki
run: |
echo "📋 Starting K8s wiki generation..."
.venv/bin/python .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
python3 .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
if [ -f "Kubernetes-Services.md" ]; then
echo "✅ Wiki content generated successfully"

View File

@@ -40,7 +40,6 @@ ArgoCD homelab project
| **greece-notifier** | [![greece-notifier](https://ag.hexor.cy/api/badge?name=greece-notifier&revision=true)](https://ag.hexor.cy/applications/argocd/greece-notifier) |
| **hexound** | [![hexound](https://ag.hexor.cy/api/badge?name=hexound&revision=true)](https://ag.hexor.cy/applications/argocd/hexound) |
| **immich** | [![immich](https://ag.hexor.cy/api/badge?name=immich&revision=true)](https://ag.hexor.cy/applications/argocd/immich) |
| **iperf3** | [![iperf3](https://ag.hexor.cy/api/badge?name=iperf3&revision=true)](https://ag.hexor.cy/applications/argocd/iperf3) |
| **jellyfin** | [![jellyfin](https://ag.hexor.cy/api/badge?name=jellyfin&revision=true)](https://ag.hexor.cy/applications/argocd/jellyfin) |
| **k8s-secrets** | [![k8s-secrets](https://ag.hexor.cy/api/badge?name=k8s-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
| **khm** | [![khm](https://ag.hexor.cy/api/badge?name=khm&revision=true)](https://ag.hexor.cy/applications/argocd/khm) |
@@ -48,7 +47,6 @@ ArgoCD homelab project
| **paperless** | [![paperless](https://ag.hexor.cy/api/badge?name=paperless&revision=true)](https://ag.hexor.cy/applications/argocd/paperless) |
| **pasarguard** | [![pasarguard](https://ag.hexor.cy/api/badge?name=pasarguard&revision=true)](https://ag.hexor.cy/applications/argocd/pasarguard) |
| **qbittorent-nas** | [![qbittorent-nas](https://ag.hexor.cy/api/badge?name=qbittorent-nas&revision=true)](https://ag.hexor.cy/applications/argocd/qbittorent-nas) |
| **remnawave** | [![remnawave](https://ag.hexor.cy/api/badge?name=remnawave&revision=true)](https://ag.hexor.cy/applications/argocd/remnawave) |
| **rustdesk** | [![rustdesk](https://ag.hexor.cy/api/badge?name=rustdesk&revision=true)](https://ag.hexor.cy/applications/argocd/rustdesk) |
| **sonarr-stack** | [![sonarr-stack](https://ag.hexor.cy/api/badge?name=sonarr-stack&revision=true)](https://ag.hexor.cy/applications/argocd/sonarr-stack) |
| **stirling-pdf** | [![stirling-pdf](https://ag.hexor.cy/api/badge?name=stirling-pdf&revision=true)](https://ag.hexor.cy/applications/argocd/stirling-pdf) |
@@ -56,7 +54,6 @@ ArgoCD homelab project
| **tg-bots** | [![tg-bots](https://ag.hexor.cy/api/badge?name=tg-bots&revision=true)](https://ag.hexor.cy/applications/argocd/tg-bots) |
| **vaultwarden** | [![vaultwarden](https://ag.hexor.cy/api/badge?name=vaultwarden&revision=true)](https://ag.hexor.cy/applications/argocd/vaultwarden) |
| **vpn** | [![vpn](https://ag.hexor.cy/api/badge?name=vpn&revision=true)](https://ag.hexor.cy/applications/argocd/vpn) |
| **xandikos** | [![xandikos](https://ag.hexor.cy/api/badge?name=xandikos&revision=true)](https://ag.hexor.cy/applications/argocd/xandikos) |
</td>
</tr>

View File

@@ -36,7 +36,7 @@ spec:
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
cpu: "1000m"
env:
- name: GITEA__service__REGISTER_MANUAL_CONFIRM
value: "true"
@@ -77,8 +77,8 @@ spec:
labels:
app: gitea-runner
spec:
#nodeSelector:
# kubernetes.io/hostname: home.homenet
nodeSelector:
kubernetes.io/hostname: home.homenet
volumes:
- name: docker-sock
hostPath:
@@ -90,30 +90,27 @@ spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- home.homenet
- weight: 2
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- master.tail2fe2d.ts.net
- weight: 3
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- it.tail2fe2d.ts.net
- ch.tail2fe2d.ts.net
- us.tail2fe2d.ts.net
- home.homenet
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- master.tail2fe2d.ts.net
- weight: 2
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- nas.homenet
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
@@ -121,9 +118,7 @@ spec:
operator: In
values:
- home.homenet
- it.tail2fe2d.ts.net
- ch.tail2fe2d.ts.net
- us.tail2fe2d.ts.net
- nas.homenet
- master.tail2fe2d.ts.net
containers:
- name: gitea-runner
@@ -134,7 +129,7 @@ spec:
memory: "256Mi"
ephemeral-storage: "1Gi" # reserve ephemeral storage
limits:
cpu: "3000m"
cpu: "2000m"
memory: "4Gi"
ephemeral-storage: "28Gi" # hard cap for /data usage
volumeMounts:
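On the nodeAffinity change above: in preferredDuringSchedulingIgnoredDuringExecution a higher weight means a stronger preference, so reordering the weights changes which node the scheduler favors when several candidates are schedulable. A quick way to confirm where the runner actually landed after the rollout (sketch; the namespace is an assumption):

kubectl -n gitea get pods -l app=gitea-runner -o wide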

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitea-runner-token
@@ -24,7 +24,7 @@ spec:
property: login.password
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitea-recapcha-creds
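This hunk flips the ExternalSecret manifests between external-secrets.io/v1 and v1beta1, and the same change repeats across many files below. Which side is correct depends on the installed external-secrets operator release (v1beta1 is the older API, v1 the newer one). A sketch for checking which versions the cluster actually serves before committing to either:

kubectl api-resources --api-group=external-secrets.io
kubectl get crd externalsecrets.external-secrets.io \
  -o jsonpath='{.spec.versions[*].name}'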

View File

@@ -30,7 +30,7 @@ spec:
cpu: "100m"
memory: "256Mi"
limits:
cpu: "3000m"
cpu: "2000m"
memory: "1Gi"
volumeMounts:
- name: data

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: greece-notifier-creds

View File

@@ -30,7 +30,7 @@ spec:
cpu: "50m"
limits:
memory: "128Mi"
cpu: "300m"
cpu: "200m"
command:
- git
- clone
@@ -49,7 +49,7 @@ spec:
cpu: "50m"
limits:
memory: "256Mi"
cpu: "300m"
cpu: "200m"
volumeMounts:
- name: hexound-repo
mountPath: /var/www/html

View File

@@ -23,7 +23,7 @@ spec:
cpu: "500m"
limits:
memory: "4Gi"
cpu: "3000m"
cpu: "2000m"
ports:
- containerPort: 2283
env:
@@ -160,7 +160,7 @@ spec:
cpu: "1000m"
limits:
memory: "8Gi"
cpu: "6000m"
cpu: "4000m"
env:
- name: TZ
value: Asia/Nicosia
@@ -201,7 +201,7 @@ spec:
cpu: "100m"
limits:
memory: "512Mi"
cpu: "750m"
cpu: "500m"
readinessProbe:
exec:
command: ["redis-cli", "ping"]

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: iperf3
namespace: argocd
spec:
project: apps
destination:
namespace: iperf3
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/iperf3
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,92 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: iperf3-server
spec:
selector:
matchLabels:
app: iperf3-server
template:
metadata:
labels:
app: iperf3-server
spec:
serviceAccountName: iperf3-server
subdomain: iperf3
initContainers:
- name: create-service
image: bitnami/kubectl:latest
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- /bin/bash
- -c
- |
# Clean node name for service name
NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
SERVICE_NAME="iperf3-${NODE_CLEAN}"
# Create service for this pod
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: iperf3
labels:
app: iperf3-node-service
target-node: "${NODE_NAME}"
spec:
type: ClusterIP
ports:
- name: iperf3
port: 5201
protocol: TCP
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: iperf3
labels:
app: iperf3-node-service
target-node: "${NODE_NAME}"
subsets:
- addresses:
- ip: ${POD_IP}
ports:
- name: iperf3
port: 5201
protocol: TCP
EOF
containers:
- name: iperf3-server
image: networkstatic/iperf3:latest
args: ["-s"]
ports:
- containerPort: 5201
protocol: TCP
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "256Mi"
cpu: "750m"
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
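The deleted DaemonSet above used an initContainer to create a per-node Service/Endpoints pair (iperf3-<short-node-name>) pointing at the pod, making each node's iperf3 server individually addressable. While it existed, a test run against one node could have looked like this (sketch; the service name follows the pattern from the init script, and the image's iperf3 entrypoint is inferred from the args: ["-s"] usage above):

kubectl -n iperf3 run iperf3-client --rm -it \
  --image=networkstatic/iperf3:latest \
  -- -c iperf3-home.iperf3.svc.cluster.local -p 5201 -t 10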

View File

@@ -1,92 +0,0 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: iperf3-exporter
labels:
app: iperf3-exporter
spec:
selector:
matchLabels:
app: iperf3-exporter
template:
metadata:
labels:
app: iperf3-exporter
spec:
serviceAccountName: iperf3-server
initContainers:
- name: create-exporter-service
image: bitnami/kubectl:latest
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- /bin/bash
- -c
- |
NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
SERVICE_NAME="iperf3-exporter-${NODE_CLEAN}"
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: iperf3
labels:
app: iperf3-exporter-service
target-node: "${NODE_NAME}"
spec:
type: ClusterIP
ports:
- name: metrics
port: 9579
protocol: TCP
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: iperf3
labels:
app: iperf3-exporter-service
target-node: "${NODE_NAME}"
subsets:
- addresses:
- ip: ${POD_IP}
ports:
- name: metrics
port: 9579
protocol: TCP
EOF
containers:
- name: iperf3-exporter
image: ghcr.io/edgard/iperf3_exporter:1.2.2
ports:
- containerPort: 9579
name: metrics
protocol: TCP
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "300m"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists

View File

@@ -1,15 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: iperf3-exporter
labels:
app: iperf3-exporter
spec:
selector:
app: iperf3-exporter
ports:
- name: metrics
protocol: TCP
port: 9579
targetPort: 9579

View File

@@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- rbac.yaml
- daemonset.yaml
- service-headless.yaml
- iperf3-exporter-daemonset.yaml
- iperf3-exporter-service.yaml
- servicemonitor.yaml

View File

@@ -1,36 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: iperf3-server
namespace: iperf3
labels:
app: iperf3-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: iperf3-service-manager
namespace: iperf3
labels:
app: iperf3-server
rules:
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: iperf3-service-manager
namespace: iperf3
labels:
app: iperf3-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: iperf3-service-manager
subjects:
- kind: ServiceAccount
name: iperf3-server
namespace: iperf3

View File

@@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: iperf3
spec:
clusterIP: None
selector:
app: iperf3-server
ports:
- name: iperf3
protocol: TCP
port: 5201
targetPort: 5201

View File

@@ -1,122 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: iperf3-exporter
labels:
app: iperf3-exporter
release: prometheus
spec:
selector:
matchLabels:
app: iperf3-exporter
endpoints:
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-ch.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-ch.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-us.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-us.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-iris.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-iris.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-home.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-home.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-master.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-master.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-it.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-it.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-nas.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-nas.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-spb.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-spb.iperf3.svc:9579
metricRelabelings:
- sourceLabels: [__name__]
regex: iperf3_(.+)
targetLabel: __name__
replacement: network_${1}
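The deleted ServiceMonitor above repeats the same endpoint block eight times, varying only the node short name. If the manifest were ever revived, the blocks could be generated rather than hand-maintained; a sketch, with the node list taken from the targets above:

for node in ch us iris home master it nas spb; do
  cat <<EOF
  - port: metrics
    path: /probe
    interval: 5m
    scrapeTimeout: 30s
    params:
      target: ['iperf3-${node}.iperf3.svc.cluster.local:5201']
      period: ['10s']
      streams: ['4']
    relabelings:
      - sourceLabels: [__param_target]
        targetLabel: instance
      - targetLabel: __address__
        replacement: iperf3-exporter-${node}.iperf3.svc:9579
EOF
done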

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: vpn-creds
@@ -76,14 +76,11 @@ spec:
secretKeyRef:
name: vpn-creds
key: ss_link
command: ["/bin/bash", "-c", "rm /etc/shadowsocks-rust/config.json && sslocal --server-url $SS_LINK --local-addr 127.0.0.1:8081 -U --protocol http"]
command: ["/bin/bash", "-c", "rm /etc/shadowsocks-rust/config.json && sslocal --online-config-url $SS_LINK --local-addr 127.0.0.1:8081 -U --protocol http"]
resources:
requests:
memory: "64Mi"
cpu: "300m"
limits:
memory: "128Mi"
cpu: "300m"
cpu: "200m"
---
apiVersion: v1
kind: Service
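The command change in this hunk swaps sslocal's --server-url (which takes a single ss:// server URL) for --online-config-url, which in shadowsocks-rust points the client at a SIP008-style online-config subscription endpoint (an assumption worth verifying against the installed sslocal version). That implies the vpn-creds ss_link value must now hold an HTTPS subscription URL rather than an ss:// link. A sketch of the new invocation shape, with a hypothetical endpoint:

sslocal --online-config-url "https://example.com/sub/<uuid>" \
        --local-addr 127.0.0.1:8081 -U --protocol http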

View File

@@ -1,12 +1,12 @@
image:
tag: 10.11.4
tag: 10.10.7
resources:
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "8Gi"
cpu: "6000m"
cpu: "4000m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
persistence:
@@ -36,40 +36,8 @@ ingress:
paths:
- path: /
pathType: Prefix
- host: us.hexor.cy
paths:
- path: /
pathType: Prefix
- host: ch.hexor.cy
paths:
- path: /
pathType: Prefix
- host: jp.hexor.cy
paths:
- path: /
pathType: Prefix
- host: spb.hexor.cy
paths:
- path: /
pathType: Prefix
- host: cy.hexor.cy
paths:
- path: /
pathType: Prefix
- host: am.hexor.cy
paths:
- path: /
pathType: Prefix
- host: de.hexor.cy
paths:
- path: /
pathType: Prefix
- host: it.hexor.cy
paths:
- path: /
pathType: Prefix
tls:
- secretName: jellyfin-tls
hosts:
- '*.hexor.cy'
- 'jf.hexor.cy'

View File

@@ -19,7 +19,7 @@ spec:
kubernetes.io/os: linux
containers:
- name: secret-reader
image: ultradesu/k8s-secrets:0.2.1
image: ultradesu/k8s-secrets:0.1.1
imagePullPolicy: Always
args:
- "--secrets"
@@ -28,7 +28,6 @@ spec:
- "k8s-secret"
- "--port"
- "3000"
- "--webhook"
ports:
- containerPort: 3000
name: http
@@ -41,7 +40,7 @@ spec:
cpu: "50m"
limits:
memory: "128Mi"
cpu: "150m"
cpu: "100m"
livenessProbe:
httpGet:
path: /health

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: openai-creds

View File

@@ -29,7 +29,7 @@ spec:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
command:
- /bin/sh
- -c

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: khm-pg-creds

View File

@@ -1,33 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: oidc-secret
spec:
target:
name: oidc-secret
deletionPolicy: Delete
template:
type: Opaque
data:
OAUTH_CLIENT_SECRET: |-
{{ .OAUTH_CLIENT_SECRET }}
OAUTH_CLIENT_ID: |-
{{ .OAUTH_CLIENT_ID }}
data:
- secretKey: OAUTH_CLIENT_SECRET
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
property: fields[0].value
- secretKey: OAUTH_CLIENT_ID
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
property: fields[1].value

View File

@@ -1,21 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- external-secrets.yaml
helmCharts:
- name: ollama
- name: ollama-helm
repo: https://otwld.github.io/ollama-helm/
version: 0.4.0
releaseName: ollama
namespace: ollama
valuesFile: ollama-values.yaml
includeCRDs: true
- name: open-webui
repo: https://helm.openwebui.com/
version: 8.14.0
releaseName: openweb-ui
namespace: ollama
valuesFile: openweb-ui-values.yaml
includeCRDs: true
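This kustomization inflates Helm charts via the helmCharts field, which a plain kustomize build does not render by default. A sketch for rendering it locally with the standalone kustomize binary (the path is an assumption):

kustomize build --enable-helm k8s/apps/ollama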

View File

@@ -5,4 +5,17 @@ image:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
hosts:
- host: ai.hexor.cy
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- '*.hexor.cy'
secretName: ollama-tls

View File

@@ -1,52 +0,0 @@
clusterDomain: ai.hexor.cy
extraEnvVars:
GLOBAL_LOG_LEVEL: debug
OAUTH_PROVIDER_NAME: authentik
OPENID_PROVIDER_URL: https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration
OPENID_REDIRECT_URI: https://ai.hexor.cy/oauth/oidc/callback
WEBUI_URL: https://ai.hexor.cy
# Allows auto-creation of new users using OAuth. Must be paired with ENABLE_LOGIN_FORM=false.
ENABLE_OAUTH_SIGNUP: true
# Disables user/password login form. Required when ENABLE_OAUTH_SIGNUP=true.
ENABLE_LOGIN_FORM: false
OAUTH_MERGE_ACCOUNTS_BY_EMAIL: true
extraEnvFrom:
- secretRef:
name: oidc-secret
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
ollamaUrls:
- http://ollama.ollama.svc:11434
ollama:
enabled: false
ollama:
gpu:
enabled: false
models:
pull:
- qwen3-vl:8b
run:
- qwen3-vl:8b
pipelines:
enabled: true
tika:
enabled: true
websocket:
enabled: true
ingress:
enabled: true
class: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
host: "ai.hexor.cy"
tls:
- hosts:
- '*.hexor.cy'
secretName: ollama-tls

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: postgres-and-oauth-creds

View File

@@ -13,7 +13,7 @@ resources:
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
cpu: "1000m"
service:
type: ClusterIP

View File

@@ -27,11 +27,4 @@ helmCharts:
namespace: paperless
valuesFile: gotenberg-values.yaml
includeCRDs: true
#- name: redis
# repo: oci://registry-1.docker.io/bitnamicharts/redis
# version: 24.1.0
# releaseName: redis
# namespace: paperless
# includeCRDs: true
# valuesFile: bazarr-values.yaml

View File

@@ -6,7 +6,7 @@ resources:
cpu: "500m"
limits:
memory: "4Gi"
cpu: "3000m"
cpu: "2000m"
initContainers:
install-tesseract-langs:
image: ghcr.io/paperless-ngx/paperless-ngx:2.18.2
@@ -16,7 +16,7 @@ initContainers:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
command: ["/bin/sh", "-c"]
args:
- apt-get update && apt-get install -y --reinstall tesseract-ocr-rus tesseract-ocr-jpn tesseract-ocr-chi-sim tesseract-ocr-eng tesseract-ocr-ell && cp -v -r /usr/share/tesseract-ocr/5/tessdata/* /custom-tessdata/
@@ -107,8 +107,6 @@ persistence:
- path: /usr/src/paperless/consume
redis:
enabled: true
image:
tag: latest
master:
nodeSelector:
kubernetes.io/hostname: nas.homenet

View File

@@ -13,7 +13,7 @@ resources:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
service:
type: ClusterIP

View File

@@ -1,212 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: pasarguard-scripts-ingress
labels:
app: pasarguard-node-ingress
data:
init-uuid-ingress.sh: |
#!/bin/bash
set -e
echo "Started"
# NODE_NAME is already set via environment variable
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
# Get DNS name from node label xray-public-address
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-public-address}')
if [ -z "${DNS_NAME}" ]; then
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-public-address'"
exit 1
fi
echo "Node: ${NODE_NAME}"
echo "DNS Name from label: ${DNS_NAME}"
# Use DNS name for ConfigMap name to ensure uniqueness
CONFIGMAP_NAME="node-uuid-ingress-${DNS_NAME//./-}"
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
# Check if ConfigMap exists and get UUID
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "ConfigMap exists, reading UUID..."
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
if [ -z "${API_KEY}" ]; then
echo "UUID not found in ConfigMap, generating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
else
echo "Using existing UUID from ConfigMap"
fi
else
echo "ConfigMap does not exist, creating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
--from-literal=API_KEY="${API_KEY}" \
--from-literal=NODE_NAME="${NODE_NAME}"
fi
# Save UUID and node info to shared volume for the main container
echo -n "${API_KEY}" > /shared/api-key
echo -n "${NODE_NAME}" > /shared/node-name
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
echo "UUID initialized: ${API_KEY}"
echo "Node name: ${NODE_NAME}"
echo "ConfigMap: ${CONFIGMAP_NAME}"
# Create Certificate for this node using DNS name from label
CERT_NAME="pasarguard-node-ingress-${DNS_NAME//./-}"
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
# Check if Certificate already exists
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate does not exist, creating..."
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ${CERT_NAME}
namespace: ${NAMESPACE}
spec:
secretName: ${CERT_NAME}-tls
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- ${DNS_NAME}
EOF
else
echo "Certificate already exists"
fi
# Wait for certificate to be ready
echo "Waiting for certificate to be ready..."
for i in {1..600}; do
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate secret is ready!"
break
fi
echo "Waiting for certificate... ($i/600)"
sleep 1
done
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "WARNING: Certificate secret not ready after 600 seconds"
else
# Extract certificate and key from secret to shared volume
echo "Extracting certificate and key..."
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
echo "Certificate and key extracted successfully."
cat /shared/tls.crt
fi
# Create ClusterIP Service for this node (pod selector based)
NODE_SHORT_NAME="${NODE_NAME%%.*}"
SERVICE_NAME="${NODE_SHORT_NAME}-ingress"
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME})"
# Create Service with pod selector including node name
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node-ingress
node: ${NODE_NAME}
spec:
type: ClusterIP
selector:
app: pasarguard-node-ingress
node-name: ${NODE_SHORT_NAME}
ports:
- name: proxy
port: 443
protocol: TCP
targetPort: 443
- name: api
port: 62050
protocol: TCP
targetPort: 62050
EOF
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local"
# Create IngressRouteTCP for this DNS name with TLS passthrough
INGRESS_NAME="pasarguard-tcp-${DNS_NAME//./-}"
echo "Creating IngressRouteTCP: ${INGRESS_NAME} for ${DNS_NAME}"
cat <<EOF | kubectl apply -f -
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: ${INGRESS_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node-ingress
node: ${NODE_NAME}
spec:
entryPoints:
- websecure
routes:
- match: HostSNI(\`${DNS_NAME}\`)
services:
- name: ${SERVICE_NAME}
port: 443
tls:
passthrough: true
EOF
echo "IngressRouteTCP created: ${INGRESS_NAME}"
echo "Traffic to ${DNS_NAME}:443 will be routed to ${SERVICE_NAME}:443"
# Create second IngressRouteTCP for API port 62051
INGRESS_API_NAME="pasarguard-api-${DNS_NAME//./-}"
echo "Creating IngressRouteTCP for API: ${INGRESS_API_NAME} for ${DNS_NAME}:62051"
cat <<EOF | kubectl apply -f -
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: ${INGRESS_API_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node-ingress
node: ${NODE_NAME}
spec:
entryPoints:
- pasarguard-api
routes:
- match: HostSNI(\`${DNS_NAME}\`)
services:
- name: ${SERVICE_NAME}
port: 62050
tls:
passthrough: true
EOF
echo "IngressRouteTCP API created: ${INGRESS_API_NAME}"
echo "Traffic to ${DNS_NAME}:62051 will be routed to ${SERVICE_NAME}:62050"
pasarguard-start.sh: |
#!/bin/sh
# Read API_KEY from shared volume created by init container
if [ -f /shared/api-key ]; then
export API_KEY=$(cat /shared/api-key)
echo "Loaded API_KEY from shared volume"
else
echo "WARNING: API_KEY file not found, using default"
fi
cd /app
exec ./main

View File

@@ -1,264 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: pasarguard-scripts
labels:
app: pasarguard-node
data:
init-uuid.sh: |
#!/bin/bash
set -e
echo "Started"
# NODE_NAME is already set via environment variable
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
# Get DNS name from node label xray-node-address
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
if [ -z "${DNS_NAME}" ]; then
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
exit 1
fi
echo "Node: ${NODE_NAME}"
echo "DNS Name from label: ${DNS_NAME}"
# Use DNS name for ConfigMap name to ensure uniqueness
CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
# Check if ConfigMap exists and get UUID
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "ConfigMap exists, reading UUID..."
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
if [ -z "${API_KEY}" ]; then
echo "UUID not found in ConfigMap, generating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
else
echo "Using existing UUID from ConfigMap"
fi
else
echo "ConfigMap does not exist, creating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
--from-literal=API_KEY="${API_KEY}" \
--from-literal=NODE_NAME="${NODE_NAME}"
fi
# Save UUID and node info to shared volume for the main container
echo -n "${API_KEY}" > /shared/api-key
echo -n "${NODE_NAME}" > /shared/node-name
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
echo "UUID initialized: ${API_KEY}"
echo "Node name: ${NODE_NAME}"
echo "ConfigMap: ${CONFIGMAP_NAME}"
# Create Certificate for this node using DNS name from label
CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
# Check if Certificate already exists
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate does not exist, creating..."
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ${CERT_NAME}
namespace: ${NAMESPACE}
spec:
secretName: ${CERT_NAME}-tls
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- ${DNS_NAME}
EOF
else
echo "Certificate already exists"
fi
# Wait for certificate to be ready
echo "Waiting for certificate to be ready..."
for i in {1..600}; do
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate secret is ready!"
break
fi
echo "Waiting for certificate... ($i/600)"
sleep 1
done
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "WARNING: Certificate secret not ready after 600 seconds"
else
# Extract certificate and key from secret to shared volume
echo "Extracting certificate and key..."
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
echo "Certificate and key extracted successfully."
cat /shared/tls.crt
fi
# Create individual Service and Endpoints for this node
# Take only first part of node name before first dot
NODE_SHORT_NAME="${NODE_NAME%%.*}"
SERVICE_NAME="${NODE_SHORT_NAME}"
# Get node internal IP (take only first IP if multiple)
NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
# Create Service without selector
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
spec:
clusterIP: None
ports:
- name: api
port: 62050
protocol: TCP
targetPort: 62050
- name: metrics
port: 9550
protocol: TCP
targetPort: 9550
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
subsets:
- addresses:
- ip: ${NODE_IP}
nodeName: ${NODE_NAME}
ports:
- name: api
port: 62050
protocol: TCP
- name: metrics
port: 9550
protocol: TCP
EOF
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
exporter-start.sh: |
#!/bin/sh
# Install required tools
apk add --no-cache wget curl iproute2-ss bash
# Download v2ray-exporter
echo "Downloading v2ray-exporter..."
ARCH=$(uname -m)
case $ARCH in
x86_64)
BINARY_ARCH="amd64"
;;
aarch64|arm64)
BINARY_ARCH="arm64"
;;
*)
echo "Unsupported architecture: $ARCH"
exit 1
;;
esac
echo "Detected architecture: $ARCH, using binary: v2ray-exporter_linux_$BINARY_ARCH"
wget -L -O /tmp/v2ray-exporter "https://github.com/wi1dcard/v2ray-exporter/releases/download/v0.6.0/v2ray-exporter_linux_$BINARY_ARCH"
mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
chmod +x /usr/local/bin/v2ray-exporter
# Wait for initial API port file
echo "Waiting for initial xray API port file..."
while [ ! -f /shared/xray-api-port ]; do
echo "Waiting for API port file..."
sleep 2
done
# Main loop - restart exporter if it crashes or port changes
while true; do
if [ -f /shared/xray-api-port ]; then
API_PORT=$(cat /shared/xray-api-port)
if [ -n "$API_PORT" ]; then
echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
/usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550" &
EXPORTER_PID=$!
# Wait for exporter to exit or port file to change
while kill -0 $EXPORTER_PID 2>/dev/null; do
if [ -f /shared/xray-api-port ]; then
NEW_PORT=$(cat /shared/xray-api-port)
if [ "$NEW_PORT" != "$API_PORT" ]; then
echo "API port changed from $API_PORT to $NEW_PORT, restarting exporter"
kill $EXPORTER_PID 2>/dev/null
wait $EXPORTER_PID 2>/dev/null
break
fi
fi
sleep 5
done
echo "Exporter stopped, restarting..."
wait $EXPORTER_PID 2>/dev/null
fi
fi
sleep 2
done
pasarguard-start.sh: |
#!/bin/sh
# Read API_KEY from shared volume created by init container
if [ -f /shared/api-key ]; then
export API_KEY=$(cat /shared/api-key)
echo "Loaded API_KEY from shared volume"
else
echo "WARNING: API_KEY file not found, using default"
fi
cd /app
# Start main process in background
./main &
MAIN_PID=$!
# Start continuous port monitoring in background
{
sleep 10 # Wait for xray to start initially
LAST_PORT=""
while true; do
API_PORT=$(netstat -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
if [ -n "$API_PORT" ] && [ "$API_PORT" != "$LAST_PORT" ]; then
echo "Found xray API port: $API_PORT"
echo -n "$API_PORT" > /shared/xray-api-port
LAST_PORT="$API_PORT"
fi
sleep 5 # Check every 5 seconds
done
} &
PORT_MONITOR_PID=$!
# Wait for main process to finish
wait $MAIN_PID
# Clean up port monitor
kill $PORT_MONITOR_PID 2>/dev/null

View File

@@ -1,211 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: pasarguard-node-ingress
labels:
app: pasarguard-node-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: pasarguard-node-ingress-configmap
labels:
app: pasarguard-node-ingress
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "create", "update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
- apiGroups: ["traefik.io", "traefik.containo.us"]
resources: ["ingressroutetcps"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "patch", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: pasarguard-node-ingress-configmap
labels:
app: pasarguard-node-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pasarguard-node-ingress-configmap
subjects:
- kind: ServiceAccount
name: pasarguard-node-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: pasarguard-node-ingress-reader
labels:
app: pasarguard-node-ingress
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: pasarguard-node-ingress-reader
labels:
app: pasarguard-node-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: pasarguard-node-ingress-reader
subjects:
- kind: ServiceAccount
name: pasarguard-node-ingress
namespace: pasarguard
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: pasarguard-node-ingress
labels:
app: pasarguard-node-ingress
spec:
selector:
matchLabels:
app: pasarguard-node-ingress
revisionHistoryLimit: 3
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: pasarguard-node-ingress
spec:
serviceAccountName: pasarguard-node-ingress
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: xray-public-address
operator: Exists
initContainers:
- name: label-pod
image: bitnami/kubectl:latest
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /bin/bash
- -c
- |
# Add node label to pod
NODE_SHORT=$(echo ${NODE_NAME} | cut -d. -f1)
kubectl label pod ${POD_NAME} -n ${POD_NAMESPACE} node-name=${NODE_SHORT} --overwrite
- name: init-uuid
image: bitnami/kubectl:latest
env:
- name: GODEBUG
value: "x509sha1=1"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /bin/bash
- /scripts/init-uuid-ingress.sh
volumeMounts:
- name: shared-data
mountPath: /shared
- name: scripts
mountPath: /scripts
containers:
- name: pasarguard-node
image: 'pasarguard/node:v0.1.3'
imagePullPolicy: Always
command:
- /bin/sh
- /scripts/pasarguard-start.sh
ports:
- name: api
containerPort: 62050
protocol: TCP
- name: proxy
containerPort: 443
protocol: TCP
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NODE_HOST
value: "0.0.0.0"
- name: SERVICE_PORT
value: "62050"
- name: SERVICE_PROTOCOL
value: "grpc"
- name: DEBUG
value: "true"
- name: SSL_CERT_FILE
value: "/shared/tls.crt"
- name: SSL_KEY_FILE
value: "/shared/tls.key"
- name: XRAY_EXECUTABLE_PATH
value: "/usr/local/bin/xray"
- name: XRAY_ASSETS_PATH
value: "/usr/local/share/xray"
- name: API_KEY
value: "change-this-to-a-secure-uuid"
livenessProbe:
tcpSocket:
port: 62050
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
tcpSocket:
port: 62050
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
resources:
requests:
memory: "128Mi"
cpu: "300m"
limits:
memory: "512Mi"
cpu: "1000m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: false
- name: scripts
mountPath: /scripts
volumes:
- name: shared-data
emptyDir: {}
- name: scripts
configMap:
name: pasarguard-scripts-ingress
defaultMode: 0755

View File

@@ -105,19 +105,174 @@ spec:
fieldPath: spec.nodeName
command:
- /bin/bash
- /scripts/init-uuid.sh
- -c
- |
set -e
echo "Started"
# NODE_NAME is already set via environment variable
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
# Get DNS name from node label xray-node-address
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
if [ -z "${DNS_NAME}" ]; then
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
exit 1
fi
echo "Node: ${NODE_NAME}"
echo "DNS Name from label: ${DNS_NAME}"
# Use DNS name for ConfigMap name to ensure uniqueness
CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
# Check if ConfigMap exists and get UUID
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "ConfigMap exists, reading UUID..."
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
if [ -z "${API_KEY}" ]; then
echo "UUID not found in ConfigMap, generating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
else
echo "Using existing UUID from ConfigMap"
fi
else
echo "ConfigMap does not exist, creating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
--from-literal=API_KEY="${API_KEY}" \
--from-literal=NODE_NAME="${NODE_NAME}"
fi
# Save UUID and node info to shared volume for the main container
echo -n "${API_KEY}" > /shared/api-key
echo -n "${NODE_NAME}" > /shared/node-name
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
echo "UUID initialized: ${API_KEY}"
echo "Node name: ${NODE_NAME}"
echo "ConfigMap: ${CONFIGMAP_NAME}"
# Create Certificate for this node using DNS name from label
CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
# Check if Certificate already exists
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate does not exist, creating..."
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ${CERT_NAME}
namespace: ${NAMESPACE}
spec:
secretName: ${CERT_NAME}-tls
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- ${DNS_NAME}
EOF
else
echo "Certificate already exists"
fi
# Wait for certificate to be ready
echo "Waiting for certificate to be ready..."
for i in {1..600}; do
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate secret is ready!"
break
fi
echo "Waiting for certificate... ($i/600)"
sleep 1
done
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "WARNING: Certificate secret not ready after 600 seconds"
else
# Extract certificate and key from secret to shared volume
echo "Extracting certificate and key..."
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
echo "Certificate and key extracted successfully."
cat /shared/tls.crt
fi
# Create individual Service and Endpoints for this node
# Take only first part of node name before first dot
NODE_SHORT_NAME="${NODE_NAME%%.*}"
SERVICE_NAME="${NODE_SHORT_NAME}"
# Get node internal IP (take only first IP if multiple)
NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
# Create Service without selector
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
spec:
clusterIP: None
ports:
- name: api
port: 62050
protocol: TCP
targetPort: 62050
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
subsets:
- addresses:
- ip: ${NODE_IP}
nodeName: ${NODE_NAME}
ports:
- name: api
port: 62050
protocol: TCP
EOF
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
volumeMounts:
- name: shared-data
mountPath: /shared
- name: scripts
mountPath: /scripts
containers:
- name: pasarguard-node
image: 'pasarguard/node:v0.1.4'
image: 'pasarguard/node:v0.1.1'
imagePullPolicy: Always
command:
- /bin/sh
- /scripts/pasarguard-start.sh
- -c
- |
# Read API_KEY from shared volume created by init container
if [ -f /shared/api-key ]; then
export API_KEY=$(cat /shared/api-key)
echo "Loaded API_KEY from shared volume"
else
echo "WARNING: API_KEY file not found, using default"
fi
cd /app
exec ./main
ports:
- name: api
containerPort: 62050
@@ -162,60 +317,14 @@ spec:
resources:
requests:
memory: "128Mi"
cpu: "500m"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1200m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: false
- name: scripts
mountPath: /scripts
- name: xray-exporter
image: alpine:3.18
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- /scripts/exporter-start.sh
ports:
- name: metrics
containerPort: 9550
protocol: TCP
livenessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "500m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: true
- name: scripts
mountPath: /scripts
volumes:
- name: shared-data
emptyDir: {}
- name: scripts
configMap:
name: pasarguard-scripts
defaultMode: 0755

View File

@@ -34,7 +34,7 @@ spec:
mountPath: /templates/subscription
containers:
- name: pasarguard-web
image: 'pasarguard/panel:latest'
image: 'pasarguard/panel:v1.4.1'
imagePullPolicy: Always
envFrom:
- secretRef:

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: pasarguard-secrets

View File

@@ -7,8 +7,5 @@ resources:
- ./deployment.yaml
- ./daemonset.yaml
- ./certificate.yaml
- ./configmap-scripts.yaml
- ./servicemonitor.yaml
- ./configmap-scripts-ingress.yaml
# - ./daemonset-ingress.yaml
# - ./traefik-pasarguard-entrypoint.yaml

View File

@@ -1,21 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: pasarguard-node-metrics
labels:
app: pasarguard-node
release: prometheus
spec:
selector:
matchLabels:
app: pasarguard-node
endpoints:
- port: metrics
path: /scrape
interval: 30s
scrapeTimeout: 10s
honorLabels: true
namespaceSelector:
matchNames:
- pasarguard

View File

@@ -1,66 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: traefik
namespace: kube-system
spec:
template:
spec:
containers:
- name: traefik
args:
- --entryPoints.metrics.address=:9100/tcp
- --entryPoints.traefik.address=:8080/tcp
- --entryPoints.web.address=:8000/tcp
- --entryPoints.websecure.address=:8443/tcp
- --entryPoints.pasarguard-api.address=:62051/tcp
- --api.dashboard=true
- --ping=true
- --metrics.prometheus=true
- --metrics.prometheus.entrypoint=metrics
- --providers.kubernetescrd
- --providers.kubernetescrd.allowEmptyServices=true
- --providers.kubernetesingress
- --providers.kubernetesingress.allowEmptyServices=true
- --providers.kubernetesingress.ingressendpoint.publishedservice=kube-system/traefik
- --entryPoints.websecure.http.tls=true
- --log.level=INFO
- --entryPoints.web.transport.respondingTimeouts.readTimeout=0s
- --entryPoints.websecure.transport.respondingTimeouts.readTimeout=0s
ports:
- containerPort: 9100
name: metrics
protocol: TCP
- containerPort: 8080
name: traefik
protocol: TCP
- containerPort: 8000
name: web
protocol: TCP
- containerPort: 8443
name: websecure
protocol: TCP
- containerPort: 62051
name: pasarguard-api
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: traefik
namespace: kube-system
spec:
ports:
- name: web
port: 80
protocol: TCP
targetPort: web
- name: websecure
port: 443
protocol: TCP
targetPort: websecure
- name: pasarguard-api
port: 62051
protocol: TCP
targetPort: pasarguard-api

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: remnawave
namespace: argocd
spec:
project: apps
destination:
namespace: remnawave
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/remnawave
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,71 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: remnawave
labels:
app: remnawave
annotations:
reloader.stakater.com/auto: "true"
spec:
selector:
matchLabels:
app: remnawave
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: remnawave
spec:
containers:
- name: remnawave
image: 'remnawave/backend:2'
imagePullPolicy: Always
envFrom:
- secretRef:
name: remnawave-secrets
env:
- name: REDIS_URL
value: "redis://remnawave-redis:6379"
ports:
- name: http
containerPort: 3000
protocol: TCP
- name: metrics
containerPort: 3001
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 3001
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: 3001
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 3
---
apiVersion: v1
kind: Service
metadata:
name: remnawave
spec:
selector:
app: remnawave
ports:
- name: http
protocol: TCP
port: 3000
targetPort: 3000
- name: metrics
protocol: TCP
port: 3001
targetPort: 3001

View File

@@ -1,70 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: remnawave-secrets
spec:
target:
name: remnawave-secrets
deletionPolicy: Delete
template:
type: Opaque
data:
METRICS_USER: admin
FRONT_END_DOMAIN: rw.hexor.cy
SUB_PUBLIC_DOMAIN: sub.hexor.cy
REDIS_HOST: remnawave-redis
REDIS_PORT: "6379"
DATABASE_URL: |-
postgresql://remnawave:{{ .pg_pass }}@psql.psql.svc:5432/remnawave
JWT_AUTH_SECRET: |-
{{ .jwt_auth_secret }}
JWT_API_TOKENS_SECRET: |-
{{ .jwt_api_tokens_secret }}
METRICS_PASS: |-
{{ .metrics_pass }}
WEBHOOK_SECRET_HEADER: |-
{{ .webhook_secret }}
data:
- secretKey: pg_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[10].value
- secretKey: jwt_auth_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 0d090436-5e82-453a-914c-19cec2abded1
property: fields[0].value
- secretKey: jwt_api_tokens_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 0d090436-5e82-453a-914c-19cec2abded1
property: fields[1].value
- secretKey: metrics_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 0d090436-5e82-453a-914c-19cec2abded1
property: fields[2].value
- secretKey: webhook_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 0d090436-5e82-453a-914c-19cec2abded1
property: fields[3].value

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./external-secrets.yaml
- ./deployment.yaml
- ./redis-deployment.yaml
- ./subscription-page-configmap.yaml
- ./subscription-page-deployment.yaml
- ./servicemonitor.yaml
- ./user-ui-ingress.yaml
- ./panel-ingress.yaml

View File

@@ -1,37 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: panel-ui
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: rw.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave
port:
number: 3000
- host: rw.hexor.ru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave
port:
number: 3000
tls:
- secretName: remnawave-panel-tls
hosts:
- rw.hexor.cy
- rw.hexor.ru

View File

@@ -1,71 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: remnawave-redis
labels:
app: remnawave-redis
spec:
selector:
matchLabels:
app: remnawave-redis
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: remnawave-redis
spec:
containers:
- name: redis
image: 'valkey/valkey:8.1-alpine'
imagePullPolicy: Always
ports:
- name: redis
containerPort: 6379
protocol: TCP
livenessProbe:
exec:
command:
- valkey-cli
- ping
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
exec:
command:
- valkey-cli
- ping
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
volumeMounts:
- name: redis-data
mountPath: /data
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "256Mi"
cpu: "200m"
volumes:
- name: redis-data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: remnawave-redis
spec:
selector:
app: remnawave-redis
ports:
- name: redis
protocol: TCP
port: 6379
targetPort: 6379

View File

@@ -1,21 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: remnawave-metrics
labels:
app: remnawave
release: prometheus
spec:
selector:
matchLabels:
app: remnawave
endpoints:
- port: metrics
path: /metrics
interval: 30s
scrapeTimeout: 10s
honorLabels: true
namespaceSelector:
matchNames:
- remnawave

View File

@@ -1,27 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: remnawave-subscription-page-config
labels:
app: remnawave-subscription-page
data:
APP_PORT: "3010"
REMNAWAVE_PANEL_URL: "https://rw.hexor.cy"
META_TITLE: "RemnaWave Subscription"
META_DESCRIPTION: "Your VPN subscription portal"
META_KEYWORDS: "vpn,subscription,remnawave"
META_AUTHOR: "RemnaWave"
ENABLE_ANALYTICS: "false"
ANALYTICS_MEASUREMENT_ID: ""
CUSTOM_SUB_PREFIX: ""
THEME: "dark"
CUSTOM_LOGO_URL: ""
SHOW_SUBSCRIPTION_INFO: "true"
SHOW_CONNECTION_INFO: "true"
SHOW_QR_CODE: "true"
QR_CODE_SIZE: "256"
REFRESH_INTERVAL: "30000"
SUBSCRIPTION_TEXT_COLOR: "#ffffff"
BACKGROUND_COLOR: "#1a1a1a"
ACCENT_COLOR: "#007bff"

View File

@@ -1,52 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: remnawave-subscription-page
labels:
app: remnawave-subscription-page
spec:
selector:
matchLabels:
app: remnawave-subscription-page
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: remnawave-subscription-page
spec:
containers:
- name: subscription-page
image: 'remnawave/subscription-page:latest'
imagePullPolicy: Always
envFrom:
- configMapRef:
name: remnawave-subscription-page-config
ports:
- name: http
containerPort: 3010
protocol: TCP
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "256Mi"
cpu: "200m"
---
apiVersion: v1
kind: Service
metadata:
name: remnawave-subscription-page
labels:
app: remnawave-subscription-page
spec:
selector:
app: remnawave-subscription-page
ports:
- name: http
protocol: TCP
port: 3010
targetPort: 3010

View File

@@ -1,37 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: user-ui
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: sub.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave-subscription-page
port:
number: 3010
- host: sub.hexor.ru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave-subscription-page
port:
number: 3010
tls:
- secretName: remnawave-user-ui-tls
hosts:
- sub.hexor.cy
- sub.hexor.ru

View File

@@ -27,7 +27,7 @@ spec:
cpu: "100m"
limits:
memory: "512Mi"
cpu: "750m"
cpu: "500m"
command: ["hbbs"]
args:
- "--relay-servers"
@@ -98,7 +98,7 @@ spec:
cpu: "100m"
limits:
memory: "512Mi"
cpu: "750m"
cpu: "500m"
command: ["hbbr"]
args:
- "--port"

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: rustdesk-keys

View File

@@ -4,7 +4,7 @@ resources:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net

View File

@@ -6,7 +6,7 @@ resources:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net

View File

@@ -6,7 +6,7 @@ resources:
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
cpu: "1000m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net

View File

@@ -33,7 +33,7 @@ resources:
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
cpu: "1000m"
probes:
liveness:

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: get-id-bot
@@ -24,7 +24,7 @@ spec:
property: fields[0].value
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: desubot

View File

@@ -30,7 +30,7 @@ spec:
name: get-id-bot
env:
- name: RUST_LOG
value: "info,teloxide::error_handlers=off"
value: "info"

View File

@@ -7,4 +7,3 @@ resources:
- get-id-bot.yaml
- external-secrets.yaml
- desubot.yaml
- restart-job.yaml

View File

@@ -1,56 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tg-bots-restart-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tg-bots-restart-role
rules:
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tg-bots-restart-rb
subjects:
- kind: ServiceAccount
name: tg-bots-restart-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: tg-bots-restart-role
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: tg-bots-daily-restart
spec:
schedule: "0 4 * * *" # every day at 04:00
jobTemplate:
spec:
template:
spec:
serviceAccountName: tg-bots-restart-sa
restartPolicy: OnFailure
containers:
- name: kubectl
image: bitnami/kubectl:latest
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /bin/sh
- -c
- |
kubectl -n "$POD_NAMESPACE" rollout restart deployment/desubot
kubectl -n "$POD_NAMESPACE" rollout restart deployment/get-id-bot


@@ -37,7 +37,7 @@ spec:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
env:
- name: DOMAIN
value: https://vw.hexor.cy


@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: admin-token


@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: outfleet-secrets
@@ -51,7 +51,7 @@ spec:
property: fields[1].value
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: outline-config


@@ -174,7 +174,7 @@ spec:
resources:
limits:
memory: "512Mi"
cpu: "750m"
cpu: "500m"
requests:
memory: "256Mi"
cpu: "250m"


@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: xandikos
namespace: argocd
spec:
project: apps
destination:
namespace: xandikos
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/xandikos
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true


@@ -1,70 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: xandikos
labels:
app: xandikos
spec:
selector:
matchLabels:
app: xandikos
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: xandikos
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
volumes:
- name: storage
hostPath:
path: /k8s/xandikos
type: Directory
containers:
- name: xandikos
image: ghcr.io/jelmer/xandikos:latest
imagePullPolicy: Always
command:
- "python3"
- "-m"
- "xandikos.web"
- "--port=8081"
- "-d/data"
- "--defaults"
- "--listen-address=0.0.0.0"
- "--route-prefix=/dav"
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /
port: 8081
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8081
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
ports:
- name: http
containerPort: 8081
protocol: TCP
volumeMounts:
- name: storage
mountPath: /data


@@ -1,31 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: mmdl-secrets
spec:
target:
name: mmdl-secrets
deletionPolicy: Delete
template:
type: Opaque
data:
DB_DIALECT: 'postgres'
DB_HOST: psql.psql.svc
DB_USER: mmdl
DB_NAME: mmdl
DB_PORT: "5432"
DB_PASS: |-
{{ .pg_pass }}
AES_PASSWORD: |-
{{ .pg_pass }}
data:
- secretKey: pg_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[12].value
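
The `spec.template` above is rendered verbatim into the target Secret, with `{{ .pg_pass }}` substituted from the `data` source below it, so one Vaultwarden field populates both `DB_PASS` and `AES_PASSWORD`. Roughly, ESO produces:

# Rendered target (sketch; secret value redacted)
apiVersion: v1
kind: Secret
metadata:
  name: mmdl-secrets
type: Opaque
stringData:
  DB_DIALECT: postgres
  DB_HOST: psql.psql.svc
  DB_USER: mmdl
  DB_NAME: mmdl
  DB_PORT: "5432"
  DB_PASS: <fields[12].value of item 2a9deb39-ef22-433e-a1be-df1555625e22>
  AES_PASSWORD: <same value>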


@@ -1,47 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: xandikos
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: cal.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mmdl
port:
number: 3000
- path: /dav
pathType: Prefix
backend:
service:
name: xandikos
port:
number: 8081
- path: /.well-known/carddav
pathType: Exact
backend:
service:
name: xandikos
port:
number: 8081
- path: /.well-known/caldav
pathType: Exact
backend:
service:
name: xandikos
port:
number: 8081
tls:
- secretName: xandikos-tls
hosts:
- cal.hexor.cy


@@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
- mmdl-deployment.yaml
- mmdl-service.yaml
- ingress.yaml
- external-secrets.yaml


@@ -1,61 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mmdl
labels:
app: mmdl
spec:
selector:
matchLabels:
app: mmdl
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: mmdl
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: mmdl
image: intriin/mmdl:latest
imagePullPolicy: Always
envFrom:
- secretRef:
name: mmdl-secrets
env:
- name: NEXTAUTH_URL
value: "https://cal.hexor.cy"
- name: CALDAV_SERVER_URL
value: "https://cal.hexor.cy/dav"
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
ports:
- name: http
containerPort: 3000
protocol: TCP


@@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: mmdl
spec:
selector:
app: mmdl
type: ClusterIP
ports:
- name: http
port: 3000
protocol: TCP
targetPort: 3000


@@ -1,16 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: xandikos
labels:
app: xandikos
spec:
selector:
app: xandikos
ports:
- protocol: TCP
port: 8081
targetPort: 8081
name: http
type: ClusterIP


@@ -47,20 +47,3 @@ spec:
server: https://kubernetes.default.svc
sourceRepos:
- ssh://git@gt.hexor.cy:30022/ab/homelab.git
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: desktop
namespace: argocd
spec:
clusterResourceWhitelist:
- group: '*'
kind: '*'
description: Hexor Home Lab Desktop Apps
destinations:
- namespace: '*'
server: https://kubernetes.default.svc
sourceRepos:
- ssh://git@gt.hexor.cy:30022/ab/homelab.git


@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: oidc-creds


@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: argo-cd
repo: https://argoproj.github.io/argo-helm
version: 9.1.4
version: 8.1.3
releaseName: argocd
namespace: argocd
valuesFile: values.yaml
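
These `helmCharts` entries rely on kustomize's Helm chart inflation, which is gated behind a flag; rendering the overlay locally (and in Argo CD, via its build options) needs it enabled. A sketch, with the overlay path assumed from the repo layout seen elsewhere in this diff:

# Local render (hypothetical path):
#   kustomize build --enable-helm k8s/core/argocd
# Argo CD side: the repo-server needs the same flag via argocd-cm:
data:
  kustomize.buildOptions: --enable-helm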


@@ -2,7 +2,7 @@
global:
domain: ag.hexor.cy
nodeSelector: &nodeSelector
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
logging:
format: text
@@ -55,15 +55,15 @@ configs:
controller:
replicas: 1
nodeSelector:
<<: *nodeSelector
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# Dex OIDC provider
dex:
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
enabled: false
# Standard Redis disabled because Redis HA is enabled
@@ -86,7 +86,7 @@ redis-ha:
server:
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
@@ -99,11 +99,8 @@ server:
# Repository Server
repoServer:
replicas: 1
livenessProbe:
timeoutSeconds: 10
periodSeconds: 60
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# ApplicationSet Controller
@@ -111,7 +108,7 @@ applicationSet:
enabled: true # Enabled by default
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# Notifications Controller
@@ -119,5 +116,5 @@ notifications:
enabled: true # Enabled by default
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add notifiers, triggers, templates configurations if needed
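
The anchored form on the removed side of this hunk deduplicated the pin with a YAML 1.1 anchor and merge key: `&nodeSelector` names the mapping once, and each `<<: *nodeSelector` splices it in, so the replacement simply inlines the same hostname everywhere. The pattern in isolation:

global:
  nodeSelector: &nodeSelector            # define once, name the mapping
    kubernetes.io/hostname: master.tail2fe2d.ts.net
controller:
  nodeSelector:
    <<: *nodeSelector                    # merge the anchored mapping here
# After parsing, controller.nodeSelector equals global.nodeSelector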


@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: authentik-creds


@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: authentik
repo: https://charts.goauthentik.io
version: 2025.10.1
version: 2025.8.1
releaseName: authentik
namespace: authentik
valuesFile: values.yaml


@@ -1,6 +1,6 @@
global:
image:
tag: "2025.10.1"
tag: "2025.8.1"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
@@ -47,7 +47,6 @@ server:
- minecraft.hexor.cy # Minecraft UI and server
- pass.hexor.cy # k8s-secret for openai
- ps.hexor.cy # pasarguard UI
# - rw.hexor.cy # RemnaWave UI
tls:
- secretName: idm-tls
hosts:


@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: cloudflare-creds
@@ -22,7 +22,7 @@ spec:
key: 8ae1dcb1-1182-48a1-8733-ca1144ea754b
property: fields[0].value
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: aws-creds


@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: cert-manager
repo: https://charts.jetstack.io
version: 1.19.1
version: 1.17.1
releaseName: cert-manager
namespace: cert-manager
valuesFile: values.yaml


@@ -1,6 +1,2 @@
crds:
enabled: true
prometheus:
enabled: true
servicemonitor:
enabled: true


@@ -18,4 +18,4 @@ spec:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
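
`ServerSideApply=true` is load-bearing for cert-manager specifically: its CRDs are large enough to overflow the ~256 KiB annotation size limit that client-side apply hits when storing the full manifest in `kubectl.kubernetes.io/last-applied-configuration`, whereas server-side apply tracks ownership in `managedFields` instead. In context:

syncOptions:
  - CreateNamespace=true
  - ServerSideApply=true   # no last-applied annotation; field ownership via managedFields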


@@ -8,7 +8,7 @@
# BW_HOST: base64(url)
# BW_USERNAME: base64(name)
# BW_PASSWORD: base64(pass)
# Vaultwarden bot - 81212111-6350-4069-8bcf-19a67d3964a5
# 81212111-6350-4069-8bcf-19a67d3964a5
---
apiVersion: apps/v1
kind: Deployment
@@ -42,10 +42,10 @@ spec:
resources:
requests:
memory: "128Mi"
cpu: "300m"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "800m"
cpu: "500m"
env:
- name: BW_HOST
valueFrom:
@@ -128,7 +128,7 @@ spec:
app.kubernetes.io/name: external-secrets
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: vaultwarden-login
@@ -141,7 +141,7 @@ spec:
result:
jsonPath: "$.data.{{ .remoteRef.property }}"
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: vaultwarden-fields


@@ -2,12 +2,13 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
- bitwarden-store.yaml
helmCharts:
- name: external-secrets
repo: https://charts.external-secrets.io
version: 1.1.0
version: 0.16.2
releaseName: external-secrets
namespace: external-secrets
valuesFile: values.yaml


@@ -3,6 +3,5 @@ kind: Kustomization
resources:
- app.yaml
- nfs-storage.yaml
- coredns-internal-resolve.yaml


@@ -1,13 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
server: 10.0.5.2
share: /mnt/storage/Storage/PVC
reclaimPolicy: Retain
volumeBindingMode: Immediate
mountOptions:
- vers=4.1
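
For context, this StorageClass was the dynamic-provisioning entry point: any PVC naming it makes the NFS CSI driver carve a subdirectory under the exported share. A hypothetical claim against it would have looked like:

# Sketch: how a workload would have requested storage from this class
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data        # hypothetical claim name
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: nfs-csi
  resources:
    requests:
      storage: 10Gi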


@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: postgres-creds
@@ -90,7 +90,7 @@ spec:
key: 832042b9-7edb-4f4c-9254-3c8884ba9733
property: fields[2].value
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: postgres-users
@@ -119,12 +119,6 @@ spec:
{{ .outfleet_rs }}
USER_pasarguard: |-
{{ .pasarguard }}
USER_remnawave: |-
{{ .remnawave }}
USER_umami: |-
{{ .umami }}
USER_mmdl: |-
{{ .mmdl }}
data:
- secretKey: authentik
sourceRef:
@@ -225,36 +219,3 @@ spec:
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[9].value
- secretKey: remnawave
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[10].value
- secretKey: umami
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[11].value
- secretKey: mmdl
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[12].value
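
The `USER_<name>` keys land in a single rendered Secret that the `user-creation` sidecar in the psql Deployments (next hunks) consumes; its script body is not visible in this diff, but the naming convention implies one database role per key, with the field value as that role's password. Shape of the rendered Secret (sketch):

# Rendered Secret (sketch; target name and values not shown in this hunk)
type: Opaque
stringData:
  USER_authentik: <password from the Vaultwarden item>
  USER_outfleet_rs: <...>
  USER_pasarguard: <...>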


@@ -28,7 +28,7 @@ spec:
type: DirectoryOrCreate
containers:
- name: psql
image: 'bitnamilegacy/postgresql:17'
image: 'bitnami/postgresql:17'
env:
- name: POSTGRESQL_PASSWORD
valueFrom:
@@ -63,7 +63,7 @@ spec:
containerPort: 9187
protocol: TCP
- name: user-creation
image: 'bitnamilegacy/postgresql:17'
image: 'bitnami/postgresql:17'
command:
- /bin/bash
- -c
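
The image swap here relates to Bitnami's 2025 catalog change: freely pullable archive tags were moved to the `bitnamilegacy` Docker Hub org, which is frozen and receives no further updates, so whichever name is used should be treated as a pin rather than a moving target. Sketch:

containers:
  - name: psql
    # "bitnamilegacy" hosts the archived images; tags there are frozen.
    image: 'bitnamilegacy/postgresql:17'
    # Digest-pinning (hypothetical) is the safest form of either choice:
    # image: 'bitnamilegacy/postgresql@sha256:<digest>'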


@@ -28,7 +28,7 @@ spec:
type: DirectoryOrCreate
containers:
- name: psql
image: 'bitnamilegacy/postgresql:17'
image: 'bitnami/postgresql:17'
env:
- name: POSTGRESQL_PASSWORD
valueFrom:
@@ -60,7 +60,7 @@ spec:
containerPort: 9187
protocol: TCP
- name: user-creation
image: 'bitnamilegacy/postgresql:17'
image: 'bitnami/postgresql:17'
command:
- /bin/bash
- -c


@@ -1,5 +1,5 @@
image:
tag: "latest"
tag: "9.9"
pullPolicy: Always
env:
email: "postgres@hexor.cy"


@@ -13,6 +13,9 @@ spec:
targetRevision: HEAD
path: k8s/core/prom-stack
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true


@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: grafana-admin
@@ -79,83 +79,3 @@ spec:
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[2].value
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: alertmanager-telegram
spec:
target:
name: alertmanager-telegram-secret
deletionPolicy: Delete
template:
type: Opaque
data:
TELEGRAM_BOT_TOKEN: |-
{{ .bot_token }}
TELEGRAM_CHAT_ID: |-
{{ .chat_id }}
data:
- secretKey: bot_token
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: eca0fb0b-3939-40a8-890a-6294863e5a65
property: fields[0].value
- secretKey: chat_id
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: eca0fb0b-3939-40a8-890a-6294863e5a65
property: fields[1].value
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: grafana-telegram
spec:
target:
name: grafana-telegram
deletionPolicy: Delete
template:
type: Opaque
data:
bot-token: |-
{{ .bot_token }}
chat-id: |-
{{ .chat_id }}
data:
- secretKey: bot_token
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: eca0fb0b-3939-40a8-890a-6294863e5a65
property: fields[0].value
- secretKey: chat_id
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: eca0fb0b-3939-40a8-890a-6294863e5a65
property: fields[1].value


@@ -1,152 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-alerting
namespace: prometheus
data:
rules.yaml: |
apiVersion: 1
groups:
- orgId: 1
name: pasarguard_alerts
folder: Kubernetes
interval: 1m
rules:
- uid: pasarguard_cpu_throttling
title: VPN CPU Throttle
condition: B
data:
- refId: A
relativeTimeRange:
from: 600
to: 0
datasourceUid: P76F38748CEC837F0
model:
expr: 'rate(container_cpu_cfs_throttled_periods_total{container="pasarguard-node"}[5m])'
refId: A
intervalMs: 1000
maxDataPoints: 43200
- refId: B
relativeTimeRange:
from: 600
to: 0
datasourceUid: __expr__
model:
conditions:
- evaluator:
params:
- 0.1
type: gt
operator:
type: and
query:
params: []
datasource:
type: __expr__
uid: __expr__
expression: A
reducer: last
refId: B
type: reduce
noDataState: NoData
execErrState: Alerting
for: 5m
annotations:
pod: '{{ $labels.pod }}'
node: '{{ $labels.node }}'
namespace: '{{ $labels.namespace }}'
throttle_rate: '{{ printf "%.2f" $values.A }}'
summary: 'VPN node throttling CPU'
labels:
severity: warning
- orgId: 1
name: kubernetes_alerts
folder: Kubernetes
interval: 30s
rules:
- uid: node_not_ready
title: Kubernetes Node Not Ready
condition: B
data:
- refId: A
relativeTimeRange:
from: 300
to: 0
datasourceUid: P76F38748CEC837F0
model:
expr: 'kube_node_status_condition{condition="Ready",status="true"} == 0'
refId: A
intervalMs: 1000
maxDataPoints: 43200
- refId: B
relativeTimeRange:
from: 300
to: 0
datasourceUid: __expr__
model:
conditions:
- evaluator:
params:
- 0
type: gt
operator:
type: and
query:
params: []
datasource:
type: __expr__
uid: __expr__
expression: A
reducer: last
refId: B
type: reduce
noDataState: Alerting
execErrState: Alerting
for: 0s
annotations:
node: '{{ $labels.node }}'
condition: '{{ $labels.condition }}'
summary: 'Kubernetes node is not ready'
labels:
severity: critical
contactpoints.yaml: |
apiVersion: 1
contactPoints:
- orgId: 1
name: telegram
receivers:
- uid: telegram_default
type: telegram
disableResolveMessage: false
settings:
bottoken: $TELEGRAM_BOT_TOKEN
chatid: "124317807"
message: |
{{ if eq .Status "firing" }}🔥 FIRING{{ else }}✅ RESOLVED{{ end }}
{{ range .Alerts }}
📊 <b>{{ .Labels.alertname }}</b>
{{ .Annotations.summary }}
{{ if .Annotations.node }}🖥 <b>Node:</b> <code>{{ .Annotations.node }}</code>{{ end }}
{{ if .Annotations.pod }}📦 <b>Pod:</b> <code>{{ .Annotations.pod }}</code>{{ end }}
{{ if .Annotations.namespace }}📁 <b>Namespace:</b> <code>{{ .Annotations.namespace }}</code>{{ end }}
{{ if .Annotations.throttle_rate }}⚠️ <b>Throttling rate:</b> {{ .Annotations.throttle_rate }}{{ end }}
🔗 <a href="{{ .GeneratorURL }}">View in Grafana</a>
{{ end }}
parse_mode: HTML
policies.yaml: |
apiVersion: 1
policies:
- orgId: 1
receiver: telegram
group_by:
- grafana_folder
- alertname
group_wait: 10s
group_interval: 5m
repeat_interval: 4h


@@ -38,10 +38,6 @@ datasources:
url: http://prometheus-kube-prometheus-prometheus.prometheus.svc:9090
access: proxy
isDefault: true
- name: Loki
type: loki
url: http://loki-gateway.prometheus.svc:80
access: proxy
ingress:
enabled: true
@@ -56,30 +52,3 @@ ingress:
hosts:
- '*.hexor.cy'
extraConfigmapMounts:
- name: grafana-alerting-rules
mountPath: /etc/grafana/provisioning/alerting/rules.yaml
configMap: grafana-alerting
subPath: rules.yaml
readOnly: true
- name: grafana-alerting-contactpoints
mountPath: /etc/grafana/provisioning/alerting/contactpoints.yaml
configMap: grafana-alerting
subPath: contactpoints.yaml
readOnly: true
- name: grafana-alerting-policies
mountPath: /etc/grafana/provisioning/alerting/policies.yaml
configMap: grafana-alerting
subPath: policies.yaml
readOnly: true
envValueFrom:
TELEGRAM_BOT_TOKEN:
secretKeyRef:
name: grafana-telegram
key: bot-token
TELEGRAM_CHAT_ID:
secretKeyRef:
name: grafana-telegram
key: chat-id
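
The removed wiring worked because Grafana interpolates `$VAR` references in provisioning files from its process environment: `envValueFrom` injects the bot token from the `grafana-telegram` Secret, and the mounted `contactpoints.yaml` then resolves `bottoken: $TELEGRAM_BOT_TOKEN` at load time. The chain, end to end:

# Secret -> env -> provisioning file (sketch)
envValueFrom:
  TELEGRAM_BOT_TOKEN:
    secretKeyRef:
      name: grafana-telegram   # created by the deleted ExternalSecret above
      key: bot-token
# /etc/grafana/provisioning/alerting/contactpoints.yaml then contains:
#   settings:
#     bottoken: $TELEGRAM_BOT_TOKEN   # expanded by Grafana at startup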


@@ -2,14 +2,14 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
- persistentVolume.yaml
- external-secrets.yaml
- grafana-alerting-configmap.yaml
helmCharts:
- name: kube-prometheus-stack
repo: https://prometheus-community.github.io/helm-charts
version: 79.7.1
version: 70.4.2
releaseName: prometheus
namespace: prometheus
valuesFile: prom-values.yaml
@@ -17,24 +17,9 @@ helmCharts:
- name: grafana
repo: https://grafana.github.io/helm-charts
version: 10.2.0
version: 8.11.4
releaseName: grafana
namespace: prometheus
valuesFile: grafana-values.yaml
includeCRDs: true
- name: loki
repo: https://grafana.github.io/helm-charts
version: 6.29.0
releaseName: loki
namespace: prometheus
valuesFile: loki-values.yaml
includeCRDs: true
- name: promtail
repo: https://grafana.github.io/helm-charts
version: 6.16.6
releaseName: promtail
namespace: prometheus
valuesFile: promtail-values.yaml


@@ -1,75 +0,0 @@
# Loki SingleBinary mode - optimal for homelab
deploymentMode: SingleBinary
loki:
auth_enabled: false
commonConfig:
replication_factor: 1
path_prefix: /var/loki
schemaConfig:
configs:
- from: 2024-01-01
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
storage:
type: filesystem
filesystem:
chunks_directory: /var/loki/chunks
rules_directory: /var/loki/rules
limits_config:
reject_old_samples: false
ingestion_rate_mb: 16
ingestion_burst_size_mb: 32
max_query_parallelism: 32
volume_enabled: true
singleBinary:
replicas: 1
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
persistence:
enabled: true
size: 50Gi
storageClass: ""
# Disable distributed mode components
read:
replicas: 0
write:
replicas: 0
backend:
replicas: 0
# Disable memcached (not needed for SingleBinary)
chunksCache:
enabled: false
resultsCache:
enabled: false
# Gateway for Loki access
gateway:
enabled: true
replicas: 1
service:
type: ClusterIP
# Disable tests and canary
test:
enabled: false
lokiCanary:
enabled: false
# Monitoring
monitoring:
dashboards:
enabled: false
rules:
enabled: false
serviceMonitor:
enabled: false
selfMonitoring:
enabled: false


@@ -1,49 +1,19 @@
grafana:
enabled: false
alertmanager:
config:
global:
telegram_api_url: "https://api.telegram.org"
route:
group_by: ['alertname', 'cluster', 'service']
group_wait: 10s
group_interval: 10s
repeat_interval: 12h
receiver: 'telegram'
receivers:
- name: 'telegram'
telegram_configs:
- bot_token: '${TELEGRAM_BOT_TOKEN}'
chat_id: ${TELEGRAM_CHAT_ID}
parse_mode: 'HTML'
message: |
{{ range .Alerts }}
<b>{{ .Labels.alertname }}</b>
{{ if .Labels.severity }}<b>Severity:</b> {{ .Labels.severity }}{{ end }}
<b>Status:</b> {{ .Status }}
{{ if .Annotations.summary }}<b>Summary:</b> {{ .Annotations.summary }}{{ end }}
{{ if .Annotations.description }}<b>Description:</b> {{ .Annotations.description }}{{ end }}
{{ end }}
alertmanagerSpec:
secrets:
- alertmanager-telegram-secret
prometheus:
prometheusSpec:
enableRemoteWriteReceiver: true
additionalScrapeConfigs:
- job_name: xray_vpn
metrics_path: /scrape
- job_name: outline_vpn
static_configs:
- targets: ['cy.tail2fe2d.ts.net:9550']
labels: {job: cy}
- targets: ['x86.tail2fe2d.ts.net:9550']
labels: {job: am}
- targets: ['jp.tail2fe2d.ts.net:9550']
labels: {job: jp}
- targets: ['100.117.24.104:9095']
labels: {instance: cy}
- targets: ['100.117.24.104:9096']
labels: {instance: am}
- targets: ['100.117.24.104:9097']
labels: {instance: jp}
- targets: ['100.117.24.104:9098']
labels: {instance: bg}
- job_name: cs_16_server
static_configs:
- targets: ['prom-a2s-exporter.counter-strike.svc:9841']


@@ -1,37 +0,0 @@
# Promtail - log collection agent for all cluster pods
config:
clients:
- url: http://loki-gateway.prometheus.svc:80/loki/api/v1/push
# DaemonSet - runs on every node
daemonset:
enabled: true
# Tolerations for master/control-plane nodes
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
# Init container to increase inotify limits
initContainer:
- name: init-inotify
image: docker.io/busybox:1.36
imagePullPolicy: IfNotPresent
command:
- sh
- -c
- sysctl -w fs.inotify.max_user_instances=512
securityContext:
privileged: true
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 200m
memory: 128Mi

Some files were not shown because too many files have changed in this diff.