Compare commits


1 Commit

Author: Gitea Actions Bot
SHA1: 0821ed8f46
Date: 2025-11-07 14:12:40 +00:00
Message: Auto-update README with current k8s applications

Generated by CI/CD workflow on 2025-11-07 14:12:40

This PR updates the README.md file with the current list of applications found in the k8s/ directory structure.

All checks were successful: Terraform / Terraform (pull_request), successful in 37s
125 changed files with 376 additions and 3043 deletions
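For context, the README table is produced by a generator rather than edited by hand; the workflow diffs below invoke .gitea/scripts/generate-k8s-wiki.py for the wiki page, and a sibling script presumably builds the README badge table. A minimal sketch of such a generator follows. It is an illustration only: the k8s/apps/<name>/ layout and the badge URL format are assumptions inferred from the README diff further down, not the actual script.

#!/usr/bin/env python3
"""Hypothetical sketch of a README app-table generator.

Assumptions (not taken from the actual script): applications live in
k8s/apps/<name>/, and each row uses the ArgoCD badge URL format that
appears in the README diff in this PR.
"""
import sys
from pathlib import Path

BADGE_ROW = (
    "| **{app}** | [![{app}](https://ag.hexor.cy/api/badge?name={app}&revision=true)]"
    "(https://ag.hexor.cy/applications/argocd/{app}) |"
)

def generate(k8s_dir: str, out_file: str) -> None:
    # One row per application directory, sorted so reruns produce a stable diff.
    apps = sorted(p.name for p in Path(k8s_dir, "apps").iterdir() if p.is_dir())
    rows = "\n".join(BADGE_ROW.format(app=app) for app in apps)
    Path(out_file).write_text("| App | Status |\n| --- | --- |\n" + rows + "\n")

if __name__ == "__main__":
    generate(sys.argv[1], sys.argv[2])  # e.g. python3 generate.py k8s/ README.md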

View File

@@ -30,29 +30,21 @@ jobs:
cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
- name: Terraform Init
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform init
working-directory: ./terraform/authentik
- name: Terraform Format
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform fmt -check
continue-on-error: true
working-directory: ./terraform/authentik
- name: Terraform Apply
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform apply -var-file proxy-apps.tfvars -var-file oauth2-apps.tfvars -var-file terraform.tfvars -var-file groups.tfvars -input=false -auto-approve -parallelism=100
working-directory: ./terraform/authentik
- name: Generate Wiki Content
if: success()
continue-on-error: true
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: |
echo "📋 Starting Wiki generation..."
cd ./terraform/authentik

View File

@@ -22,13 +22,12 @@ jobs:
- name: Install Python dependencies
run: |
python3 -m venv .venv
.venv/bin/pip install pyyaml
pip install pyyaml
- name: Generate K8s Services Wiki
run: |
echo "📋 Starting K8s wiki generation..."
.venv/bin/python .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
python3 .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
if [ -f "Kubernetes-Services.md" ]; then
echo "✅ Wiki content generated successfully"

View File

@@ -40,16 +40,13 @@ ArgoCD homelab project
| **greece-notifier** | [![greece-notifier](https://ag.hexor.cy/api/badge?name=greece-notifier&revision=true)](https://ag.hexor.cy/applications/argocd/greece-notifier) |
| **hexound** | [![hexound](https://ag.hexor.cy/api/badge?name=hexound&revision=true)](https://ag.hexor.cy/applications/argocd/hexound) |
| **immich** | [![immich](https://ag.hexor.cy/api/badge?name=immich&revision=true)](https://ag.hexor.cy/applications/argocd/immich) |
| **iperf3** | [![iperf3](https://ag.hexor.cy/api/badge?name=iperf3&revision=true)](https://ag.hexor.cy/applications/argocd/iperf3) |
| **jellyfin** | [![jellyfin](https://ag.hexor.cy/api/badge?name=jellyfin&revision=true)](https://ag.hexor.cy/applications/argocd/jellyfin) |
| **k8s-secrets** | [![k8s-secrets](https://ag.hexor.cy/api/badge?name=k8s-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
| **khm** | [![khm](https://ag.hexor.cy/api/badge?name=khm&revision=true)](https://ag.hexor.cy/applications/argocd/khm) |
| **n8n** | [![n8n](https://ag.hexor.cy/api/badge?name=n8n&revision=true)](https://ag.hexor.cy/applications/argocd/n8n) |
| **ollama** | [![ollama](https://ag.hexor.cy/api/badge?name=ollama&revision=true)](https://ag.hexor.cy/applications/argocd/ollama) |
| **paperless** | [![paperless](https://ag.hexor.cy/api/badge?name=paperless&revision=true)](https://ag.hexor.cy/applications/argocd/paperless) |
| **pasarguard** | [![pasarguard](https://ag.hexor.cy/api/badge?name=pasarguard&revision=true)](https://ag.hexor.cy/applications/argocd/pasarguard) |
| **qbittorent-nas** | [![qbittorent-nas](https://ag.hexor.cy/api/badge?name=qbittorent-nas&revision=true)](https://ag.hexor.cy/applications/argocd/qbittorent-nas) |
| **remnawave** | [![remnawave](https://ag.hexor.cy/api/badge?name=remnawave&revision=true)](https://ag.hexor.cy/applications/argocd/remnawave) |
| **rustdesk** | [![rustdesk](https://ag.hexor.cy/api/badge?name=rustdesk&revision=true)](https://ag.hexor.cy/applications/argocd/rustdesk) |
| **sonarr-stack** | [![sonarr-stack](https://ag.hexor.cy/api/badge?name=sonarr-stack&revision=true)](https://ag.hexor.cy/applications/argocd/sonarr-stack) |
| **stirling-pdf** | [![stirling-pdf](https://ag.hexor.cy/api/badge?name=stirling-pdf&revision=true)](https://ag.hexor.cy/applications/argocd/stirling-pdf) |
@@ -57,7 +54,6 @@ ArgoCD homelab project
| **tg-bots** | [![tg-bots](https://ag.hexor.cy/api/badge?name=tg-bots&revision=true)](https://ag.hexor.cy/applications/argocd/tg-bots) |
| **vaultwarden** | [![vaultwarden](https://ag.hexor.cy/api/badge?name=vaultwarden&revision=true)](https://ag.hexor.cy/applications/argocd/vaultwarden) |
| **vpn** | [![vpn](https://ag.hexor.cy/api/badge?name=vpn&revision=true)](https://ag.hexor.cy/applications/argocd/vpn) |
| **xandikos** | [![xandikos](https://ag.hexor.cy/api/badge?name=xandikos&revision=true)](https://ag.hexor.cy/applications/argocd/xandikos) |
</td>
</tr>

View File

@@ -36,7 +36,7 @@ spec:
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
cpu: "1000m"
env:
- name: GITEA__service__REGISTER_MANUAL_CONFIRM
value: "true"
@@ -77,8 +77,8 @@ spec:
labels:
app: gitea-runner
spec:
#nodeSelector:
# kubernetes.io/hostname: home.homenet
nodeSelector:
kubernetes.io/hostname: home.homenet
volumes:
- name: docker-sock
hostPath:
@@ -90,30 +90,27 @@ spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- home.homenet
- weight: 2
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- master.tail2fe2d.ts.net
- weight: 3
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- it.tail2fe2d.ts.net
- ch.tail2fe2d.ts.net
- us.tail2fe2d.ts.net
- home.homenet
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- master.tail2fe2d.ts.net
- weight: 2
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- nas.homenet
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
@@ -121,9 +118,7 @@ spec:
operator: In
values:
- home.homenet
- it.tail2fe2d.ts.net
- ch.tail2fe2d.ts.net
- us.tail2fe2d.ts.net
- nas.homenet
- master.tail2fe2d.ts.net
containers:
- name: gitea-runner
@@ -134,7 +129,7 @@ spec:
memory: "256Mi"
ephemeral-storage: "1Gi" # reserve ephemeral storage
limits:
cpu: "3000m"
cpu: "2000m"
memory: "4Gi"
ephemeral-storage: "28Gi" # hard cap for /data usage
volumeMounts:

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitea-runner-token
@@ -24,7 +24,7 @@ spec:
property: login.password
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitea-recapcha-creds

View File

@@ -30,7 +30,7 @@ spec:
cpu: "100m"
memory: "256Mi"
limits:
cpu: "3000m"
cpu: "2000m"
memory: "1Gi"
volumeMounts:
- name: data

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: greece-notifier-creds

View File

@@ -30,7 +30,7 @@ spec:
cpu: "50m"
limits:
memory: "128Mi"
cpu: "300m"
cpu: "200m"
command:
- git
- clone
@@ -49,7 +49,7 @@ spec:
cpu: "50m"
limits:
memory: "256Mi"
cpu: "300m"
cpu: "200m"
volumeMounts:
- name: hexound-repo
mountPath: /var/www/html

View File

@@ -23,7 +23,7 @@ spec:
cpu: "500m"
limits:
memory: "4Gi"
cpu: "3000m"
cpu: "2000m"
ports:
- containerPort: 2283
env:
@@ -74,14 +74,19 @@ spec:
- nas.homenet
volumes:
- name: upload-storage
persistentVolumeClaim:
claimName: immich-upload-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/immich/library/
readOnly: false
- name: gphoto-storage
persistentVolumeClaim:
claimName: immich-gphoto-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/immich/GPHOTO/
readOnly: false
- name: camera
persistentVolumeClaim:
claimName: immich-camera-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/Syncthing-repos/PhoneCamera/
readOnly: true
- name: localtime
hostPath:
@@ -155,7 +160,7 @@ spec:
cpu: "1000m"
limits:
memory: "8Gi"
cpu: "6000m"
cpu: "4000m"
env:
- name: TZ
value: Asia/Nicosia
@@ -196,7 +201,7 @@ spec:
cpu: "100m"
limits:
memory: "512Mi"
cpu: "750m"
cpu: "500m"
readinessProbe:
exec:
command: ["redis-cli", "ping"]

View File

@@ -1,52 +1,79 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-upload-pv
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/library
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-upload-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-upload-pv
resources:
requests:
storage: 500Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-gphoto-pv
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/GPHOTO
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-gphoto-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-gphoto-pv
resources:
requests:
storage: 500Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-db-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/db
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-db-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-db-pv
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-camera-pvc
namespace: immich
spec:
accessModes:
- ReadOnlyMany
storageClassName: nfs-csi
resources:
requests:
storage: 100Gi

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: iperf3
namespace: argocd
spec:
project: apps
destination:
namespace: iperf3
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/iperf3
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,92 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: iperf3-server
spec:
selector:
matchLabels:
app: iperf3-server
template:
metadata:
labels:
app: iperf3-server
spec:
serviceAccountName: iperf3-server
subdomain: iperf3
initContainers:
- name: create-service
image: bitnami/kubectl:latest
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- /bin/bash
- -c
- |
# Clean node name for service name
NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
SERVICE_NAME="iperf3-${NODE_CLEAN}"
# Create service for this pod
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: iperf3
labels:
app: iperf3-node-service
target-node: "${NODE_NAME}"
spec:
type: ClusterIP
ports:
- name: iperf3
port: 5201
protocol: TCP
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: iperf3
labels:
app: iperf3-node-service
target-node: "${NODE_NAME}"
subsets:
- addresses:
- ip: ${POD_IP}
ports:
- name: iperf3
port: 5201
protocol: TCP
EOF
containers:
- name: iperf3-server
image: networkstatic/iperf3:latest
args: ["-s"]
ports:
- containerPort: 5201
protocol: TCP
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "256Mi"
cpu: "750m"
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists

View File

@@ -1,92 +0,0 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: iperf3-exporter
labels:
app: iperf3-exporter
spec:
selector:
matchLabels:
app: iperf3-exporter
template:
metadata:
labels:
app: iperf3-exporter
spec:
serviceAccountName: iperf3-server
initContainers:
- name: create-exporter-service
image: bitnami/kubectl:latest
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- /bin/bash
- -c
- |
NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
SERVICE_NAME="iperf3-exporter-${NODE_CLEAN}"
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: iperf3
labels:
app: iperf3-exporter-service
target-node: "${NODE_NAME}"
spec:
type: ClusterIP
ports:
- name: metrics
port: 9579
protocol: TCP
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: iperf3
labels:
app: iperf3-exporter-service
target-node: "${NODE_NAME}"
subsets:
- addresses:
- ip: ${POD_IP}
ports:
- name: metrics
port: 9579
protocol: TCP
EOF
containers:
- name: iperf3-exporter
image: ghcr.io/edgard/iperf3_exporter:1.2.2
ports:
- containerPort: 9579
name: metrics
protocol: TCP
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "300m"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists

View File

@@ -1,15 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: iperf3-exporter
labels:
app: iperf3-exporter
spec:
selector:
app: iperf3-exporter
ports:
- name: metrics
protocol: TCP
port: 9579
targetPort: 9579

View File

@@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- rbac.yaml
- daemonset.yaml
- service-headless.yaml
- iperf3-exporter-daemonset.yaml
- iperf3-exporter-service.yaml
- servicemonitor.yaml

View File

@@ -1,36 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: iperf3-server
namespace: iperf3
labels:
app: iperf3-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: iperf3-service-manager
namespace: iperf3
labels:
app: iperf3-server
rules:
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: iperf3-service-manager
namespace: iperf3
labels:
app: iperf3-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: iperf3-service-manager
subjects:
- kind: ServiceAccount
name: iperf3-server
namespace: iperf3

View File

@@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: iperf3
spec:
clusterIP: None
selector:
app: iperf3-server
ports:
- name: iperf3
protocol: TCP
port: 5201
targetPort: 5201

View File

@@ -1,122 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: iperf3-exporter
labels:
app: iperf3-exporter
release: prometheus
spec:
selector:
matchLabels:
app: iperf3-exporter
endpoints:
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-ch.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-ch.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-us.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-us.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-iris.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-iris.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-home.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-home.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-master.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-master.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-it.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-it.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-nas.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-nas.iperf3.svc:9579
- port: metrics
path: /probe
interval: 5m
scrapeTimeout: 30s
params:
target: ['iperf3-spb.iperf3.svc.cluster.local:5201']
period: ['10s']
streams: ['4']
relabelings:
- sourceLabels: [__param_target]
targetLabel: instance
- targetLabel: __address__
replacement: iperf3-exporter-spb.iperf3.svc:9579
metricRelabelings:
- sourceLabels: [__name__]
regex: iperf3_(.+)
targetLabel: __name__
replacement: network_${1}

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: vpn-creds
@@ -76,14 +76,11 @@ spec:
secretKeyRef:
name: vpn-creds
key: ss_link
command: ["/bin/bash", "-c", "rm /etc/shadowsocks-rust/config.json && sslocal --server-url $SS_LINK --local-addr 127.0.0.1:8081 -U --protocol http"]
command: ["/bin/bash", "-c", "rm /etc/shadowsocks-rust/config.json && sslocal --online-config-url $SS_LINK --local-addr 127.0.0.1:8081 -U --protocol http"]
resources:
requests:
memory: "64Mi"
cpu: "300m"
limits:
memory: "128Mi"
cpu: "300m"
cpu: "200m"
---
apiVersion: v1
kind: Service

View File

@@ -1,12 +1,12 @@
image:
tag: 10.11.4
tag: 10.10.7
resources:
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "8Gi"
cpu: "6000m"
cpu: "4000m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
persistence:
@@ -36,40 +36,8 @@ ingress:
paths:
- path: /
pathType: Prefix
- host: us.hexor.cy
paths:
- path: /
pathType: Prefix
- host: ch.hexor.cy
paths:
- path: /
pathType: Prefix
- host: jp.hexor.cy
paths:
- path: /
pathType: Prefix
- host: spb.hexor.cy
paths:
- path: /
pathType: Prefix
- host: cy.hexor.cy
paths:
- path: /
pathType: Prefix
- host: am.hexor.cy
paths:
- path: /
pathType: Prefix
- host: de.hexor.cy
paths:
- path: /
pathType: Prefix
- host: it.hexor.cy
paths:
- path: /
pathType: Prefix
tls:
- secretName: jellyfin-tls
hosts:
- '*.hexor.cy'
- 'jf.hexor.cy'

View File

@@ -19,7 +19,7 @@ spec:
kubernetes.io/os: linux
containers:
- name: secret-reader
image: ultradesu/k8s-secrets:0.2.1
image: ultradesu/k8s-secrets:0.1.1
imagePullPolicy: Always
args:
- "--secrets"
@@ -28,7 +28,6 @@ spec:
- "k8s-secret"
- "--port"
- "3000"
- "--webhook"
ports:
- containerPort: 3000
name: http
@@ -41,7 +40,7 @@ spec:
cpu: "50m"
limits:
memory: "128Mi"
cpu: "150m"
cpu: "100m"
livenessProbe:
httpGet:
path: /health

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: openai-creds

View File

@@ -29,7 +29,7 @@ spec:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
command:
- /bin/sh
- -c

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: khm-pg-creds

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: n8n
namespace: argocd
spec:
project: apps
destination:
namespace: n8n
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/n8n
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,37 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: credentials
spec:
target:
name: credentials
deletionPolicy: Delete
template:
type: Opaque
data:
postgres-password: "{{ .psql | trim }}"
N8N_ENCRYPTION_KEY: "{{ .enc_pass | trim }}"
data:
- secretKey: psql
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[13].value
- secretKey: enc_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 18c92d73-9637-4419-8642-7f7b308460cb
property: fields[0].value

View File

@@ -1,29 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- external-secrets.yaml
- storage.yaml
helmCharts:
- name: n8n
repo: https://community-charts.github.io/helm-charts
version: 1.16.28
releaseName: n8n
namespace: n8n
valuesFile: values-n8n.yaml
includeCRDs: true
- name: searxng
repo: https://unknowniq.github.io/helm-charts/
version: 0.1.3
releaseName: searxng
namespace: n8n
valuesFile: values-searxng.yaml
includeCRDs: true
- name: yacy
repo: https://raw.githubusercontent.com/yacy/yacy_search_server/master/charts
version: 0.1.0
releaseName: yacy
namespace: n8n
valuesFile: values-yacy.yaml
includeCRDs: true

View File

@@ -1,12 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: n8n-home
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
resources:
requests:
storage: 10Gi

View File

@@ -1,53 +0,0 @@
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
db:
type: postgresdb
main:
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 512m
memory: 512Mi
persistence:
enabled: true
existingClaim: n8n-home
mountPath: /home/node/.n8n
worker:
mode: regular
webhook:
url: https://n8n.hexor.cy
redis:
enabled: true
existingEncryptionKeySecret: credentials
externalPostgresql:
existingSecret: credentials
host: "psql.psql.svc"
username: "n8n"
database: "n8n"
ingress:
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
hosts:
- host: n8n.hexor.cy
paths:
- path: /
pathType: Prefix
tls:
- secretName: n8n-tls
hosts:
- '*.hexor.cy'

View File

@@ -1,24 +0,0 @@
config:
general:
instance_name: "HexorSearXNG"
debug: true
server:
limiter: false
public_instance: false
method: "POST"
search:
safe_search: 0
extraConfig:
botdetection:
ip_lists:
pass_ip:
- '0.0.0.0/0'
- '::0/0'
ip_limit:
filter_link_local: false
link_token: false
valkey:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net

View File

@@ -1,61 +0,0 @@
replicaCount: 1
image:
repository: yacy/yacy_search_server
pullPolicy: IfNotPresent
tag: "latest"
nameOverride: ""
fullnameOverride: ""
serviceAccount:
create: true
automount: true
annotations: {}
name: ""
podAnnotations: {}
podLabels: {}
podSecurityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
service:
type: ClusterIP
port: 8090
ingress:
enabled: false
resources:
limits:
memory: 2Gi
requests:
cpu: 100m
memory: 1Gi
persistence:
enabled: true
size: 10Gi
accessMode: ReadWriteOnce
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
yacy:
adminPassword: "yacy123"
initCrawlURLs:
- "https://www.example.com/"
memory: "Xmx1024m"
network:
mode: "intranet" # standalone mode for local usage

View File

@@ -1,33 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: oidc-secret
spec:
target:
name: oidc-secret
deletionPolicy: Delete
template:
type: Opaque
data:
OAUTH_CLIENT_SECRET: |-
{{ .OAUTH_CLIENT_SECRET }}
OAUTH_CLIENT_ID: |-
{{ .OAUTH_CLIENT_ID }}
data:
- secretKey: OAUTH_CLIENT_SECRET
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
property: fields[0].value
- secretKey: OAUTH_CLIENT_ID
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
property: fields[1].value

View File

@@ -1,9 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- external-secrets.yaml
helmCharts:
- name: ollama
repo: https://otwld.github.io/ollama-helm/
@@ -11,11 +8,4 @@ helmCharts:
releaseName: ollama
namespace: ollama
valuesFile: ollama-values.yaml
includeCRDs: true
- name: open-webui
repo: https://helm.openwebui.com/
version: 8.14.0
releaseName: openweb-ui
namespace: ollama
valuesFile: openweb-ui-values.yaml
includeCRDs: true

View File

@@ -5,4 +5,17 @@ image:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
hosts:
- host: ai.hexor.cy
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- '*.hexor.cy'
secretName: ollama-tls

View File

@@ -1,52 +0,0 @@
clusterDomain: ai.hexor.cy
extraEnvVars:
GLOBAL_LOG_LEVEL: debug
OAUTH_PROVIDER_NAME: authentik
OPENID_PROVIDER_URL: https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration
OPENID_REDIRECT_URI: https://ai.hexor.cy/oauth/oidc/callback
WEBUI_URL: https://ai.hexor.cy
# Allows auto-creation of new users using OAuth. Must be paired with ENABLE_LOGIN_FORM=false.
ENABLE_OAUTH_SIGNUP: true
# Disables user/password login form. Required when ENABLE_OAUTH_SIGNUP=true.
ENABLE_LOGIN_FORM: false
OAUTH_MERGE_ACCOUNTS_BY_EMAIL: true
extraEnvFrom:
- secretRef:
name: oidc-secret
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
ollamaUrls:
- http://ollama.ollama.svc:11434
ollama:
enabled: false
ollama:
gpu:
enabled: false
models:
pull:
- qwen3-vl:8b
run:
- qwen3-vl:8b
pipelines:
enabled: true
tika:
enabled: true
websocket:
enabled: true
ingress:
enabled: true
class: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
host: "ai.hexor.cy"
tls:
- hosts:
- '*.hexor.cy'
secretName: ollama-tls

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: postgres-and-oauth-creds

View File

@@ -13,7 +13,7 @@ resources:
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
cpu: "1000m"
service:
type: ClusterIP

View File

@@ -4,7 +4,6 @@ kind: Kustomization
resources:
- app.yaml
- external-secrets.yaml
- paperless-ai.yaml
helmCharts:
- name: paperless-ngx
@@ -28,11 +27,4 @@ helmCharts:
namespace: paperless
valuesFile: gotenberg-values.yaml
includeCRDs: true
#- name: redis
# repo: oci://registry-1.docker.io/bitnamicharts/redis
# version: 24.1.0
# releaseName: redis
# namespace: paperless
# includeCRDs: true
# valuesFile: bazarr-values.yaml

View File

@@ -1,101 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: paperless-ai
labels:
app: paperless-ai
spec:
replicas: 1
selector:
matchLabels:
app: paperless-ai
template:
metadata:
labels:
app: paperless-ai
spec:
nodeSelector:
kubernetes.io/hostname: nas.homenet
containers:
- name: paperless-ai
image: clusterzx/paperless-ai:latest
imagePullPolicy: Always
ports:
- containerPort: 3000
name: http
env:
- name: NODE_ENV
value: production
- name: PAPERLESS_AI_PORT
value: "3000"
resources:
requests:
memory: 512Mi
cpu: 500m
limits:
memory: 1024Mi
cpu: 2000m
#livenessProbe:
# httpGet:
# path: /
# port: 8000
# initialDelaySeconds: 30
# periodSeconds: 10
#readinessProbe:
# httpGet:
# path: /
# port: 8000
# initialDelaySeconds: 5
# periodSeconds: 5
volumeMounts:
- name: data
mountPath: /app/data
volumes:
- name: data
hostPath:
path: /mnt/storage/Storage/k8s/paperless/ai-data
type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
name: paperless-ai
namespace: paperless
labels:
app: paperless-ai
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app: paperless-ai
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: paperless-ai-ingress
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: ai-docs.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: paperless-ai
port:
number: 3000
tls:
- secretName: docs-tls
hosts:
- '*.hexor.cy'

View File

@@ -1,12 +1,12 @@
image:
tag: 2.20.3
tag: 2.19.3
resources:
requests:
memory: "1Gi"
cpu: "500m"
limits:
memory: "4Gi"
cpu: "3000m"
cpu: "2000m"
initContainers:
install-tesseract-langs:
image: ghcr.io/paperless-ngx/paperless-ngx:2.18.2
@@ -16,7 +16,7 @@ initContainers:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
command: ["/bin/sh", "-c"]
args:
- apt-get update && apt-get install -y --reinstall tesseract-ocr-rus tesseract-ocr-jpn tesseract-ocr-chi-sim tesseract-ocr-eng tesseract-ocr-ell && cp -v -r /usr/share/tesseract-ocr/5/tessdata/* /custom-tessdata/
@@ -107,8 +107,6 @@ persistence:
- path: /usr/src/paperless/consume
redis:
enabled: true
image:
tag: latest
master:
nodeSelector:
kubernetes.io/hostname: nas.homenet

View File

@@ -13,7 +13,7 @@ resources:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
service:
type: ClusterIP

View File

@@ -1,264 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: pasarguard-scripts
labels:
app: pasarguard-node
data:
init-uuid.sh: |
#!/bin/bash
set -e
echo "Started"
# NODE_NAME is already set via environment variable
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
# Get DNS name from node label xray-node-address
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
if [ -z "${DNS_NAME}" ]; then
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
exit 1
fi
echo "Node: ${NODE_NAME}"
echo "DNS Name from label: ${DNS_NAME}"
# Use DNS name for ConfigMap name to ensure uniqueness
CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
# Check if ConfigMap exists and get UUID
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "ConfigMap exists, reading UUID..."
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
if [ -z "${API_KEY}" ]; then
echo "UUID not found in ConfigMap, generating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
else
echo "Using existing UUID from ConfigMap"
fi
else
echo "ConfigMap does not exist, creating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
--from-literal=API_KEY="${API_KEY}" \
--from-literal=NODE_NAME="${NODE_NAME}"
fi
# Save UUID and node info to shared volume for the main container
echo -n "${API_KEY}" > /shared/api-key
echo -n "${NODE_NAME}" > /shared/node-name
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
echo "UUID initialized: ${API_KEY}"
echo "Node name: ${NODE_NAME}"
echo "ConfigMap: ${CONFIGMAP_NAME}"
# Create Certificate for this node using DNS name from label
CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
# Check if Certificate already exists
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate does not exist, creating..."
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ${CERT_NAME}
namespace: ${NAMESPACE}
spec:
secretName: ${CERT_NAME}-tls
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- ${DNS_NAME}
EOF
else
echo "Certificate already exists"
fi
# Wait for certificate to be ready
echo "Waiting for certificate to be ready..."
for i in {1..600}; do
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate secret is ready!"
break
fi
echo "Waiting for certificate... ($i/600)"
sleep 1
done
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "WARNING: Certificate secret not ready after 600 seconds"
else
# Extract certificate and key from secret to shared volume
echo "Extracting certificate and key..."
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
echo "Certificate and key extracted successfully."
cat /shared/tls.crt
fi
# Create individual Service and Endpoints for this node
# Take only first part of node name before first dot
NODE_SHORT_NAME="${NODE_NAME%%.*}"
SERVICE_NAME="${NODE_SHORT_NAME}"
# Get node internal IP (take only first IP if multiple)
NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
# Create Service without selector
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
spec:
clusterIP: None
ports:
- name: api
port: 62050
protocol: TCP
targetPort: 62050
- name: metrics
port: 9550
protocol: TCP
targetPort: 9550
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
subsets:
- addresses:
- ip: ${NODE_IP}
nodeName: ${NODE_NAME}
ports:
- name: api
port: 62050
protocol: TCP
- name: metrics
port: 9550
protocol: TCP
EOF
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
exporter-start.sh: |
#!/bin/sh
# Install required tools
apk add --no-cache wget curl iproute2-ss bash
# Download v2ray-exporter
echo "Downloading v2ray-exporter..."
ARCH=$(uname -m)
case $ARCH in
x86_64)
BINARY_ARCH="amd64"
;;
aarch64|arm64)
BINARY_ARCH="arm64"
;;
*)
echo "Unsupported architecture: $ARCH"
exit 1
;;
esac
echo "Detected architecture: $ARCH, using binary: v2ray-exporter_linux_$BINARY_ARCH"
wget -L -O /tmp/v2ray-exporter "https://github.com/wi1dcard/v2ray-exporter/releases/download/v0.6.0/v2ray-exporter_linux_$BINARY_ARCH"
mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
chmod +x /usr/local/bin/v2ray-exporter
# Wait for initial API port file
echo "Waiting for initial xray API port file..."
while [ ! -f /shared/xray-api-port ]; do
echo "Waiting for API port file..."
sleep 2
done
# Main loop - restart exporter if it crashes or port changes
while true; do
if [ -f /shared/xray-api-port ]; then
API_PORT=$(cat /shared/xray-api-port)
if [ -n "$API_PORT" ]; then
echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
/usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550" &
EXPORTER_PID=$!
# Wait for exporter to exit or port file to change
while kill -0 $EXPORTER_PID 2>/dev/null; do
if [ -f /shared/xray-api-port ]; then
NEW_PORT=$(cat /shared/xray-api-port)
if [ "$NEW_PORT" != "$API_PORT" ]; then
echo "API port changed from $API_PORT to $NEW_PORT, restarting exporter"
kill $EXPORTER_PID 2>/dev/null
wait $EXPORTER_PID 2>/dev/null
break
fi
fi
sleep 5
done
echo "Exporter stopped, restarting..."
wait $EXPORTER_PID 2>/dev/null
fi
fi
sleep 2
done
pasarguard-start.sh: |
#!/bin/sh
# Read API_KEY from shared volume created by init container
if [ -f /shared/api-key ]; then
export API_KEY=$(cat /shared/api-key)
echo "Loaded API_KEY from shared volume"
else
echo "WARNING: API_KEY file not found, using default"
fi
cd /app
# Start main process in background
./main &
MAIN_PID=$!
# Start continuous port monitoring in background
{
sleep 10 # Wait for xray to start initially
LAST_PORT=""
while true; do
API_PORT=$(netstat -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
if [ -n "$API_PORT" ] && [ "$API_PORT" != "$LAST_PORT" ]; then
echo "Found xray API port: $API_PORT"
echo -n "$API_PORT" > /shared/xray-api-port
LAST_PORT="$API_PORT"
fi
sleep 5 # Check every 5 seconds
done
} &
PORT_MONITOR_PID=$!
# Wait for main process to finish
wait $MAIN_PID
# Clean up port monitor
kill $PORT_MONITOR_PID 2>/dev/null

View File

@@ -105,19 +105,174 @@ spec:
fieldPath: spec.nodeName
command:
- /bin/bash
- /scripts/init-uuid.sh
- -c
- |
set -e
echo "Started"
# NODE_NAME is already set via environment variable
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
# Get DNS name from node label xray-node-address
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
if [ -z "${DNS_NAME}" ]; then
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
exit 1
fi
echo "Node: ${NODE_NAME}"
echo "DNS Name from label: ${DNS_NAME}"
# Use DNS name for ConfigMap name to ensure uniqueness
CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
# Check if ConfigMap exists and get UUID
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "ConfigMap exists, reading UUID..."
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
if [ -z "${API_KEY}" ]; then
echo "UUID not found in ConfigMap, generating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
else
echo "Using existing UUID from ConfigMap"
fi
else
echo "ConfigMap does not exist, creating new one..."
API_KEY=$(cat /proc/sys/kernel/random/uuid)
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
--from-literal=API_KEY="${API_KEY}" \
--from-literal=NODE_NAME="${NODE_NAME}"
fi
# Save UUID and node info to shared volume for the main container
echo -n "${API_KEY}" > /shared/api-key
echo -n "${NODE_NAME}" > /shared/node-name
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
echo "UUID initialized: ${API_KEY}"
echo "Node name: ${NODE_NAME}"
echo "ConfigMap: ${CONFIGMAP_NAME}"
# Create Certificate for this node using DNS name from label
CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
# Check if Certificate already exists
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate does not exist, creating..."
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ${CERT_NAME}
namespace: ${NAMESPACE}
spec:
secretName: ${CERT_NAME}-tls
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- ${DNS_NAME}
EOF
else
echo "Certificate already exists"
fi
# Wait for certificate to be ready
echo "Waiting for certificate to be ready..."
for i in {1..600}; do
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "Certificate secret is ready!"
break
fi
echo "Waiting for certificate... ($i/600)"
sleep 1
done
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
echo "WARNING: Certificate secret not ready after 600 seconds"
else
# Extract certificate and key from secret to shared volume
echo "Extracting certificate and key..."
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
echo "Certificate and key extracted successfully."
cat /shared/tls.crt
fi
# Create individual Service and Endpoints for this node
# Take only first part of node name before first dot
NODE_SHORT_NAME="${NODE_NAME%%.*}"
SERVICE_NAME="${NODE_SHORT_NAME}"
# Get node internal IP (take only first IP if multiple)
NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
# Create Service without selector
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
spec:
clusterIP: None
ports:
- name: api
port: 62050
protocol: TCP
targetPort: 62050
---
apiVersion: v1
kind: Endpoints
metadata:
name: ${SERVICE_NAME}
namespace: ${NAMESPACE}
labels:
app: pasarguard-node
node: ${NODE_NAME}
subsets:
- addresses:
- ip: ${NODE_IP}
nodeName: ${NODE_NAME}
ports:
- name: api
port: 62050
protocol: TCP
EOF
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
volumeMounts:
- name: shared-data
mountPath: /shared
- name: scripts
mountPath: /scripts
containers:
- name: pasarguard-node
image: 'pasarguard/node:v0.2.1'
image: 'pasarguard/node:v0.1.1'
imagePullPolicy: Always
command:
- /bin/sh
- /scripts/pasarguard-start.sh
- -c
- |
# Read API_KEY from shared volume created by init container
if [ -f /shared/api-key ]; then
export API_KEY=$(cat /shared/api-key)
echo "Loaded API_KEY from shared volume"
else
echo "WARNING: API_KEY file not found, using default"
fi
cd /app
exec ./main
ports:
- name: api
containerPort: 62050
@@ -162,60 +317,14 @@ spec:
resources:
requests:
memory: "128Mi"
#cpu: "500m"
cpu: "100m"
limits:
memory: "512Mi"
#cpu: "1200m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: false
- name: scripts
mountPath: /scripts
- name: xray-exporter
image: alpine:3.18
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- /scripts/exporter-start.sh
ports:
- name: metrics
containerPort: 9550
protocol: TCP
livenessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "500m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: true
- name: scripts
mountPath: /scripts
volumes:
- name: shared-data
emptyDir: {}
- name: scripts
configMap:
name: pasarguard-scripts
defaultMode: 0755

View File

@@ -34,7 +34,7 @@ spec:
mountPath: /templates/subscription
containers:
- name: pasarguard-web
image: 'pasarguard/panel:latest'
image: 'pasarguard/panel:v1.4.1'
imagePullPolicy: Always
envFrom:
- secretRef:

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: pasarguard-secrets

View File

@@ -7,5 +7,5 @@ resources:
- ./deployment.yaml
- ./daemonset.yaml
- ./certificate.yaml
- ./configmap-scripts.yaml
- ./servicemonitor.yaml

View File

@@ -1,21 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: pasarguard-node-metrics
labels:
app: pasarguard-node
release: prometheus
spec:
selector:
matchLabels:
app: pasarguard-node
endpoints:
- port: metrics
path: /scrape
interval: 30s
scrapeTimeout: 10s
honorLabels: true
namespaceSelector:
matchNames:
- pasarguard

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: remnawave
namespace: argocd
spec:
project: apps
destination:
namespace: remnawave
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/remnawave
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,71 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: remnawave
labels:
app: remnawave
annotations:
reloader.stakater.com/auto: "true"
spec:
selector:
matchLabels:
app: remnawave
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: remnawave
spec:
containers:
- name: remnawave
image: 'remnawave/backend:2'
imagePullPolicy: Always
envFrom:
- secretRef:
name: remnawave-secrets
env:
- name: REDIS_URL
value: "redis://remnawave-redis:6379"
ports:
- name: http
containerPort: 3000
protocol: TCP
- name: metrics
containerPort: 3001
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 3001
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: 3001
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 3
---
apiVersion: v1
kind: Service
metadata:
name: remnawave
spec:
selector:
app: remnawave
ports:
- name: http
protocol: TCP
port: 3000
targetPort: 3000
- name: metrics
protocol: TCP
port: 3001
targetPort: 3001

View File

@@ -1,70 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: remnawave-secrets
spec:
target:
name: remnawave-secrets
deletionPolicy: Delete
template:
type: Opaque
data:
METRICS_USER: admin
FRONT_END_DOMAIN: rw.hexor.cy
SUB_PUBLIC_DOMAIN: sub.hexor.cy
REDIS_HOST: remnawave-redis
REDIS_PORT: "6379"
DATABASE_URL: |-
postgresql://remnawave:{{ .pg_pass }}@psql.psql.svc:5432/remnawave
JWT_AUTH_SECRET: |-
{{ .jwt_auth_secret }}
JWT_API_TOKENS_SECRET: |-
{{ .jwt_api_tokens_secret }}
METRICS_PASS: |-
{{ .metrics_pass }}
WEBHOOK_SECRET_HEADER: |-
{{ .webhook_secret }}
data:
- secretKey: pg_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[10].value
- secretKey: jwt_auth_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 0d090436-5e82-453a-914c-19cec2abded1
property: fields[0].value
- secretKey: jwt_api_tokens_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 0d090436-5e82-453a-914c-19cec2abded1
property: fields[1].value
- secretKey: metrics_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 0d090436-5e82-453a-914c-19cec2abded1
property: fields[2].value
- secretKey: webhook_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 0d090436-5e82-453a-914c-19cec2abded1
property: fields[3].value

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./external-secrets.yaml
- ./deployment.yaml
- ./redis-deployment.yaml
- ./subscription-page-configmap.yaml
- ./subscription-page-deployment.yaml
- ./servicemonitor.yaml
- ./user-ui-ingress.yaml
- ./panel-ingress.yaml

View File

@@ -1,37 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: panel-ui
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: rw.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave
port:
number: 3000
- host: rw.hexor.ru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave
port:
number: 3000
tls:
- secretName: remnawave-panel-tls
hosts:
- rw.hexor.cy
- rw.hexor.ru

View File

@@ -1,71 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: remnawave-redis
labels:
app: remnawave-redis
spec:
selector:
matchLabels:
app: remnawave-redis
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: remnawave-redis
spec:
containers:
- name: redis
image: 'valkey/valkey:8.1-alpine'
imagePullPolicy: Always
ports:
- name: redis
containerPort: 6379
protocol: TCP
livenessProbe:
exec:
command:
- valkey-cli
- ping
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
exec:
command:
- valkey-cli
- ping
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
volumeMounts:
- name: redis-data
mountPath: /data
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "256Mi"
cpu: "200m"
volumes:
- name: redis-data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: remnawave-redis
spec:
selector:
app: remnawave-redis
ports:
- name: redis
protocol: TCP
port: 6379
targetPort: 6379

View File

@@ -1,21 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: remnawave-metrics
labels:
app: remnawave
release: prometheus
spec:
selector:
matchLabels:
app: remnawave
endpoints:
- port: metrics
path: /metrics
interval: 30s
scrapeTimeout: 10s
honorLabels: true
namespaceSelector:
matchNames:
- remnawave

View File

@@ -1,27 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: remnawave-subscription-page-config
labels:
app: remnawave-subscription-page
data:
APP_PORT: "3010"
REMNAWAVE_PANEL_URL: "https://rw.hexor.cy"
META_TITLE: "RemnaWave Subscription"
META_DESCRIPTION: "Your VPN subscription portal"
META_KEYWORDS: "vpn,subscription,remnawave"
META_AUTHOR: "RemnaWave"
ENABLE_ANALYTICS: "false"
ANALYTICS_MEASUREMENT_ID: ""
CUSTOM_SUB_PREFIX: ""
THEME: "dark"
CUSTOM_LOGO_URL: ""
SHOW_SUBSCRIPTION_INFO: "true"
SHOW_CONNECTION_INFO: "true"
SHOW_QR_CODE: "true"
QR_CODE_SIZE: "256"
REFRESH_INTERVAL: "30000"
SUBSCRIPTION_TEXT_COLOR: "#ffffff"
BACKGROUND_COLOR: "#1a1a1a"
ACCENT_COLOR: "#007bff"

View File

@@ -1,52 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: remnawave-subscription-page
labels:
app: remnawave-subscription-page
spec:
selector:
matchLabels:
app: remnawave-subscription-page
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: remnawave-subscription-page
spec:
containers:
- name: subscription-page
image: 'remnawave/subscription-page:latest'
imagePullPolicy: Always
envFrom:
- configMapRef:
name: remnawave-subscription-page-config
ports:
- name: http
containerPort: 3010
protocol: TCP
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "256Mi"
cpu: "200m"
---
apiVersion: v1
kind: Service
metadata:
name: remnawave-subscription-page
labels:
app: remnawave-subscription-page
spec:
selector:
app: remnawave-subscription-page
ports:
- name: http
protocol: TCP
port: 3010
targetPort: 3010

View File

@@ -1,37 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: user-ui
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: sub.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave-subscription-page
port:
number: 3010
- host: sub.hexor.ru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave-subscription-page
port:
number: 3010
tls:
- secretName: remnawave-user-ui-tls
hosts:
- sub.hexor.cy
- sub.hexor.ru

View File

@@ -27,7 +27,7 @@ spec:
cpu: "100m"
limits:
memory: "512Mi"
cpu: "750m"
cpu: "500m"
command: ["hbbs"]
args:
- "--relay-servers"
@@ -98,7 +98,7 @@ spec:
cpu: "100m"
limits:
memory: "512Mi"
cpu: "750m"
cpu: "500m"
command: ["hbbr"]
args:
- "--port"

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: rustdesk-keys

View File

@@ -4,7 +4,7 @@ resources:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net

View File

@@ -6,7 +6,7 @@ resources:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net

View File

@@ -6,7 +6,7 @@ resources:
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
cpu: "1000m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net

View File

@@ -33,7 +33,7 @@ resources:
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
cpu: "1000m"
probes:
liveness:

View File

@@ -16,18 +16,18 @@ helmCharts:
valuesFile: syncthing-master.yaml
includeCRDs: true
- name: syncthing
repo: https://k8s-home-lab.github.io/helm-charts
version: 4.0.0
releaseName: syncthing-khv
namespace: syncthing
valuesFile: syncthing-khv.yaml
includeCRDs: true
- name: syncthing
repo: https://k8s-home-lab.github.io/helm-charts
version: 4.0.0
releaseName: syncthing-nas
namespace: syncthing
valuesFile: syncthing-nas.yaml
includeCRDs: true
# - name: syncthing
# repo: https://k8s-home-lab.github.io/helm-charts
# version: 4.0.0
# releaseName: syncthing-khv
# namespace: syncthing
# valuesFile: syncthing-khv.yaml
# includeCRDs: true
includeCRDs: true

View File

@@ -1,3 +1,4 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -22,7 +23,7 @@ spec:
kubernetes.io/hostname: home.homenet
containers:
- name: desubot
image: "ultradesu/desubot:latest"
image: 'ultradesu/desubot:latest'
imagePullPolicy: Always
envFrom:
- secretRef:
@@ -31,11 +32,11 @@ spec:
- name: RUST_LOG
value: "info"
volumeMounts:
- mountPath: /storage
name: storage
- mountPath: /storage
name: storage
volumes:
- name: storage
persistentVolumeClaim:
claimName: desubot-storage
readOnly: false
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/desubot/
readOnly: false

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: get-id-bot
@@ -24,7 +24,7 @@ spec:
property: fields[0].value
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: desubot

View File

@@ -30,7 +30,7 @@ spec:
name: get-id-bot
env:
- name: RUST_LOG
value: "info,teloxide::error_handlers=off"
value: "info"

View File

@@ -7,6 +7,3 @@ resources:
- get-id-bot.yaml
- external-secrets.yaml
- desubot.yaml
- restart-job.yaml
- storage.yaml

View File

@@ -1,56 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tg-bots-restart-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tg-bots-restart-role
rules:
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tg-bots-restart-rb
subjects:
- kind: ServiceAccount
name: tg-bots-restart-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: tg-bots-restart-role
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: tg-bots-daily-restart
spec:
schedule: "0 4 * * *" # every day at 04:00
jobTemplate:
spec:
template:
spec:
serviceAccountName: tg-bots-restart-sa
restartPolicy: OnFailure
containers:
- name: kubectl
image: bitnami/kubectl:latest
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /bin/sh
- -c
- |
kubectl -n "$POD_NAMESPACE" rollout restart deployment/desubot
kubectl -n "$POD_NAMESPACE" rollout restart deployment/get-id-bot

View File

@@ -1,12 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: desubot-storage
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
resources:
requests:
storage: 200Gi

View File

@@ -37,7 +37,7 @@ spec:
cpu: "100m"
limits:
memory: "1Gi"
cpu: "750m"
cpu: "500m"
env:
- name: DOMAIN
value: https://vw.hexor.cy

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: admin-token

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: outfleet-secrets
@@ -51,7 +51,7 @@ spec:
property: fields[1].value
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: outline-config

View File

@@ -174,7 +174,7 @@ spec:
resources:
limits:
memory: "512Mi"
cpu: "750m"
cpu: "500m"
requests:
memory: "256Mi"
cpu: "250m"

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: xandikos
namespace: argocd
spec:
project: apps
destination:
namespace: xandikos
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/xandikos
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,70 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: xandikos
labels:
app: xandikos
spec:
selector:
matchLabels:
app: xandikos
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: xandikos
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
volumes:
- name: storage
hostPath:
path: /k8s/xandikos
type: Directory
containers:
- name: xandikos
image: ghcr.io/jelmer/xandikos:latest
imagePullPolicy: Always
command:
- "python3"
- "-m"
- "xandikos.web"
- "--port=8081"
- "-d/data"
- "--defaults"
- "--listen-address=0.0.0.0"
- "--route-prefix=/dav"
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /
port: 8081
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8081
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
ports:
- name: http
containerPort: 8081
protocol: TCP
volumeMounts:
- name: storage
mountPath: /data
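
The deleted xandikos Deployment follows the hostPath-plus-nodeSelector pattern used throughout this repo: the data lives on one node's disk, so the pod must be pinned to that node and the two settings have to travel together:

    spec:
      nodeSelector:
        kubernetes.io/hostname: master.tail2fe2d.ts.net   # the node that owns the data
      volumes:
        - name: storage
          hostPath:
            path: /k8s/xandikos
            type: Directory     # mount fails fast if the path is absent on that node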

View File

@@ -1,31 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: mmdl-secrets
spec:
target:
name: mmdl-secrets
deletionPolicy: Delete
template:
type: Opaque
data:
DB_DIALECT: 'postgres'
DB_HOST: psql.psql.svc
DB_USER: mmdl
DB_NAME: mmdl
DB_PORT: "5432"
DB_PASS: |-
{{ .pg_pass }}
AES_PASSWORD: |-
{{ .pg_pass }}
data:
- secretKey: pg_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[12].value
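
The removed mmdl-secrets manifest is a compact example of external-secrets templating: each spec.data entry fetches one remote value under a named key, and the target template may interpolate that key any number of times (here {{ .pg_pass }} feeds both DB_PASS and AES_PASSWORD). The mechanism, reduced to a sketch with a placeholder item id:

    spec:
      secretStoreRef:
        name: vaultwarden-login
        kind: ClusterSecretStore
      target:
        name: example-secret
        template:
          data:
            DB_PASS: "{{ .pg_pass }}"        # one fetched value,
            AES_PASSWORD: "{{ .pg_pass }}"   # reused under two keys
      data:
        - secretKey: pg_pass                 # the name the template sees
          remoteRef:
            key: 00000000-0000-0000-0000-000000000000   # placeholder item id
            property: fields[12].value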

View File

@@ -1,47 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: xandikos
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: cal.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mmdl
port:
number: 3000
- path: /dav
pathType: Prefix
backend:
service:
name: xandikos
port:
number: 8081
- path: /.well-known/carddav
pathType: Exact
backend:
service:
name: xandikos
port:
number: 8081
- path: /.well-known/caldav
pathType: Exact
backend:
service:
name: xandikos
port:
number: 8081
tls:
- secretName: xandikos-tls
hosts:
- cal.hexor.cy

View File

@@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
- mmdl-deployment.yaml
- mmdl-service.yaml
- ingress.yaml
- external-secrets.yaml

View File

@@ -1,61 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mmdl
labels:
app: mmdl
spec:
selector:
matchLabels:
app: mmdl
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: mmdl
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: mmdl
image: intriin/mmdl:latest
imagePullPolicy: Always
envFrom:
- secretRef:
name: mmdl-secrets
env:
- name: NEXTAUTH_URL
value: "https://cal.hexor.cy"
- name: CALDAV_SERVER_URL
value: "https://cal.hexor.cy/dav"
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
ports:
- name: http
containerPort: 3000
protocol: TCP

View File

@@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: mmdl
spec:
selector:
app: mmdl
type: ClusterIP
ports:
- name: http
port: 3000
protocol: TCP
targetPort: 3000

View File

@@ -1,16 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: xandikos
labels:
app: xandikos
spec:
selector:
app: xandikos
ports:
- protocol: TCP
port: 8081
targetPort: 8081
name: http
type: ClusterIP

View File

@@ -47,20 +47,3 @@ spec:
server: https://kubernetes.default.svc
sourceRepos:
- ssh://git@gt.hexor.cy:30022/ab/homelab.git
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: desktop
namespace: argocd
spec:
clusterResourceWhitelist:
- group: '*'
kind: '*'
description: Hexor Home Lab Desktop Apps
destinations:
- namespace: '*'
server: https://kubernetes.default.svc
sourceRepos:
- ssh://git@gt.hexor.cy:30022/ab/homelab.git

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: oidc-creds

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: argo-cd
repo: https://argoproj.github.io/argo-helm
version: 9.1.4
version: 8.1.3
releaseName: argocd
namespace: argocd
valuesFile: values.yaml
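
All of the kustomization hunks in this diff (argo-cd here; authentik, cert-manager, external-secrets and csi-driver-nfs below) share one shape: kustomize's helmCharts field inflates a chart at build time, so the version field is the single pin these hunks move. The pattern, using this file's values:

    apiVersion: kustomize.config.k8s.io/v1beta1
    kind: Kustomization
    resources:
      - app.yaml             # plain manifests applied alongside the chart
    helmCharts:
      - name: argo-cd
        repo: https://argoproj.github.io/argo-helm
        version: 8.1.3       # the pin this hunk changes
        releaseName: argocd
        namespace: argocd
        valuesFile: values.yaml

Rendering this requires helm support to be switched on (kustomize build --enable-helm; in ArgoCD, presumably the equivalent kustomize.buildOptions setting).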

View File

@@ -2,7 +2,7 @@
global:
domain: ag.hexor.cy
nodeSelector: &nodeSelector
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
logging:
format: text
@@ -55,15 +55,15 @@ configs:
controller:
replicas: 1
nodeSelector:
<<: *nodeSelector
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# Dex OIDC provider
dex:
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
enabled: false
# Standard Redis disabled because Redis HA is enabled
@@ -86,7 +86,7 @@ redis-ha:
server:
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
@@ -99,11 +99,8 @@ server:
# Repository Server
repoServer:
replicas: 1
livenessProbe:
timeoutSeconds: 10
periodSeconds: 60
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# ApplicationSet Controller
@@ -111,7 +108,7 @@ applicationSet:
enabled: true # Enabled by default
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# Notifications Controller
@@ -119,5 +116,5 @@ notifications:
enabled: true # Enabled by default
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add notifiers, triggers, templates configurations if needed
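
These values.yaml hunks swap a YAML anchor scheme for literal values: the old file defined the selector once (nodeSelector: &nodeSelector under global) and merged it into every component with <<: *nodeSelector, while the new file repeats the hostname per component. Merge keys are a YAML 1.1 feature and not every toolchain expands them identically, which is the usual reason to spell things out. The two equivalent forms, as a sketch:

    # Anchored form: define once, merge everywhere (YAML 1.1 merge keys).
    global:
      nodeSelector: &nodeSelector
        kubernetes.io/hostname: master.tail2fe2d.ts.net
    controller:
      nodeSelector:
        <<: *nodeSelector
    ---
    # Literal form: the same selector repeated per component.
    controller:
      nodeSelector:
        kubernetes.io/hostname: master.tail2fe2d.ts.net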

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: authentik-creds

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: authentik
repo: https://charts.goauthentik.io
version: 2025.10.1
version: 2025.8.1
releaseName: authentik
namespace: authentik
valuesFile: values.yaml

View File

@@ -1,6 +1,6 @@
global:
image:
tag: "2025.10.1"
tag: "2025.8.1"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
@@ -47,7 +47,6 @@ server:
- minecraft.hexor.cy # Minecraft UI and server
- pass.hexor.cy # k8s-secret for openai
- ps.hexor.cy # pasarguard UI
# - rw.hexor.cy # RemnaWave UI
tls:
- secretName: idm-tls
hosts:

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: cloudflare-creds
@@ -22,7 +22,7 @@ spec:
key: 8ae1dcb1-1182-48a1-8733-ca1144ea754b
property: fields[0].value
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: aws-creds

View File

@@ -35,6 +35,5 @@ spec:
key: secretKey
selector:
dnsZones:
- "ps.hexor.cy"
- "of.hexor.cy"
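
The removed dnsZones entry narrows this DNS01 solver's scope: cert-manager matches each challenge against solver selectors, so a zone deleted here stops using these credentials (presumably Route53, given the aws-creds secret above) for its challenges. The selector sits on the solver like this; the region is a placeholder and the credential wiring is in the full file:

    solvers:
      - dns01:
          route53:
            region: eu-central-1       # placeholder
        selector:
          dnsZones:
            - "of.hexor.cy"            # only challenges under this zone use this solver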

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: cert-manager
repo: https://charts.jetstack.io
version: 1.19.1
version: 1.17.1
releaseName: cert-manager
namespace: cert-manager
valuesFile: values.yaml

View File

@@ -1,6 +1,2 @@
crds:
enabled: true
prometheus:
enabled: true
servicemonitor:
enabled: true

View File

@@ -1,6 +1,6 @@
FROM debian:sid
ENV BW_CLI_VERSION=2025.12.1
ENV BW_CLI_VERSION=2025.5.0
RUN apt update && \
apt install -y wget unzip && \

View File

@@ -18,4 +18,4 @@ spec:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
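
The trailing line removed here is ArgoCD's ServerSideApply=true sync option, which makes syncs use Kubernetes server-side apply instead of the client-side three-way merge; it mostly matters for very large resources, where client-side apply can overflow the last-applied-configuration annotation. In context:

    syncPolicy:
      automated:
        selfHeal: true
        prune: true
      syncOptions:
        - CreateNamespace=true
        - ServerSideApply=true   # the option this hunk drops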

View File

@@ -8,7 +8,7 @@
# BW_HOST: base64(url)
# BW_USERNAME: base64(name)
# BW_PASSWORD: base64(pass)
# Vaultwarden bot - 81212111-6350-4069-8bcf-19a67d3964a5
# 81212111-6350-4069-8bcf-19a67d3964a5
---
apiVersion: apps/v1
kind: Deployment
@@ -37,15 +37,15 @@ spec:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: bitwarden-cli
image: ultradesu/bitwarden-client:2025.12.1
image: ultradesu/bitwarden-client:2025.5.0
imagePullPolicy: Always
resources:
requests:
memory: "128Mi"
cpu: "300m"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1000m"
cpu: "500m"
env:
- name: BW_HOST
valueFrom:
@@ -128,7 +128,7 @@ spec:
app.kubernetes.io/name: external-secrets
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: vaultwarden-login
@@ -141,7 +141,7 @@ spec:
result:
jsonPath: "$.data.{{ .remoteRef.property }}"
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: vaultwarden-fields
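
These ClusterSecretStore hunks are the bridge that makes Vaultwarden usable as a secrets backend without a native provider: a webhook provider calls the bitwarden-cli deployment's "bw serve" HTTP API, and the jsonPath result template extracts whatever property the consuming ExternalSecret asked for, which is what resolves remoteRef values like fields[0].value. A reduced sketch (service URL and port are assumptions based on bw serve defaults):

    apiVersion: external-secrets.io/v1beta1
    kind: ClusterSecretStore
    metadata:
      name: vaultwarden-fields
    spec:
      provider:
        webhook:
          url: "http://bitwarden-cli.external-secrets.svc:8087/object/item/{{ .remoteRef.key }}"  # assumed endpoint
          result:
            jsonPath: "$.data.{{ .remoteRef.property }}"   # e.g. resolves fields[0].value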

View File

@@ -2,12 +2,13 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
- bitwarden-store.yaml
helmCharts:
- name: external-secrets
repo: https://charts.external-secrets.io
version: 1.1.0
version: 0.16.2
releaseName: external-secrets
namespace: external-secrets
valuesFile: values.yaml

View File

@@ -3,15 +3,5 @@ kind: Kustomization
resources:
- app.yaml
- nfs-storage.yaml
- coredns-internal-resolve.yaml
helmCharts:
- name: csi-driver-nfs
repo: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
version: 4.12.0
releaseName: csi-driver-nfs
namespace: kube-system
#valuesFile: values.yaml
includeCRDs: true

View File

@@ -1,14 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
server: nas.homenet
share: /mnt/storage/Storage/PVC
reclaimPolicy: Retain
volumeBindingMode: Immediate
mountOptions:
- vers=4
- hard
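
The removed StorageClass is what claims like the desubot PVC above pointed at: nfs.csi.k8s.io provisions a subdirectory per volume under the given share, and reclaimPolicy: Retain keeps that directory when the claim is deleted. A claim against it needs nothing beyond:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: example-claim          # placeholder
    spec:
      accessModes:
        - ReadWriteMany            # NFS supports RWX, unlike most block storage
      storageClassName: nfs-csi
      resources:
        requests:
          storage: 10Gi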

View File

@@ -1,5 +1,5 @@
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: postgres-creds
@@ -90,7 +90,7 @@ spec:
key: 832042b9-7edb-4f4c-9254-3c8884ba9733
property: fields[2].value
---
apiVersion: external-secrets.io/v1
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: postgres-users
@@ -119,14 +119,6 @@ spec:
{{ .outfleet_rs }}
USER_pasarguard: |-
{{ .pasarguard }}
USER_remnawave: |-
{{ .remnawave }}
USER_umami: |-
{{ .umami }}
USER_mmdl: |-
{{ .mmdl }}
USER_n8n: |-
{{ .n8n }}
data:
- secretKey: authentik
sourceRef:
@@ -227,48 +219,3 @@ spec:
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[9].value
- secretKey: remnawave
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[10].value
- secretKey: umami
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[11].value
- secretKey: mmdl
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[12].value
- secretKey: n8n
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[13].value
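
The postgres-users hunks remove four users in lockstep, and each removal has two halves: the USER_<name> key in the target template and the matching spec.data entry that feeds it. Every entry reads the same Vaultwarden item (key 2a9deb39-…) but a different custom-field index, fields[10] through fields[13]. One removed pair, reassembled as a sketch:

    spec:
      target:
        template:
          data:
            USER_n8n: |-
              {{ .n8n }}
      data:
        - secretKey: n8n
          sourceRef:
            storeRef:
              name: vaultwarden-login
              kind: ClusterSecretStore
          remoteRef:
            key: 2a9deb39-ef22-433e-a1be-df1555625e22
            property: fields[13].value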

Some files were not shown because too many files have changed in this diff.