Compare commits

..

1 Commits

Author SHA1 Message Date
Gitea Actions Bot
40a920cbdf Auto-update README with current k8s applications
All checks were successful
Terraform / Terraform (pull_request) Successful in 42s
Generated by CI/CD workflow on 2025-11-24 16:47:03

This PR updates the README.md file with the current list of applications found in the k8s/ directory structure.
2025-11-24 16:47:03 +00:00
149 changed files with 518 additions and 90693 deletions

View File

@@ -25,34 +25,26 @@ jobs:
uses: actions/checkout@v3
- name: Setup Terraform
uses: hashicorp/setup-terraform@v4.0.0
uses: hashicorp/setup-terraform@v2
with:
cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
- name: Terraform Init
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform init
working-directory: ./terraform/authentik
- name: Terraform Format
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform fmt -check
continue-on-error: true
working-directory: ./terraform/authentik
- name: Terraform Apply
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform apply -input=false -auto-approve -parallelism=100
run: terraform apply -var-file proxy-apps.tfvars -var-file oauth2-apps.tfvars -var-file terraform.tfvars -var-file groups.tfvars -input=false -auto-approve -parallelism=100
working-directory: ./terraform/authentik
- name: Generate Wiki Content
if: success()
continue-on-error: true
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: |
echo "📋 Starting Wiki generation..."
cd ./terraform/authentik

View File

@@ -22,13 +22,12 @@ jobs:
- name: Install Python dependencies
run: |
python3 -m venv .venv
.venv/bin/pip install pyyaml
pip install pyyaml
- name: Generate K8s Services Wiki
run: |
echo "📋 Starting K8s wiki generation..."
.venv/bin/python .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
python3 .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
if [ -f "Kubernetes-Services.md" ]; then
echo "✅ Wiki content generated successfully"

1
.gitignore vendored
View File

@@ -13,7 +13,6 @@ crash.*.log
*.tfvars
*.tfvars.json
!*terraform.tfvars
!*.auto.tfvars
# claude ai
.claude/

View File

@@ -18,7 +18,6 @@ ArgoCD homelab project
| **external-secrets** | [![external-secrets](https://ag.hexor.cy/api/badge?name=external-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/external-secrets) |
| **kube-system-custom** | [![kube-system-custom](https://ag.hexor.cy/api/badge?name=kube-system-custom&revision=true)](https://ag.hexor.cy/applications/argocd/kube-system-custom) |
| **kubernetes-dashboard** | [![kubernetes-dashboard](https://ag.hexor.cy/api/badge?name=kubernetes-dashboard&revision=true)](https://ag.hexor.cy/applications/argocd/kubernetes-dashboard) |
| **longhorn** | [![longhorn](https://ag.hexor.cy/api/badge?name=longhorn&revision=true)](https://ag.hexor.cy/applications/argocd/longhorn) |
| **postgresql** | [![postgresql](https://ag.hexor.cy/api/badge?name=postgresql&revision=true)](https://ag.hexor.cy/applications/argocd/postgresql) |
| **prom-stack** | [![prom-stack](https://ag.hexor.cy/api/badge?name=prom-stack&revision=true)](https://ag.hexor.cy/applications/argocd/prom-stack) |
| **system-upgrade** | [![system-upgrade](https://ag.hexor.cy/api/badge?name=system-upgrade&revision=true)](https://ag.hexor.cy/applications/argocd/system-upgrade) |
@@ -45,7 +44,6 @@ ArgoCD homelab project
| **jellyfin** | [![jellyfin](https://ag.hexor.cy/api/badge?name=jellyfin&revision=true)](https://ag.hexor.cy/applications/argocd/jellyfin) |
| **k8s-secrets** | [![k8s-secrets](https://ag.hexor.cy/api/badge?name=k8s-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
| **khm** | [![khm](https://ag.hexor.cy/api/badge?name=khm&revision=true)](https://ag.hexor.cy/applications/argocd/khm) |
| **n8n** | [![n8n](https://ag.hexor.cy/api/badge?name=n8n&revision=true)](https://ag.hexor.cy/applications/argocd/n8n) |
| **ollama** | [![ollama](https://ag.hexor.cy/api/badge?name=ollama&revision=true)](https://ag.hexor.cy/applications/argocd/ollama) |
| **paperless** | [![paperless](https://ag.hexor.cy/api/badge?name=paperless&revision=true)](https://ag.hexor.cy/applications/argocd/paperless) |
| **pasarguard** | [![pasarguard](https://ag.hexor.cy/api/badge?name=pasarguard&revision=true)](https://ag.hexor.cy/applications/argocd/pasarguard) |
@@ -58,7 +56,6 @@ ArgoCD homelab project
| **tg-bots** | [![tg-bots](https://ag.hexor.cy/api/badge?name=tg-bots&revision=true)](https://ag.hexor.cy/applications/argocd/tg-bots) |
| **vaultwarden** | [![vaultwarden](https://ag.hexor.cy/api/badge?name=vaultwarden&revision=true)](https://ag.hexor.cy/applications/argocd/vaultwarden) |
| **vpn** | [![vpn](https://ag.hexor.cy/api/badge?name=vpn&revision=true)](https://ag.hexor.cy/applications/argocd/vpn) |
| **xandikos** | [![xandikos](https://ag.hexor.cy/api/badge?name=xandikos&revision=true)](https://ag.hexor.cy/applications/argocd/xandikos) |
</td>
</tr>

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: comfyui
namespace: argocd
spec:
project: apps
destination:
namespace: comfyui
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/comfyui
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,57 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: comfyui
namespace: comfyui
labels:
app: comfyui
spec:
replicas: 1
selector:
matchLabels:
app: comfyui
template:
metadata:
labels:
app: comfyui
spec:
runtimeClassName: nvidia
tolerations:
- key: workload
operator: Equal
value: desktop
effect: NoSchedule
nodeSelector:
kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
# Fix permissions mismatch usually happening when mapping host paths
securityContext:
runAsUser: 0
initContainers:
- name: create-data-dir
image: busybox
command: ["sh", "-c", "mkdir -p /host.data && chown -R 1000:1000 /host.data"]
volumeMounts:
- name: data
mountPath: /host.data
containers:
- name: comfyui
image: runpod/comfyui:latest-5090
imagePullPolicy: IfNotPresent
env:
- name: COMFYUI_PORT
value: "8188"
ports:
- containerPort: 8188
name: http
protocol: TCP
resources:
limits:
nvidia.com/gpu: 1
volumeMounts:
- name: data
# For ai-dock images, /workspace is the persistent user directory
mountPath: /workspace
volumes:
- name: data
persistentVolumeClaim:
claimName: comfyui-data-pvc

View File

@@ -1,9 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- local-pv.yaml
- pvc.yaml
- deployment.yaml
- service.yaml

View File

@@ -1,22 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: comfyui-data-pv
spec:
capacity:
storage: 200Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-path
local:
path: /data/comfyui
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- uk-desktop.tail2fe2d.ts.net

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: comfyui

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: comfyui-data-pvc
namespace: comfyui
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-path
resources:
requests:
storage: 200Gi

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: comfyui
namespace: comfyui
labels:
app: comfyui
spec:
ports:
- name: http
port: 8188
targetPort: 8188
protocol: TCP
selector:
app: comfyui

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: furumi-dev
namespace: argocd
spec:
project: apps
destination:
namespace: furumi-dev
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/furumi-dev
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,55 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: furumi-ng-creds
spec:
target:
name: furumi-ng-creds
deletionPolicy: Delete
template:
type: Opaque
data:
OIDC_CLIENT_ID: |-
{{ .client_id }}
OIDC_CLIENT_SECRET: |-
{{ .client_secret }}
OIDC_ISSUER_URL: https://idm.hexor.cy/application/o/furumi-dev/
OIDC_REDIRECT_URL: https://music-dev.hexor.cy/auth/callback
OIDC_SESSION_SECRET: |-
{{ .session_secret }}
PG_STRING: |-
postgres://furumi_dev:{{ .pg_pass }}@psql.psql.svc:5432/furumi_dev
data:
- secretKey: client_id
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 960735e6-2cc9-4b68-9bd3-e6786e5a0cd6
property: fields[0].value
- secretKey: client_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 960735e6-2cc9-4b68-9bd3-e6786e5a0cd6
property: fields[1].value
- secretKey: session_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 960735e6-2cc9-4b68-9bd3-e6786e5a0cd6
property: fields[2].value
- secretKey: pg_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[17].value

View File

@@ -1,59 +0,0 @@
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: admin-strip
spec:
stripPrefix:
prefixes:
- /admin
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: furumi-tls-ingress
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: music-dev.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: furumi-dev-web-player
port:
number: 8080
tls:
- secretName: furumi-tls
hosts:
- '*.hexor.cy'
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: furumi-dev-admin-ingress
annotations:
ingressClassName: traefik
traefik.ingress.kubernetes.io/router.middlewares: furumi-server-admin-strip@kubernetescrd,kube-system-https-redirect@kubernetescrd
spec:
rules:
- host: music-dev.hexor.cy
http:
paths:
- path: /admin
pathType: Prefix
backend:
service:
name: furumi-dev-metadata-agent
port:
number: 8090
tls:
- secretName: furumi-tls
hosts:
- '*.hexor.cy'

View File

@@ -1,10 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
- service.yaml
- external-secrets.yaml
- ingress.yaml
- web-player.yaml
- metadata-agent.yaml

View File

@@ -1,59 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: furumi-dev-metadata-agent
labels:
app: furumi-dev-metadata-agent
spec:
replicas: 1
selector:
matchLabels:
app: furumi-dev-metadata-agent
template:
metadata:
labels:
app: furumi-dev-metadata-agent
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: furumi-dev-metadata-agent
image: ultradesu/furumi-metadata-agent:dev
imagePullPolicy: Always
env:
- name: FURUMI_AGENT_DATABASE_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: PG_STRING
- name: FURUMI_AGENT_INBOX_DIR
value: "/inbox"
- name: FURUMI_AGENT_STORAGE_DIR
value: "/media"
- name: FURUMI_AGENT_OLLAMA_URL
value: "http://ollama.ollama.svc:11434"
- name: FURUMI_AGENT_OLLAMA_MODEL
value: "qwen3:14b"
- name: FURUMI_AGENT_POLL_INTERVAL_SECS
value: "10"
- name: RUST_LOG
value: "info"
ports:
- name: admin-ui
containerPort: 8090
protocol: TCP
volumeMounts:
- name: library
mountPath: /media
- name: inbox
mountPath: /inbox
volumes:
- name: library
hostPath:
path: /k8s/furumi-dev/library
type: DirectoryOrCreate
- name: inbox
hostPath:
path: /k8s/furumi-dev/inbox
type: DirectoryOrCreate

View File

@@ -1,32 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: furumi-dev-metadata-agent
labels:
app: furumi-dev-metadata-agent
spec:
type: ClusterIP
selector:
app: furumi-dev-metadata-agent
ports:
- name: admin-ui
protocol: TCP
port: 8090
targetPort: 8090
---
apiVersion: v1
kind: Service
metadata:
name: furumi-dev-web-player
labels:
app: furumi-dev-web-player
spec:
type: ClusterIP
selector:
app: furumi-dev-web-player
ports:
- name: web-ui
protocol: TCP
port: 8080
targetPort: 8080

View File

@@ -1,70 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: furumi-dev-web-player
labels:
app: furumi-dev-web-player
spec:
replicas: 1
selector:
matchLabels:
app: furumi-dev-web-player
template:
metadata:
labels:
app: furumi-dev-web-player
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: furumi-dev-web-player
image: ultradesu/furumi-web-player:dev
imagePullPolicy: Always
env:
- name: FURUMI_PLAYER_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_CLIENT_ID
- name: FURUMI_PLAYER_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_CLIENT_SECRET
- name: FURUMI_PLAYER_OIDC_ISSUER_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_ISSUER_URL
- name: FURUMI_PLAYER_OIDC_REDIRECT_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_REDIRECT_URL
- name: FURUMI_PLAYER_OIDC_SESSION_SECRET
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_SESSION_SECRET
- name: FURUMI_PLAYER_DATABASE_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: PG_STRING
- name: FURUMI_PLAYER_STORAGE_DIR
value: "/media"
- name: RUST_LOG
value: "info"
ports:
- name: web-ui
containerPort: 8080
protocol: TCP
volumeMounts:
- name: music
mountPath: /media
volumes:
- name: music
hostPath:
path: /k8s/furumi-dev/library
type: DirectoryOrCreate

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: furumi-server
namespace: argocd
spec:
project: apps
destination:
namespace: furumi-server
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/furumi-server
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,75 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: furumi-server
labels:
app: furumi-server
spec:
replicas: 1
selector:
matchLabels:
app: furumi-server
template:
metadata:
labels:
app: furumi-server
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: furumi-server
image: ultradesu/furumi-server:trunk
imagePullPolicy: Always
env:
- name: FURUMI_TOKEN
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: TOKEN
- name: FURUMI_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_CLIENT_ID
- name: FURUMI_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_CLIENT_SECRET
- name: FURUMI_OIDC_ISSUER_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_ISSUER_URL
- name: FURUMI_OIDC_REDIRECT_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_REDIRECT_URL
- name: FURUMI_OIDC_SESSION_SECRET
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_SESSION_SECRET
- name: FURUMI_ROOT
value: "/media"
- name: RUST_LOG
value: "info"
ports:
- name: grpc
containerPort: 50051
protocol: TCP
- name: metrics
containerPort: 9090
protocol: TCP
- name: web-ui
containerPort: 8080
protocol: TCP
volumeMounts:
- name: music
mountPath: /media
volumes:
- name: music
hostPath:
path: /k8s/media/downloads/Lidarr_Music
type: DirectoryOrCreate

View File

@@ -1,65 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: furumi-ng-creds
spec:
target:
name: furumi-ng-creds
deletionPolicy: Delete
template:
type: Opaque
data:
TOKEN: |-
{{ .token }}
OIDC_CLIENT_ID: |-
{{ .client_id }}
OIDC_CLIENT_SECRET: |-
{{ .client_secret }}
OIDC_ISSUER_URL: https://idm.hexor.cy/application/o/furumi-ng-web/
OIDC_REDIRECT_URL: https://music.hexor.cy/auth/callback
OIDC_SESSION_SECRET: |-
{{ .session_secret }}
PG_STRING: |-
postgres://furumi:{{ .pg_pass }}@psql.psql.svc:5432/furumi
data:
- secretKey: token
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: b8b8c3a2-c3fe-42d3-9402-0ae305e1455f
property: fields[0].value
- secretKey: client_id
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: b8b8c3a2-c3fe-42d3-9402-0ae305e1455f
property: fields[1].value
- secretKey: client_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: b8b8c3a2-c3fe-42d3-9402-0ae305e1455f
property: fields[2].value
- secretKey: session_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: b8b8c3a2-c3fe-42d3-9402-0ae305e1455f
property: fields[3].value
- secretKey: pg_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[16].value

View File

@@ -1,59 +0,0 @@
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: admin-strip
spec:
stripPrefix:
prefixes:
- /admin
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: furumi-tls-ingress
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: music.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: furumi-web-player
port:
number: 8080
tls:
- secretName: furumi-tls
hosts:
- '*.hexor.cy'
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: furumi-admin-ingress
annotations:
ingressClassName: traefik
traefik.ingress.kubernetes.io/router.middlewares: furumi-server-admin-strip@kubernetescrd,kube-system-https-redirect@kubernetescrd
spec:
rules:
- host: music.hexor.cy
http:
paths:
- path: /admin
pathType: Prefix
backend:
service:
name: furumi-metadata-agent
port:
number: 8090
tls:
- secretName: furumi-tls
hosts:
- '*.hexor.cy'

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
- deployment.yaml
- service.yaml
- servicemonitor.yaml
- external-secrets.yaml
- ingress.yaml
- web-player.yaml
- metadata-agent.yaml

View File

@@ -1,59 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: furumi-metadata-agent
labels:
app: furumi-metadata-agent
spec:
replicas: 1
selector:
matchLabels:
app: furumi-metadata-agent
template:
metadata:
labels:
app: furumi-metadata-agent
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: furumi-metadata-agent
image: ultradesu/furumi-metadata-agent:trunk
imagePullPolicy: Always
env:
- name: FURUMI_AGENT_DATABASE_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: PG_STRING
- name: FURUMI_AGENT_INBOX_DIR
value: "/inbox"
- name: FURUMI_AGENT_STORAGE_DIR
value: "/media"
- name: FURUMI_AGENT_OLLAMA_URL
value: "http://ollama.ollama.svc:11434"
- name: FURUMI_AGENT_OLLAMA_MODEL
value: "qwen3.5:9b"
- name: FURUMI_AGENT_POLL_INTERVAL_SECS
value: "10"
- name: RUST_LOG
value: "info"
ports:
- name: admin-ui
containerPort: 8090
protocol: TCP
volumeMounts:
- name: library
mountPath: /media
- name: inbox
mountPath: /inbox
volumes:
- name: library
hostPath:
path: /k8s/furumi/library
type: DirectoryOrCreate
- name: inbox
hostPath:
path: /k8s/furumi/inbox
type: DirectoryOrCreate

View File

@@ -1,62 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: furumi-server-grpc
spec:
type: LoadBalancer
selector:
app: furumi-server
ports:
- name: grpc
protocol: TCP
port: 50051
targetPort: 50051
---
apiVersion: v1
kind: Service
metadata:
name: furumi-server-metrics
labels:
app: furumi-server
spec:
type: ClusterIP
selector:
app: furumi-server
ports:
- name: metrics
protocol: TCP
port: 9090
targetPort: 9090
---
apiVersion: v1
kind: Service
metadata:
name: furumi-metadata-agent
labels:
app: furumi-metadata-agent
spec:
type: ClusterIP
selector:
app: furumi-metadata-agent
ports:
- name: admin-ui
protocol: TCP
port: 8090
targetPort: 8090
---
apiVersion: v1
kind: Service
metadata:
name: furumi-web-player
labels:
app: furumi-web-player
spec:
type: ClusterIP
selector:
app: furumi-web-player
ports:
- name: web-ui
protocol: TCP
port: 8080
targetPort: 8080

View File

@@ -1,21 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: furumi-server-metrics
labels:
app: furumi-server
release: prometheus
spec:
selector:
matchLabels:
app: furumi-server
endpoints:
- port: metrics
path: /metrics
interval: 30s
scrapeTimeout: 10s
honorLabels: true
namespaceSelector:
matchNames:
- furumi-server

View File

@@ -1,70 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: furumi-web-player
labels:
app: furumi-web-player
spec:
replicas: 1
selector:
matchLabels:
app: furumi-web-player
template:
metadata:
labels:
app: furumi-web-player
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: furumi-web-player
image: ultradesu/furumi-web-player:trunk
imagePullPolicy: Always
env:
- name: FURUMI_PLAYER_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_CLIENT_ID
- name: FURUMI_PLAYER_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_CLIENT_SECRET
- name: FURUMI_PLAYER_OIDC_ISSUER_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_ISSUER_URL
- name: FURUMI_PLAYER_OIDC_REDIRECT_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_REDIRECT_URL
- name: FURUMI_PLAYER_OIDC_SESSION_SECRET
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: OIDC_SESSION_SECRET
- name: FURUMI_PLAYER_DATABASE_URL
valueFrom:
secretKeyRef:
name: furumi-ng-creds
key: PG_STRING
- name: FURUMI_PLAYER_STORAGE_DIR
value: "/media"
- name: RUST_LOG
value: "info"
ports:
- name: web-ui
containerPort: 8080
protocol: TCP
volumeMounts:
- name: music
mountPath: /media
volumes:
- name: music
hostPath:
path: /k8s/furumi/library
type: DirectoryOrCreate

View File

@@ -77,11 +77,8 @@ spec:
labels:
app: gitea-runner
spec:
tolerations:
- key: workload
operator: Equal
value: desktop
effect: NoSchedule
nodeSelector:
kubernetes.io/hostname: home.homenet
volumes:
- name: docker-sock
hostPath:
@@ -93,36 +90,36 @@ spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- uk-desktop.tail2fe2d.ts.net
- weight: 50
- weight: 3
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- home.homenet
- weight: 30
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- master.tail2fe2d.ts.net
- weight: 10
- weight: 2
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- it.tail2fe2d.ts.net
- ch.tail2fe2d.ts.net
- us.tail2fe2d.ts.net
- nas.homenet
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- home.homenet
- nas.homenet
- master.tail2fe2d.ts.net
containers:
- name: gitea-runner
image: gitea/act_runner:nightly
@@ -130,11 +127,11 @@ spec:
requests:
cpu: "100m"
memory: "256Mi"
ephemeral-storage: "1Gi"
ephemeral-storage: "1Gi" # reserve ephemeral storage
limits:
cpu: "3000m"
memory: "4Gi"
ephemeral-storage: "28Gi"
ephemeral-storage: "28Gi" # hard cap for /data usage
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock

View File

@@ -74,14 +74,19 @@ spec:
- nas.homenet
volumes:
- name: upload-storage
persistentVolumeClaim:
claimName: immich-upload-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/immich/library/
readOnly: false
- name: gphoto-storage
persistentVolumeClaim:
claimName: immich-gphoto-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/immich/GPHOTO/
readOnly: false
- name: camera
persistentVolumeClaim:
claimName: immich-camera-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/Syncthing-repos/PhoneCamera/
readOnly: true
- name: localtime
hostPath:

View File

@@ -1,52 +1,79 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-upload-pv
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/library
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-upload-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-upload-pv
resources:
requests:
storage: 500Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-gphoto-pv
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/GPHOTO
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-gphoto-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-gphoto-pv
resources:
requests:
storage: 500Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-db-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/db
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-db-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-db-pv
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-camera-pvc
namespace: immich
spec:
accessModes:
- ReadOnlyMany
storageClassName: nfs-csi
resources:
requests:
storage: 100Gi

View File

@@ -1,5 +1,5 @@
image:
tag: 10.11.4
tag: 10.10.7
resources:
requests:
memory: "2Gi"
@@ -36,40 +36,8 @@ ingress:
paths:
- path: /
pathType: Prefix
- host: us.hexor.cy
paths:
- path: /
pathType: Prefix
- host: ch.hexor.cy
paths:
- path: /
pathType: Prefix
- host: jp.hexor.cy
paths:
- path: /
pathType: Prefix
- host: spb.hexor.cy
paths:
- path: /
pathType: Prefix
- host: cy.hexor.cy
paths:
- path: /
pathType: Prefix
- host: am.hexor.cy
paths:
- path: /
pathType: Prefix
- host: de.hexor.cy
paths:
- path: /
pathType: Prefix
- host: it.hexor.cy
paths:
- path: /
pathType: Prefix
tls:
- secretName: jellyfin-tls
hosts:
- '*.hexor.cy'
- 'jf.hexor.cy'

View File

@@ -19,7 +19,7 @@ spec:
kubernetes.io/os: linux
containers:
- name: secret-reader
image: ultradesu/k8s-secrets:0.2.1
image: ultradesu/k8s-secrets:0.1.1
imagePullPolicy: Always
args:
- "--secrets"
@@ -28,7 +28,6 @@ spec:
- "k8s-secret"
- "--port"
- "3000"
- "--webhook"
ports:
- containerPort: 3000
name: http

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: lidarr
namespace: argocd
spec:
project: apps
destination:
namespace: lidarr
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/lidarr
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
helmCharts:
- name: lidarr
repo: https://k8s-home-lab.github.io/helm-charts/
version: 15.3.0
releaseName: lidarr
namespace: lidarr
valuesFile: lidarr-values.yaml
includeCRDs: true

View File

@@ -1,27 +0,0 @@
env:
TZ: Asia/Nicosia
resources:
requests:
memory: "512Mi"
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1500m"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
persistence:
config:
enabled: true
type: hostPath
hostPath: /k8s/lidarr
mountPath: /config
downloads:
enabled: true
type: hostPath
hostPath: /k8s/media/downloads
mountPath: /downloads
accessMode: ReadWriteOnce

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: matrix
namespace: argocd
spec:
project: apps
destination:
namespace: matrix
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/matrix
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,95 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: matrix-postgres-creds
spec:
target:
name: matrix-postgres-creds
deletionPolicy: Delete
template:
type: Opaque
data:
synapse_db_password: |-
{{ .synapse_db_password }}
mas_db_password: |-
{{ .mas_db_password }}
data:
- secretKey: synapse_db_password
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[14].value
- secretKey: mas_db_password
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[15].value
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: matrix-oidc-config
spec:
target:
name: matrix-oidc-config
deletionPolicy: Delete
template:
type: Opaque
data:
mas-oidc.yaml: |
upstream_oauth2:
providers:
- id: 001KKV4EKY7KG98W2M9T806K6A
human_name: Authentik
issuer: https://idm.hexor.cy/application/o/matrix/
client_id: "{{ .oauth_client_id }}"
client_secret: "{{ .oauth_client_secret }}"
token_endpoint_auth_method: client_secret_post
scope: "openid profile email"
claims_imports:
localpart:
action: suggest
template: "{{ `{{ user.preferred_username | split(\"@\") | first }}` }}"
displayname:
action: suggest
template: "{{ `{{ user.name }}` }}"
email:
action: suggest
template: "{{ `{{ user.email }}` }}"
set_email_verification: always
data:
- secretKey: oauth_client_id
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: ca76867f-49f3-4a30-9ef3-b05af35ee49a
property: fields[0].value
- secretKey: oauth_client_secret
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: ca76867f-49f3-4a30-9ef3-b05af35ee49a
property: fields[1].value

View File

@@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
- external-secrets.yaml
helmCharts:
- name: matrix-stack
repo: oci://ghcr.io/element-hq/ess-helm
version: 26.2.3
releaseName: matrix-stack
namespace: matrix
valuesFile: matrix-stack-values.yaml
includeCRDs: true

View File

@@ -1,112 +0,0 @@
## Matrix server name - appears in @user:matrix.hexor.cy
serverName: matrix.hexor.cy
## Use letsencrypt cluster issuer for all ingresses
certManager:
clusterIssuer: letsencrypt
## Global ingress settings
ingress:
className: traefik
annotations:
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
## Disable built-in PostgreSQL - using external database
postgres:
enabled: false
## Disable components we don't need yet
hookshot:
enabled: false
## MatrixRTC - voice/video calls via LiveKit SFU
matrixRTC:
enabled: true
ingress:
host: livekit.matrix.hexor.cy
sfu:
enabled: true
manualIP: "138.201.61.182"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
exposedServices:
rtcTcp:
enabled: true
port: 30881
rtcMuxedUdp:
enabled: true
port: 30882
turnTLS:
enabled: true
port: 31443
domain: turn.matrix.hexor.cy
tlsTerminationOnPod: true
## Synapse homeserver
synapse:
enabled: true
ingress:
host: synapse.matrix.hexor.cy
postgres:
host: psql.psql.svc
port: 5432
user: synapse
database: synapse
sslMode: prefer
password:
secret: matrix-postgres-creds
secretKey: synapse_db_password
media:
storage:
size: 20Gi
maxUploadSize: 100M
# nodeSelector:
# kubernetes.io/hostname: nas.homenet
## Matrix Authentication Service
matrixAuthenticationService:
enabled: true
ingress:
host: auth.matrix.hexor.cy
postgres:
host: psql.psql.svc
port: 5432
user: mas
database: mas
sslMode: prefer
password:
secret: matrix-postgres-creds
secretKey: mas_db_password
## Admin policy
additional:
0-admin-policy:
config: |
policy:
data:
admin_users:
- username: ultradesu
1-oidc:
configSecret: matrix-oidc-config
configSecretKey: mas-oidc.yaml
# nodeSelector:
# kubernetes.io/hostname: nas.homenet
## Element Web client
elementWeb:
enabled: true
ingress:
host: chat.matrix.hexor.cy
# nodeSelector:
# kubernetes.io/hostname: nas.homenet
## Element Admin panel
elementAdmin:
enabled: true
ingress:
host: admin.matrix.hexor.cy
# nodeSelector:
# kubernetes.io/hostname: nas.homenet
## Well-known delegation on the base domain (host is derived from serverName)
wellKnownDelegation:
enabled: true

View File

@@ -1,53 +0,0 @@
# Multi-stage, multi-arch image for Telegram MTProxy.
# Stage 1: compile mtproto-proxy for $TARGETARCH on the build platform.
FROM --platform=$BUILDPLATFORM debian:bookworm-slim AS builder
ARG TARGETARCH
# Native toolchain and headers required by the MTProxy Makefile.
RUN apt-get update && apt-get install -y \
    git curl make gcc libssl-dev zlib1g-dev \
    && rm -rf /var/lib/apt/lists/*
# Cross-build support: add the target architecture's gcc and libraries
# only when the build host architecture differs from $TARGETARCH.
RUN if [ "$(dpkg --print-architecture)" != "$TARGETARCH" ]; then \
    dpkg --add-architecture $TARGETARCH && \
    apt-get update && \
    case "$TARGETARCH" in \
        arm64) apt-get install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 ;; \
        amd64) apt-get install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 ;; \
    esac && \
    rm -rf /var/lib/apt/lists/*; \
    fi
RUN git clone https://github.com/TelegramMessenger/MTProxy.git /src
WORKDIR /src
# Export the cross compiler only for a cross build, then compile.
RUN NATIVE=$(dpkg --print-architecture) && \
    if [ "$NATIVE" != "$TARGETARCH" ]; then \
    case "$TARGETARCH" in \
        arm64) export CC=aarch64-linux-gnu-gcc ;; \
        amd64) export CC=x86_64-linux-gnu-gcc ;; \
    esac; \
    fi && \
    make -j$(nproc)
# Stage 2: minimal runtime image.
FROM debian:bookworm-slim
ENV PROXY_PORT=30443
ENV STATS_PORT=8888
ENV WORKERS=1
ENV RUN_USER=nobody
RUN apt-get update && apt-get install -y \
    curl libssl3 zlib1g xxd \
    && rm -rf /var/lib/apt/lists/*
COPY --from=builder /src/objs/bin/mtproto-proxy /usr/local/bin/mtproto-proxy
# -fsS: fail the build on HTTP errors. Plain `curl -s` exits 0 on 4xx/5xx and
# would silently bake an HTML error page into the secret/config files.
RUN curl -fsS https://core.telegram.org/getProxySecret -o /etc/mtproxy/proxy-secret --create-dirs && \
    curl -fsS https://core.telegram.org/getProxyConfig -o /etc/mtproxy/proxy-multi.conf
# Shell form on purpose: the ENV values must be expanded at container start.
ENTRYPOINT mtproto-proxy \
    -u ${RUN_USER} \
    -p ${STATS_PORT} \
    -H ${PROXY_PORT} \
    -M ${WORKERS} \
    --aes-pwd /etc/mtproxy/proxy-secret \
    /etc/mtproxy/proxy-multi.conf

View File

@@ -1,20 +0,0 @@
# Argo CD Application: deploys k8s/apps/mtproxy into the `mtproxy` namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: mtproxy
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: mtproxy
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/mtproxy
  # Fully automated sync: heal drift and prune resources removed from git.
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,117 +0,0 @@
---
# MTProxy DaemonSet: runs one proxy pod on every node carrying the `mtproxy`
# label, using hostNetwork so the proxy port is reachable on the node IP.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: mtproxy
  labels:
    app: mtproxy
spec:
  selector:
    matchLabels:
      app: mtproxy
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: mtproxy
    spec:
      # Schedule only on nodes explicitly opted in via an `mtproxy` label.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: mtproxy
                    operator: Exists
      serviceAccountName: mtproxy
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      initContainers:
        # Downloads Telegram's proxy secret/config and publishes this node's
        # tg:// link into the shared `mtproxy-links` secret.
        - name: register-proxy
          image: bitnami/kubectl:latest
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: SECRET
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: SECRET
            - name: PORT
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: PORT
          volumeMounts:
            - name: data
              mountPath: /data
          command:
            - /bin/bash
            - -c
            - |
              set -e
              # -fsS: fail on HTTP errors so `set -e` aborts the init container
              # instead of writing an error page into the secret files
              # (plain `curl -s` exits 0 on 4xx/5xx responses).
              curl -fsS https://core.telegram.org/getProxySecret -o /data/proxy-secret
              curl -fsS https://core.telegram.org/getProxyConfig -o /data/proxy-multi.conf
              NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
              SERVER=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.mtproxy}')
              if [ -z "${SERVER}" ]; then
                echo "ERROR: node ${NODE_NAME} has no mtproxy label"
                exit 1
              fi
              LINK="tg://proxy?server=${SERVER}&port=${PORT}&secret=${SECRET}"
              echo "Registering: ${SERVER} -> ${LINK}"
              if kubectl get secret mtproxy-links -n "${NAMESPACE}" &>/dev/null; then
                kubectl patch secret mtproxy-links -n "${NAMESPACE}" \
                  --type merge -p "{\"stringData\":{\"${SERVER}\":\"${LINK}\"}}"
              else
                kubectl create secret generic mtproxy-links -n "${NAMESPACE}" \
                  --from-literal="${SERVER}=${LINK}"
              fi
              echo "Done"
      containers:
        - name: mtproxy
          image: telegrammessenger/proxy:latest
          # image: ultradesu/mtproxy:v0.02
          imagePullPolicy: Always
          ports:
            - name: proxy
              containerPort: 30443
              protocol: TCP
          # $(VAR) references are expanded by Kubernetes from the env list below.
          command:
            - /bin/sh
            - -c
            - >-
              mtproto-proxy
              -u nobody
              -p 8888
              -H $(PORT)
              -M 1
              -S $(SECRET)
              --aes-pwd /data/proxy-secret
              /data/proxy-multi.conf
          env:
            - name: SECRET
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: SECRET
            - name: PORT
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: PORT
          volumeMounts:
            - name: data
              mountPath: /data
          #resources:
          #  requests:
          #    memory: "128Mi"
          #    cpu: "100m"
          #  limits:
          #    memory: "256Mi"
          #    cpu: "500m"
      volumes:
        - name: data
          emptyDir: {}

View File

@@ -1,25 +0,0 @@
---
# ExternalSecret: renders the MTProxy secret (pulled from Vaultwarden) plus
# the fixed proxy port into the `tgproxy-secret` Opaque secret.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: tgproxy-secret
spec:
  target:
    name: tgproxy-secret
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        SECRET: |-
          {{ .secret }}
        # Must match the DaemonSet's containerPort (30443).
        PORT: "30443"
  data:
    - secretKey: secret
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        # NOTE(review): opaque Vaultwarden item id + positional field index —
        # verify against the vault item if the secret stops resolving.
        key: 58a37daf-72d8-430d-86bd-6152aa8f888d
        property: fields[0].value

View File

@@ -1,11 +0,0 @@
# Kustomize entrypoint for the mtproxy app.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./app.yaml
  - ./rbac.yaml
  - ./daemonset.yaml
  - ./external-secrets.yaml
  - ./service.yaml
  - ./secret-reader.yaml
  # storage.yaml intentionally disabled (DaemonSet uses emptyDir instead).
  # - ./storage.yaml

View File

@@ -1,58 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mtproxy
  labels:
    app: mtproxy
---
# Cluster-wide read on nodes: the init container looks up the node's
# `mtproxy` label to build the proxy link.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: mtproxy-node-reader
  labels:
    app: mtproxy
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: mtproxy-node-reader
  labels:
    app: mtproxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: mtproxy-node-reader
subjects:
  - kind: ServiceAccount
    name: mtproxy
    namespace: mtproxy
---
# Namespace-scoped secret management: lets the init container create and
# patch the `mtproxy-links` secret.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: mtproxy-secret-manager
  labels:
    app: mtproxy
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: mtproxy-secret-manager
  labels:
    app: mtproxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: mtproxy-secret-manager
subjects:
  - kind: ServiceAccount
    name: mtproxy
    # ServiceAccount subjects require an explicit namespace; without it the
    # API server rejects the binding ("subjects[0].namespace: Required value").
    # Matches the namespace used by the ClusterRoleBinding above.
    namespace: mtproxy

View File

@@ -1,63 +0,0 @@
# secret-reader: small HTTP service exposing the generated `mtproxy-links`
# secret (one tg:// link per node) for consumption outside the cluster.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: secret-reader
  labels:
    app: secret-reader
spec:
  replicas: 1
  selector:
    matchLabels:
      app: secret-reader
  template:
    metadata:
      labels:
        app: secret-reader
    spec:
      # Reuses the mtproxy SA, whose Role grants `get` on secrets here.
      serviceAccountName: mtproxy
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: secret-reader
          image: ultradesu/k8s-secrets:0.2.1
          imagePullPolicy: Always
          args:
            - "--secrets"
            - "mtproxy-links"
            - "--namespace"
            - "mtproxy"
            - "--port"
            - "3000"
          ports:
            - containerPort: 3000
              name: http
          env:
            - name: RUST_LOG
              value: "info"
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "150m"
          livenessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
          # Hardened container: non-root, read-only root FS, no capabilities.
          securityContext:
            runAsNonRoot: true
            runAsUser: 1000
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL

View File

@@ -1,16 +0,0 @@
---
# ClusterIP service in front of secret-reader (port 80 -> container 3000).
apiVersion: v1
kind: Service
metadata:
  name: secret-reader
  labels:
    app: secret-reader
spec:
  type: ClusterIP
  selector:
    app: secret-reader
  ports:
    - port: 80
      targetPort: 3000
      protocol: TCP
      name: http

View File

@@ -1,12 +0,0 @@
---
# NOTE(review): not referenced by kustomization.yaml (entry is commented out);
# confirm it is still wanted before re-enabling.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mtproxy-data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 1Gi

View File

@@ -1,21 +0,0 @@
# Argo CD Application: deploys k8s/apps/n8n into the `n8n` namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: n8n
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: n8n
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/n8n
  # Fully automated sync: heal drift and prune resources removed from git.
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,165 +0,0 @@
---
# n8n main instance: serves the web UI/API and the task-broker endpoint for
# external runners. Workflow execution happens in the worker deployment
# (EXECUTIONS_MODE=queue via the n8n-redis service).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-main
  labels:
    app: n8n
    component: main
spec:
  replicas: 1
  selector:
    matchLabels:
      app: n8n
      component: main
  template:
    metadata:
      labels:
        app: n8n
        component: main
    spec:
      serviceAccountName: n8n
      initContainers:
        # One-shot download of kubectl into the shared tools PVC (skipped if
        # already present), so workflows can shell out to kubectl via PATH.
        - name: install-tools
          image: alpine:3.22
          command:
            - /bin/sh
            - -c
            - |
              set -e
              if [ -x /tools/kubectl ]; then
                echo "kubectl already exists, skipping download"
                /tools/kubectl version --client
                exit 0
              fi
              echo "Downloading kubectl..."
              ARCH=$(uname -m)
              case $ARCH in
                x86_64) ARCH="amd64" ;;
                aarch64) ARCH="arm64" ;;
              esac
              wget -O /tools/kubectl "https://dl.k8s.io/release/$(wget -qO- https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl"
              chmod +x /tools/kubectl
              /tools/kubectl version --client
          volumeMounts:
            - name: tools
              mountPath: /tools
          securityContext:
            runAsUser: 1000
            runAsGroup: 1000
            runAsNonRoot: true
      containers:
        - name: n8n
          # NOTE(review): `latest` tag — consider pinning for reproducible rollouts.
          image: n8nio/n8n:latest
          ports:
            - containerPort: 5678
              name: http
            - containerPort: 5679
              name: task-broker
          env:
            # /opt/tools first so the kubectl installed above wins.
            - name: PATH
              value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            - name: HOME
              value: "/home/node"
            - name: N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS
              value: "true"
            - name: NODES_EXCLUDE
              value: "[]"
            # External URL/protocol as seen behind the traefik ingress.
            - name: N8N_HOST
              value: "n8n.hexor.cy"
            - name: N8N_PORT
              value: "5678"
            - name: N8N_PROTOCOL
              value: "https"
            # External task runners connect to the broker on port 5679.
            - name: N8N_RUNNERS_ENABLED
              value: "true"
            - name: N8N_RUNNERS_MODE
              value: "external"
            - name: N8N_RUNNERS_BROKER_LISTEN_ADDRESS
              value: "0.0.0.0"
            - name: N8N_LISTEN_ADDRESS
              value: "0.0.0.0"
            - name: N8N_RUNNERS_BROKER_PORT
              value: "5679"
            # Queue mode: executions are dispatched to workers through Redis.
            - name: EXECUTIONS_MODE
              value: "queue"
            - name: QUEUE_BULL_REDIS_HOST
              value: "n8n-redis"
            - name: QUEUE_BULL_REDIS_PORT
              value: "6379"
            - name: NODE_ENV
              value: "production"
            - name: WEBHOOK_URL
              value: "https://n8n.hexor.cy/"
            - name: N8N_PROXY_HOPS
              value: "1"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            # External PostgreSQL; credentials come from the ExternalSecret.
            - name: DB_TYPE
              value: "postgresdb"
            - name: DB_POSTGRESDB_HOST
              value: "psql.psql.svc"
            - name: DB_POSTGRESDB_DATABASE
              value: "n8n"
            - name: DB_POSTGRESDB_USER
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: username
            - name: DB_POSTGRESDB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: password
            - name: N8N_ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: encryptionkey
            - name: N8N_RUNNERS_AUTH_TOKEN
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: runnertoken
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
            - name: tools
              mountPath: /opt/tools
          resources:
            requests:
              cpu: 2000m
              memory: 512Mi
            limits:
              cpu: 4000m
              memory: 2048Mi
          # Generous initial delays: first boot runs DB migrations.
          livenessProbe:
            httpGet:
              path: /healthz
              port: http
            initialDelaySeconds: 240
            periodSeconds: 30
            timeoutSeconds: 20
            failureThreshold: 10
          readinessProbe:
            httpGet:
              path: /healthz/readiness
              port: http
            initialDelaySeconds: 120
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 15
      volumes:
        # RWX claims: shared with the worker and runner deployments.
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
        - name: tools
          persistentVolumeClaim:
            claimName: n8n-tools
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -1,87 +0,0 @@
---
# External task runner pool for n8n: connects to the main instance's task
# broker (port 5679) and executes code tasks with bounded concurrency.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-runner
  labels:
    app: n8n
    component: runner
spec:
  replicas: 2
  selector:
    matchLabels:
      app: n8n
      component: runner
  template:
    metadata:
      labels:
        app: n8n
        component: runner
    spec:
      serviceAccountName: n8n
      containers:
        - name: n8n-runner
          image: n8nio/runners:latest
          ports:
            - containerPort: 5680
              name: health
          env:
            - name: PATH
              value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            - name: HOME
              value: "/home/node"
            - name: N8N_RUNNERS_TASK_BROKER_URI
              value: "http://n8n:5679"
            - name: N8N_RUNNERS_LAUNCHER_LOG_LEVEL
              value: "info"
            - name: N8N_RUNNERS_MAX_CONCURRENCY
              value: "10"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            # Must match the token configured on the main instance.
            - name: N8N_RUNNERS_AUTH_TOKEN
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: runnertoken
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
            - name: tools
              mountPath: /opt/tools
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
          # Probes reference the named container port (instead of repeating
          # 5680) for consistency with n8n-main and a single source of truth.
          livenessProbe:
            httpGet:
              path: /healthz
              port: health
            initialDelaySeconds: 30
            periodSeconds: 30
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /healthz
              port: health
            initialDelaySeconds: 15
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
      volumes:
        # RWX claims shared with the main and worker deployments.
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
        - name: tools
          persistentVolumeClaim:
            claimName: n8n-tools
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -1,84 +0,0 @@
---
# n8n queue workers: pull executions from Redis (EXECUTIONS_MODE=queue) and
# run them against the same external PostgreSQL as the main instance.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-worker
  labels:
    app: n8n
    component: worker
spec:
  replicas: 2
  selector:
    matchLabels:
      app: n8n
      component: worker
  template:
    metadata:
      labels:
        app: n8n
        component: worker
    spec:
      serviceAccountName: n8n
      containers:
        - name: n8n-worker
          image: n8nio/n8n:latest
          command:
            - n8n
            - worker
          env:
            - name: HOME
              value: "/home/node"
            - name: N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS
              value: "true"
            - name: EXECUTIONS_MODE
              value: "queue"
            - name: QUEUE_BULL_REDIS_HOST
              value: "n8n-redis"
            - name: QUEUE_BULL_REDIS_PORT
              value: "6379"
            - name: NODE_ENV
              value: "production"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            - name: DB_TYPE
              value: "postgresdb"
            - name: DB_POSTGRESDB_HOST
              value: "psql.psql.svc"
            - name: DB_POSTGRESDB_DATABASE
              value: "n8n"
            - name: DB_POSTGRESDB_USER
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: username
            - name: DB_POSTGRESDB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: password
            # Must equal the main instance's key so stored credentials decrypt.
            - name: N8N_ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: encryptionkey
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
          # NOTE(review): no liveness/readiness probes here, unlike the main
          # and runner deployments — confirm whether that is intentional.
      volumes:
        # RWX claim shared with the main instance.
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -1,50 +0,0 @@
---
# ExternalSecret: assembles the n8n `credentials` secret from Vaultwarden —
# DB password, settings encryption key, and the runner auth token.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: credentials
spec:
  target:
    name: credentials
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        password: "{{ .psql | trim }}"
        username: "n8n"
        encryptionkey: "{{ .enc_pass | trim }}"
        runnertoken: "{{ .runner_token | trim }}"
  data:
    # NOTE(review): positional field indices (fields[13], fields[0], fields[1])
    # into Vaultwarden items are fragile — verify after editing the vault items.
    - secretKey: psql
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        metadataPolicy: None
        key: 2a9deb39-ef22-433e-a1be-df1555625e22
        property: fields[13].value
    - secretKey: enc_pass
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        metadataPolicy: None
        key: 18c92d73-9637-4419-8642-7f7b308460cb
        property: fields[0].value
    - secretKey: runner_token
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        metadataPolicy: None
        key: 18c92d73-9637-4419-8642-7f7b308460cb
        property: fields[1].value

View File

@@ -1,28 +0,0 @@
---
# Public ingress for the n8n UI/webhooks: TLS via cert-manager, HTTP->HTTPS
# redirect via the traefik middleware.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: n8n
  labels:
    app: n8n
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  ingressClassName: traefik
  tls:
    - hosts:
        - n8n.hexor.cy
      secretName: n8n-tls
  rules:
    - host: n8n.hexor.cy
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: n8n
                port:
                  number: 80

View File

@@ -1,28 +0,0 @@
# Kustomize entrypoint for the n8n app: plain manifests plus the yacy helm
# chart rendered into the same namespace.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - external-secrets.yaml
  - storage.yaml
  - rbac.yaml
  - redis-deployment.yaml
  - redis-service.yaml
  - paddleocr-deployment.yaml
  - paddleocr-service.yaml
  - deployment-main.yaml
  - deployment-worker.yaml
  - deployment-runner.yaml
  - service.yaml
  - ingress.yaml
helmCharts:
  - name: yacy
    repo: https://gt.hexor.cy/api/packages/ab/helm
    version: 0.1.2
    releaseName: yacy
    namespace: n8n
    valuesFile: values-yacy.yaml
    includeCRDs: true
# NOTE(review): commonLabels is also injected into selectors; changing it
# later forces recreation of Deployments/Services with immutable selectors.
commonLabels:
  app.kubernetes.io/name: n8n

View File

@@ -1,43 +0,0 @@
---
# PaddleOCR sidecar service used by n8n workflows for OCR over HTTP.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: paddleocr
  labels:
    app: paddleocr
    component: n8n
spec:
  replicas: 1
  selector:
    matchLabels:
      app: paddleocr
      component: n8n
  template:
    metadata:
      labels:
        app: paddleocr
        component: n8n
    spec:
      containers:
        - name: paddleocr
          image: c403/paddleocr
          ports:
            - containerPort: 5000
              name: http
          resources:
            requests:
              cpu: 200m
              memory: 512Mi
            limits:
              cpu: 1000m
              memory: 2Gi
          # TCP-only probes: the OCR endpoint has no dedicated health path here.
          livenessProbe:
            tcpSocket:
              port: 5000
            initialDelaySeconds: 60
            periodSeconds: 30
          readinessProbe:
            tcpSocket:
              port: 5000
            initialDelaySeconds: 30
            periodSeconds: 10

View File

@@ -1,18 +0,0 @@
---
# ClusterIP service for paddleocr (port 80 -> container 5000).
apiVersion: v1
kind: Service
metadata:
  name: paddleocr
  labels:
    app: paddleocr
    component: n8n
spec:
  selector:
    app: paddleocr
    component: n8n
  ports:
    - name: http
      port: 80
      targetPort: 5000
      protocol: TCP
  type: ClusterIP

View File

@@ -1,45 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: n8n
---
# Cluster-wide read-only access for n8n workflows that query the cluster
# via the kubectl installed by the init container.
# NOTE(review): `resources: ["*"]` in the core group includes Secrets —
# this grants read of every Secret in the cluster; scope down if unneeded.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: n8n-clusterrole
rules:
  # Core API group ("")
  - apiGroups: [""]
    resources: ["*"]
    verbs: ["get", "list", "watch"]
  # Common built-in API groups
  - apiGroups: ["apps", "batch", "autoscaling", "extensions", "policy"]
    resources: ["*"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["networking.k8s.io", "rbac.authorization.k8s.io", "apiextensions.k8s.io"]
    resources: ["*"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["coordination.k8s.io", "discovery.k8s.io", "events.k8s.io"]
    resources: ["*"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io", "admissionregistration.k8s.io", "authentication.k8s.io", "authorization.k8s.io"]
    resources: ["*"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: n8n-clusterrolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: n8n-clusterrole
subjects:
  - kind: ServiceAccount
    name: n8n
    namespace: n8n

View File

@@ -1,57 +0,0 @@
---
# Redis backing the n8n Bull execution queue.
# NOTE(review): AOF persistence (`--appendonly yes`) is enabled but /data is
# an emptyDir, so queue state is still lost whenever the pod is rescheduled.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-redis
  labels:
    app: redis
    component: n8n
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
      component: n8n
  template:
    metadata:
      labels:
        app: redis
        component: n8n
    spec:
      containers:
        - name: redis
          image: redis:7-alpine
          ports:
            - containerPort: 6379
              name: redis
          # AOF on, plus RDB snapshot after 900s if >=1 key changed.
          command:
            - redis-server
            - --appendonly
            - "yes"
            - --save
            - "900 1"
          volumeMounts:
            - name: redis-data
              mountPath: /data
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              cpu: 200m
              memory: 256Mi
          livenessProbe:
            tcpSocket:
              port: 6379
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            exec:
              command:
                - redis-cli
                - ping
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: redis-data
          emptyDir: {}

View File

@@ -1,18 +0,0 @@
---
# ClusterIP service for the n8n queue Redis (matches QUEUE_BULL_REDIS_HOST).
apiVersion: v1
kind: Service
metadata:
  name: n8n-redis
  labels:
    app: redis
    component: n8n
spec:
  selector:
    app: redis
    component: n8n
  ports:
    - name: redis
      port: 6379
      targetPort: 6379
      protocol: TCP
  type: ClusterIP

View File

@@ -1,21 +0,0 @@
---
# Service for the n8n main instance only (component: main): the ingress hits
# port 80 -> UI 5678; runners connect to the task broker on 5679.
apiVersion: v1
kind: Service
metadata:
  name: n8n
  labels:
    app: n8n
spec:
  selector:
    app: n8n
    component: main
  ports:
    - name: http
      port: 80
      targetPort: 5678
      protocol: TCP
    - name: task-broker
      port: 5679
      targetPort: 5679
      protocol: TCP
  type: ClusterIP

View File

@@ -1,24 +0,0 @@
---
# RWX claims: n8n-data is mounted by the main, worker and runner deployments;
# n8n-tools holds the kubectl binary installed by the init container.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: n8n-data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: longhorn
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: n8n-tools
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: longhorn
  resources:
    requests:
      storage: 20Gi

View File

@@ -1,24 +0,0 @@
# Values for the yacy helm chart (referenced from kustomization.yaml):
# single-node intranet crawler pinned to the master node, with peer-to-peer
# features (DHT, remote crawl, index exchange) disabled.
nodeSelector:
  kubernetes.io/hostname: master.tail2fe2d.ts.net
resources:
  limits:
    memory: 2Gi
  requests:
    memory: 1Gi
persistence:
  enabled: true
  size: 10Gi
yacy:
  network:
    mode: "intranet"
  # Values are strings on purpose — yacy config keys are written verbatim.
  config:
    network.unit.bootstrap.seedlist: ""
    network.unit.remotecrawl: "false"
    network.unit.dhtredundancy.junior: "1"
    network.unit.dhtredundancy.senior: "1"
    index.receive.allow: "false"
    index.distribute.allow: "false"
    crawl.response.timeout: "10000"

View File

@@ -3,24 +3,19 @@ kind: Kustomization
resources:
- external-secrets.yaml
- local-pv.yaml
- open-terminal.yaml
helmCharts:
- name: ollama
repo: https://otwld.github.io/ollama-helm/
version: 1.49.0
version: 0.4.0
releaseName: ollama
namespace: ollama
valuesFile: ollama-values.yaml
includeCRDs: true
- name: open-webui
repo: https://helm.openwebui.com/
version: 12.10.0
version: 8.14.0
releaseName: openweb-ui
namespace: ollama
valuesFile: openweb-ui-values.yaml
includeCRDs: true
patches:
- path: patch-runtimeclass.yaml
includeCRDs: true

View File

@@ -1,22 +0,0 @@
# Pre-provisioned local PV for ollama model storage, bound to a single node.
# The path must already exist on that node; Retain keeps data on release.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ollama-local-pv
spec:
  capacity:
    storage: 100Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /var/lib/ollama
  # Local volumes require nodeAffinity pinning the PV to its host.
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - uk-desktop.tail2fe2d.ts.net

View File

@@ -3,20 +3,6 @@ image:
pullPolicy: Always
tag: "latest"
nodeSelector:
kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
tolerations:
- key: workload
operator: Equal
value: desktop
effect: NoSchedule
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
ollama:
gpu:
enabled: true
type: 'nvidia'
number: 1
persistentVolume:
enabled: true
size: 100Gi
storageClass: "local-path"

View File

@@ -1,53 +0,0 @@
---
# open-terminal: terminal backend for Open WebUI, pinned to the desktop node.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: open-terminal
  labels:
    app: open-terminal
spec:
  replicas: 1
  selector:
    matchLabels:
      app: open-terminal
  template:
    metadata:
      labels:
        app: open-terminal
    spec:
      nodeSelector:
        kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
      tolerations:
        - key: workload
          operator: Equal
          value: desktop
          effect: NoSchedule
      containers:
        - name: open-terminal
          image: ghcr.io/open-webui/open-terminal:latest
          ports:
            - containerPort: 8000
          env:
            # NOTE(review): API key is hardcoded in the manifest — looks like a
            # placeholder; if it is a real credential, move it to a Secret.
            - name: OPEN_TERMINAL_API_KEY
              value: "LOCAL_ACCESS_TOKEN"
          resources:
            requests:
              cpu: 100m
              memory: 256Mi
            limits:
              cpu: "2"
              memory: 2Gi
---
apiVersion: v1
kind: Service
metadata:
  name: open-terminal
  labels:
    app: open-terminal
spec:
  selector:
    app: open-terminal
  ports:
    - port: 8000
      targetPort: 8000
      protocol: TCP

View File

@@ -1,4 +1,4 @@
clusterDomain: cluster.local
clusterDomain: ai.hexor.cy
extraEnvVars:
GLOBAL_LOG_LEVEL: debug
@@ -32,22 +32,12 @@ ollama:
pipelines:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
tika:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
websocket:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
redis:
master:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: true
@@ -56,5 +46,7 @@ ingress:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
host: "ai.hexor.cy"
tls: true
existingSecret: ollama-tls
tls:
- hosts:
- '*.hexor.cy'
secretName: ollama-tls

View File

@@ -1,9 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
namespace: ollama
spec:
template:
spec:
runtimeClassName: nvidia

View File

@@ -4,7 +4,6 @@ kind: Kustomization
resources:
- app.yaml
- external-secrets.yaml
- paperless-ai.yaml
helmCharts:
- name: paperless-ngx
@@ -28,11 +27,4 @@ helmCharts:
namespace: paperless
valuesFile: gotenberg-values.yaml
includeCRDs: true
#- name: redis
# repo: oci://registry-1.docker.io/bitnamicharts/redis
# version: 24.1.0
# releaseName: redis
# namespace: paperless
# includeCRDs: true
# valuesFile: bazarr-values.yaml

View File

@@ -1,101 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: paperless-ai
labels:
app: paperless-ai
spec:
replicas: 1
selector:
matchLabels:
app: paperless-ai
template:
metadata:
labels:
app: paperless-ai
spec:
nodeSelector:
kubernetes.io/hostname: nas.homenet
containers:
- name: paperless-ai
image: clusterzx/paperless-ai:latest
imagePullPolicy: Always
ports:
- containerPort: 3000
name: http
env:
- name: NODE_ENV
value: production
- name: PAPERLESS_AI_PORT
value: "3000"
resources:
requests:
memory: 512Mi
cpu: 500m
limits:
memory: 1024Mi
cpu: 2000m
#livenessProbe:
# httpGet:
# path: /
# port: 8000
# initialDelaySeconds: 30
# periodSeconds: 10
#readinessProbe:
# httpGet:
# path: /
# port: 8000
# initialDelaySeconds: 5
# periodSeconds: 5
volumeMounts:
- name: data
mountPath: /app/data
volumes:
- name: data
hostPath:
path: /mnt/storage/Storage/k8s/paperless/ai-data
type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
name: paperless-ai
namespace: paperless
labels:
app: paperless-ai
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app: paperless-ai
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: paperless-ai-ingress
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: ai-docs.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: paperless-ai
port:
number: 3000
tls:
- secretName: docs-tls
hosts:
- '*.hexor.cy'

View File

@@ -1,5 +1,5 @@
image:
tag: latest
tag: 2.19.3
resources:
requests:
memory: "1Gi"
@@ -9,7 +9,7 @@ resources:
cpu: "3000m"
initContainers:
install-tesseract-langs:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
image: ghcr.io/paperless-ngx/paperless-ngx:2.18.2
resources:
requests:
memory: "256Mi"
@@ -107,8 +107,6 @@ persistence:
- path: /usr/src/paperless/consume
redis:
enabled: true
image:
tag: latest
master:
nodeSelector:
kubernetes.io/hostname: nas.homenet

View File

@@ -112,8 +112,47 @@ spec:
- name: scripts
mountPath: /scripts
containers:
- name: xray-exporter
image: alpine:3.18
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- /scripts/exporter-start.sh
ports:
- name: metrics
containerPort: 9550
protocol: TCP
livenessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "150m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: true
- name: scripts
mountPath: /scripts
- name: pasarguard-node
image: 'pasarguard/node:v0.2.1'
image: 'pasarguard/node:v0.1.1'
imagePullPolicy: Always
command:
- /bin/sh
@@ -162,56 +201,16 @@ spec:
resources:
requests:
memory: "128Mi"
#cpu: "500m"
cpu: "100m"
limits:
memory: "512Mi"
#cpu: "1200m"
cpu: "750m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: false
- name: scripts
mountPath: /scripts
- name: xray-exporter
image: alpine:3.18
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- /scripts/exporter-start.sh
ports:
- name: metrics
containerPort: 9550
protocol: TCP
livenessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "500m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: true
- name: scripts
mountPath: /scripts
volumes:
- name: shared-data
emptyDir: {}

View File

@@ -34,7 +34,7 @@ spec:
mountPath: /templates/subscription
containers:
- name: pasarguard-web
image: 'pasarguard/panel:latest'
image: 'pasarguard/panel:v1.7.2'
imagePullPolicy: Always
envFrom:
- secretRef:

View File

@@ -28,6 +28,22 @@ spec:
- name: http
containerPort: 3010
protocol: TCP
livenessProbe:
httpGet:
path: /
port: 3010
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /
port: 3010
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 3
resources:
requests:
memory: "64Mi"

View File

@@ -16,18 +16,18 @@ helmCharts:
valuesFile: syncthing-master.yaml
includeCRDs: true
- name: syncthing
repo: https://k8s-home-lab.github.io/helm-charts
version: 4.0.0
releaseName: syncthing-khv
namespace: syncthing
valuesFile: syncthing-khv.yaml
includeCRDs: true
- name: syncthing
repo: https://k8s-home-lab.github.io/helm-charts
version: 4.0.0
releaseName: syncthing-nas
namespace: syncthing
valuesFile: syncthing-nas.yaml
includeCRDs: true
# - name: syncthing
# repo: https://k8s-home-lab.github.io/helm-charts
# version: 4.0.0
# releaseName: syncthing-khv
# namespace: syncthing
# valuesFile: syncthing-khv.yaml
# includeCRDs: true
includeCRDs: true

View File

@@ -1,3 +1,4 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -22,7 +23,7 @@ spec:
kubernetes.io/hostname: home.homenet
containers:
- name: desubot
image: "ultradesu/desubot:latest"
image: 'ultradesu/desubot:latest'
imagePullPolicy: Always
envFrom:
- secretRef:
@@ -31,11 +32,11 @@ spec:
- name: RUST_LOG
value: "info"
volumeMounts:
- mountPath: /storage
name: storage
- mountPath: /storage
name: storage
volumes:
- name: storage
persistentVolumeClaim:
claimName: desubot-storage
readOnly: false
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/desubot/
readOnly: false

View File

@@ -30,7 +30,7 @@ spec:
name: get-id-bot
env:
- name: RUST_LOG
value: "info,teloxide::error_handlers=off"
value: "info"

View File

@@ -7,6 +7,3 @@ resources:
- get-id-bot.yaml
- external-secrets.yaml
- desubot.yaml
- restart-job.yaml
- storage.yaml

View File

@@ -1,56 +0,0 @@
---
# Daily restart of the telegram bot deployments via a kubectl CronJob.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tg-bots-restart-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: tg-bots-restart-role
rules:
  # `rollout restart` only needs get + patch on deployments.
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: tg-bots-restart-rb
subjects:
  # NOTE(review): ServiceAccount subjects normally require an explicit
  # `namespace`; the API server rejects bindings without one unless a
  # transformer (e.g. kustomize namespace) injects it — verify rendered output.
  - kind: ServiceAccount
    name: tg-bots-restart-sa
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: tg-bots-restart-role
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: tg-bots-daily-restart
spec:
  schedule: "0 4 * * *" # every day at 04:00
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: tg-bots-restart-sa
          restartPolicy: OnFailure
          containers:
            - name: kubectl
              image: bitnami/kubectl:latest
              env:
                # Restart in whatever namespace this job runs in.
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
              command:
                - /bin/sh
                - -c
                - |
                  kubectl -n "$POD_NAMESPACE" rollout restart deployment/desubot
                  kubectl -n "$POD_NAMESPACE" rollout restart deployment/get-id-bot

View File

@@ -1,12 +0,0 @@
---
# RWX claim for desubot's media storage, backed by the NFS CSI driver.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: desubot-storage
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-csi
  resources:
    requests:
      storage: 200Gi

View File

@@ -1,21 +0,0 @@
# Argo CD Application: deploys k8s/apps/xandikos into the `xandikos` namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: xandikos
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: xandikos
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/xandikos
  # Fully automated sync: heal drift and prune resources removed from git.
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,70 +0,0 @@
---
# Xandikos CalDAV/CardDAV server, served under the /dav route prefix.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: xandikos
  labels:
    app: xandikos
spec:
  selector:
    matchLabels:
      app: xandikos
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  template:
    metadata:
      labels:
        app: xandikos
    spec:
      # Pinned to the master node because storage is a hostPath there.
      nodeSelector:
        kubernetes.io/hostname: master.tail2fe2d.ts.net
      volumes:
        # type: Directory — /k8s/xandikos must already exist on the node,
        # otherwise the pod fails to start.
        - name: storage
          hostPath:
            path: /k8s/xandikos
            type: Directory
      containers:
        - name: xandikos
          image: ghcr.io/jelmer/xandikos:latest
          imagePullPolicy: Always
          command:
            - "python3"
            - "-m"
            - "xandikos.web"
            - "--port=8081"
            - "-d/data"
            - "--defaults"
            - "--listen-address=0.0.0.0"
            - "--route-prefix=/dav"
          resources:
            requests:
              memory: "64Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "1000m"
          livenessProbe:
            httpGet:
              path: /
              port: 8081
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
          readinessProbe:
            httpGet:
              path: /
              port: 8081
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 3
          ports:
            - name: http
              containerPort: 8081
              protocol: TCP
          volumeMounts:
            - name: storage
              mountPath: /data

View File

@@ -1,31 +0,0 @@
---
# ExternalSecret rendering mmdl's database settings into an Opaque Secret,
# pulling the password field from the Vaultwarden-backed ClusterSecretStore.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: mmdl-secrets
spec:
  target:
    name: mmdl-secrets
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        DB_DIALECT: 'postgres'
        DB_HOST: psql.psql.svc
        DB_USER: mmdl
        DB_NAME: mmdl
        DB_PORT: "5432"
        DB_PASS: |-
          {{ .pg_pass }}
        # NOTE(review): AES_PASSWORD reuses the same Vaultwarden field as
        # DB_PASS — confirm this is intentional.
        AES_PASSWORD: |-
          {{ .pg_pass }}
  data:
    - secretKey: pg_pass
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 2a9deb39-ef22-433e-a1be-df1555625e22
        property: fields[12].value

View File

@@ -1,47 +0,0 @@
---
# Ingress for cal.hexor.cy: MMDL UI at the root, Xandikos DAV under /dav,
# plus the well-known CalDAV/CardDAV autodiscovery paths.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: xandikos
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
    acme.cert-manager.io/http01-edit-in-place: "true"
spec:
  # Fix: "ingressClassName" was previously listed under annotations, where it
  # is not a recognized key and is silently ignored; it belongs in spec.
  ingressClassName: traefik
  rules:
    - host: cal.hexor.cy
      http:
        paths:
          # MMDL web UI.
          - path: /
            pathType: Prefix
            backend:
              service:
                name: mmdl
                port:
                  number: 3000
          # Xandikos DAV endpoint.
          - path: /dav
            pathType: Prefix
            backend:
              service:
                name: xandikos
                port:
                  number: 8081
          # Well-known DAV autodiscovery endpoints.
          - path: /.well-known/carddav
            pathType: Exact
            backend:
              service:
                name: xandikos
                port:
                  number: 8081
          - path: /.well-known/caldav
            pathType: Exact
            backend:
              service:
                name: xandikos
                port:
                  number: 8081
  tls:
    - secretName: xandikos-tls
      hosts:
        - cal.hexor.cy

View File

@@ -1,11 +0,0 @@
# Kustomization bundling the xandikos and mmdl manifests in this directory.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
- mmdl-deployment.yaml
- mmdl-service.yaml
- ingress.yaml
- external-secrets.yaml

View File

@@ -1,61 +0,0 @@
---
# MMDL calendar front-end, configured against the co-hosted Xandikos DAV
# endpoint at https://cal.hexor.cy/dav.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mmdl
  labels:
    app: mmdl
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mmdl
  # Zero-downtime rollout: start the new pod before stopping the old one.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  template:
    metadata:
      labels:
        app: mmdl
    spec:
      nodeSelector:
        kubernetes.io/hostname: master.tail2fe2d.ts.net
      containers:
        - name: mmdl
          image: intriin/mmdl:latest
          imagePullPolicy: Always
          # Database credentials come from the mmdl-secrets ExternalSecret.
          envFrom:
            - secretRef:
                name: mmdl-secrets
          env:
            - name: NEXTAUTH_URL
              value: "https://cal.hexor.cy"
            - name: CALDAV_SERVER_URL
              value: "https://cal.hexor.cy/dav"
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "1000m"
          livenessProbe:
            httpGet:
              path: /
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
          readinessProbe:
            httpGet:
              path: /
              port: 3000
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 3

View File

@@ -1,14 +0,0 @@
---
# ClusterIP service exposing the mmdl pod on port 3000.
apiVersion: v1
kind: Service
metadata:
  name: mmdl
spec:
  type: ClusterIP
  selector:
    app: mmdl
  ports:
    - name: http
      protocol: TCP
      port: 3000
      targetPort: 3000

View File

@@ -1,16 +0,0 @@
---
# ClusterIP service exposing the xandikos pod on port 8081.
apiVersion: v1
kind: Service
metadata:
  name: xandikos
  labels:
    app: xandikos
spec:
  type: ClusterIP
  selector:
    app: xandikos
  ports:
    - name: http
      protocol: TCP
      port: 8081
      targetPort: 8081

View File

@@ -18,5 +18,4 @@ spec:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true

View File

@@ -47,20 +47,3 @@ spec:
server: https://kubernetes.default.svc
sourceRepos:
- ssh://git@gt.hexor.cy:30022/ab/homelab.git
---
# Argo CD project grouping the desktop applications of the homelab.
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: desktop
  namespace: argocd
spec:
  description: Hexor Home Lab Desktop Apps
  sourceRepos:
    - ssh://git@gt.hexor.cy:30022/ab/homelab.git
  destinations:
    - namespace: '*'
      server: https://kubernetes.default.svc
  # Allow any cluster-scoped resource kinds for apps in this project.
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'

View File

@@ -23,9 +23,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 1062e5b4-5380-49f1-97c3-340f26f3487e
property: fields[0].value
- secretKey: client_secret
@@ -34,9 +31,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 1062e5b4-5380-49f1-97c3-340f26f3487e
property: fields[1].value

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: argo-cd
repo: https://argoproj.github.io/argo-helm
version: 9.4.10
version: 9.1.4
releaseName: argocd
namespace: argocd
valuesFile: values.yaml

View File

@@ -2,7 +2,7 @@
global:
domain: ag.hexor.cy
nodeSelector: &nodeSelector
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
logging:
format: text
@@ -28,9 +28,8 @@ configs:
issuer: https://idm.hexor.cy/application/o/argocd/
clientID: $oidc-creds:id
clientSecret: $oidc-creds:secret
requestedScopes: ["openid", "profile", "email", "groups", "offline_access"]
requestedScopes: ["openid", "profile", "email", "groups"]
requestedIDTokenClaims: {"groups": {"essential": true}}
refreshTokenThreshold: 2m
rbac:
create: true
policy.default: ""
@@ -56,15 +55,15 @@ configs:
controller:
replicas: 1
nodeSelector:
<<: *nodeSelector
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# Dex OIDC provider
dex:
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
enabled: false
# Standard Redis disabled because Redis HA is enabled
@@ -87,7 +86,7 @@ redis-ha:
server:
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
@@ -100,11 +99,8 @@ server:
# Repository Server
repoServer:
replicas: 1
livenessProbe:
timeoutSeconds: 10
periodSeconds: 60
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# ApplicationSet Controller
@@ -112,7 +108,7 @@ applicationSet:
enabled: true # Enabled by default
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# Notifications Controller
@@ -120,5 +116,5 @@ notifications:
enabled: true # Enabled by default
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add notifiers, triggers, templates configurations if needed

View File

@@ -18,4 +18,4 @@ spec:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true

View File

@@ -19,14 +19,6 @@ spec:
{{ .password }}
AUTHENTIK_SECRET_KEY: |-
{{ .secret_key }}
POSTGRES_PASSWORD: |-
{{ .password }}
POSTGRES_USER: |-
{{ .username }}
username: |-
{{ .password }}
password: |-
{{ .username }}
data:
- secretKey: password
sourceRef:
@@ -34,9 +26,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 279c2c1f-c147-4b6b-a511-36c3cd764f9d
property: login.password
- secretKey: username
@@ -45,9 +34,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 279c2c1f-c147-4b6b-a511-36c3cd764f9d
property: login.username
- secretKey: secret_key
@@ -56,9 +42,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 279c2c1f-c147-4b6b-a511-36c3cd764f9d
property: fields[0].value

View File

@@ -5,13 +5,12 @@ resources:
- app.yaml
- external-secrets.yaml
- https-middleware.yaml
- outpost-selector-fix.yaml
# - worker-restart.yaml
- worker-restart.yaml
helmCharts:
- name: authentik
repo: https://charts.goauthentik.io
version: 2026.2.1
version: 2025.10.1
releaseName: authentik
namespace: authentik
valuesFile: values.yaml

View File

@@ -1,81 +0,0 @@
## Workaround for authentik bug: embedded outpost controller creates
## a Service with selectors that don't match the pod labels it sets.
## Remove this after upgrading to a version with the fix.
# Identity used by the outpost-selector-fix CronJob.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: outpost-selector-fix
  namespace: authentik
---
# Minimal permissions for the fix job: read and patch Services, and read
# Endpoints to decide whether a patch is needed.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: outpost-selector-fix
  namespace: authentik
rules:
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get"]
---
# Binds the Role above to the fix job's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: outpost-selector-fix
  namespace: authentik
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: outpost-selector-fix
subjects:
  - kind: ServiceAccount
    name: outpost-selector-fix
    namespace: authentik
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: outpost-selector-fix
namespace: authentik
spec:
# Runs every minute so a broken selector is repaired quickly.
schedule: "* * * * *"
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
# Replace an in-flight job instead of letting overlapping runs pile up.
concurrencyPolicy: Replace
jobTemplate:
spec:
# Delete finished jobs after 5 minutes to keep the namespace tidy.
ttlSecondsAfterFinished: 300
template:
spec:
# ServiceAccount bound to the Role allowing get/patch on services.
serviceAccountName: outpost-selector-fix
restartPolicy: OnFailure
containers:
- name: fix
image: bitnami/kubectl:latest
# If the outpost Service has no endpoint addresses, rewrite its selector
# (drop the "component" label, point "name" at the proxy pods) and verify.
# "~1" in the JSON-patch paths is the escaped "/" of the label keys.
command:
- /bin/sh
- -c
- |
SVC="ak-outpost-authentik-embedded-outpost"
# check if endpoints are populated
ADDRS=$(kubectl get endpoints "$SVC" -n authentik -o jsonpath='{.subsets[*].addresses[*].ip}' 2>/dev/null)
if [ -n "$ADDRS" ]; then
echo "Endpoints OK ($ADDRS), nothing to fix"
exit 0
fi
echo "No endpoints for $SVC, patching selector..."
kubectl patch svc "$SVC" -n authentik --type=json -p '[
{"op":"remove","path":"/spec/selector/app.kubernetes.io~1component"},
{"op":"replace","path":"/spec/selector/app.kubernetes.io~1name","value":"authentik-outpost-proxy"}
]'
echo "Patched. Verifying..."
sleep 2
ADDRS=$(kubectl get endpoints "$SVC" -n authentik -o jsonpath='{.subsets[*].addresses[*].ip}' 2>/dev/null)
if [ -n "$ADDRS" ]; then
echo "Fix confirmed, endpoints: $ADDRS"
else
echo "WARNING: still no endpoints after patch"
exit 1
fi

View File

@@ -1,6 +1,8 @@
global:
image:
tag: "2026.2.1"
tag: "2025.10.1"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
authentik:
error_reporting:
@@ -13,35 +15,14 @@ worker:
envFrom:
- secretRef:
name: authentik-creds
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
# sizeLimit: 512Mi
# volumeMounts:
# - name: dshm
# mountPath: /dev/shm
# livenessProbe:
# exec:
# command: ["/bin/sh", "-c", "kill -0 1"]
# initialDelaySeconds: 5
# periodSeconds: 10
# failureThreshold: 3
# timeoutSeconds: 3
# readinessProbe:
# exec:
# command: ["/bin/sh", "-c", "kill -0 1"]
# initialDelaySeconds: 5
# periodSeconds: 10
# failureThreshold: 3
# timeoutSeconds: 3
# startupProbe:
# exec:
# command: ["/bin/sh", "-c", "kill -0 1"]
# initialDelaySeconds: 30
# periodSeconds: 10
# failureThreshold: 60
# timeoutSeconds: 3
volumes:
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 512Mi
volumeMounts:
- name: dshm
mountPath: /dev/shm
server:
envFrom:
- secretRef:
@@ -54,10 +35,23 @@ server:
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
hosts:
- idm.hexor.cy
- nas.hexor.cy # TrueNAS Limassol
- nc.hexor.cy # NextCloud
- of.hexor.cy # Outfleet-v2
- k8s.hexor.cy # k8s dashboard
- qbt.hexor.cy # qBittorrent for Jellyfin
- prom.hexor.cy # Prometheus
- khm.hexor.cy # Known Hosts keys Manager
- backup.hexor.cy # Kopia Backup UI
- fm.hexor.cy # Filemanager
- minecraft.hexor.cy # Minecraft UI and server
- pass.hexor.cy # k8s-secret for openai
- ps.hexor.cy # pasarguard UI
# - rw.hexor.cy # RemnaWave UI
tls:
- secretName: idm-tls
hosts:
- '*.hexor.cy'
redis:
enabled: false
enabled: true

View File

@@ -35,7 +35,5 @@ spec:
key: secretKey
selector:
dnsZones:
- "ps.hexor.cy"
- "of.hexor.cy"
- "matrix.hexor.cy"

Some files were not shown because too many files have changed in this diff Show More