Compare commits

..

1 commit

Author SHA1 Message Date
Gitea Actions Bot
c1a61550e9 Auto-update README with current k8s applications
All checks were successful
Terraform / Terraform (pull_request) Successful in 18s
Generated by CI/CD workflow on 2026-03-06 23:31:46

This PR updates the README.md file with the current list of applications found in the k8s/ directory structure.
2026-03-06 23:31:46 +00:00
24 changed files with 110 additions and 452 deletions

View File

@@ -38,7 +38,6 @@ ArgoCD homelab project
| Application | Status |
| :--- | :---: |
| **comfyui** | [![comfyui](https://ag.hexor.cy/api/badge?name=comfyui&revision=true)](https://ag.hexor.cy/applications/argocd/comfyui) |
| **gitea** | [![gitea](https://ag.hexor.cy/api/badge?name=gitea&revision=true)](https://ag.hexor.cy/applications/argocd/gitea) |
| **greece-notifier** | [![greece-notifier](https://ag.hexor.cy/api/badge?name=greece-notifier&revision=true)](https://ag.hexor.cy/applications/argocd/greece-notifier) |
| **hexound** | [![hexound](https://ag.hexor.cy/api/badge?name=hexound&revision=true)](https://ag.hexor.cy/applications/argocd/hexound) |
@@ -47,7 +46,6 @@ ArgoCD homelab project
| **jellyfin** | [![jellyfin](https://ag.hexor.cy/api/badge?name=jellyfin&revision=true)](https://ag.hexor.cy/applications/argocd/jellyfin) |
| **k8s-secrets** | [![k8s-secrets](https://ag.hexor.cy/api/badge?name=k8s-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
| **khm** | [![khm](https://ag.hexor.cy/api/badge?name=khm&revision=true)](https://ag.hexor.cy/applications/argocd/khm) |
| **lidarr** | [![lidarr](https://ag.hexor.cy/api/badge?name=lidarr&revision=true)](https://ag.hexor.cy/applications/argocd/lidarr) |
| **mtproxy** | [![mtproxy](https://ag.hexor.cy/api/badge?name=mtproxy&revision=true)](https://ag.hexor.cy/applications/argocd/mtproxy) |
| **n8n** | [![n8n](https://ag.hexor.cy/api/badge?name=n8n&revision=true)](https://ag.hexor.cy/applications/argocd/n8n) |
| **ollama** | [![ollama](https://ag.hexor.cy/api/badge?name=ollama&revision=true)](https://ag.hexor.cy/applications/argocd/ollama) |

View File

@@ -1,20 +0,0 @@
---
# Argo CD Application: deploys the ComfyUI manifests from k8s/apps/comfyui.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: comfyui
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: comfyui
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/comfyui
  syncPolicy:
    automated:
      selfHeal: true  # revert manual drift in the cluster
      prune: true  # delete resources removed from git
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,57 +0,0 @@
---
# ComfyUI inference server: one replica pinned to the GPU desktop node.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: comfyui
  namespace: comfyui
  labels:
    app: comfyui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: comfyui
  template:
    metadata:
      labels:
        app: comfyui
    spec:
      runtimeClassName: nvidia  # required for GPU access
      tolerations:
        - key: workload
          operator: Equal
          value: desktop
          effect: NoSchedule
      nodeSelector:
        kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
      # Fix permissions mismatch usually happening when mapping host paths
      securityContext:
        runAsUser: 0
      initContainers:
        # Pre-create the data dir and hand ownership to uid/gid 1000
        # (presumably the user the app image drops to — confirm against image).
        - name: create-data-dir
          image: busybox
          command: ["sh", "-c", "mkdir -p /host.data && chown -R 1000:1000 /host.data"]
          volumeMounts:
            - name: data
              mountPath: /host.data
      containers:
        - name: comfyui
          image: runpod/comfyui:latest-5090
          imagePullPolicy: IfNotPresent
          env:
            - name: COMFYUI_PORT
              value: "8188"
          ports:
            - containerPort: 8188
              name: http
              protocol: TCP
          resources:
            limits:
              nvidia.com/gpu: 1
          volumeMounts:
            - name: data
              # For ai-dock images, /workspace is the persistent user directory
              mountPath: /workspace
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: comfyui-data-pvc

View File

@@ -1,9 +0,0 @@
---
# Kustomize entry point for the ComfyUI app: plain manifests, no overlays.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - local-pv.yaml
  - pvc.yaml
  - deployment.yaml
  - service.yaml

View File

@@ -1,22 +0,0 @@
---
# Local PersistentVolume backing ComfyUI data on the GPU node's own disk;
# nodeAffinity pins consumers to that node.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: comfyui-data-pv
spec:
  capacity:
    storage: 200Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain  # keep data if the claim is deleted
  storageClassName: local-path
  local:
    path: /data/comfyui
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - uk-desktop.tail2fe2d.ts.net

View File

@@ -1,4 +0,0 @@
---
# Namespace for all ComfyUI resources.
apiVersion: v1
kind: Namespace
metadata:
  name: comfyui

View File

@@ -1,12 +0,0 @@
---
# Claim bound to comfyui-data-pv (matching storageClass, size and access mode).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: comfyui-data-pvc
  namespace: comfyui
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 200Gi

View File

@@ -1,15 +0,0 @@
---
# ClusterIP service exposing the ComfyUI HTTP port inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: comfyui
  namespace: comfyui
  labels:
    app: comfyui
spec:
  ports:
    - name: http
      port: 8188
      targetPort: 8188
      protocol: TCP
  selector:
    app: comfyui

View File

@@ -1,20 +0,0 @@
---
# Argo CD Application: deploys the Lidarr manifests from k8s/apps/lidarr.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: lidarr
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: lidarr
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/lidarr
  syncPolicy:
    automated:
      selfHeal: true  # revert manual drift in the cluster
      prune: true  # delete resources removed from git
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,14 +0,0 @@
---
# Kustomize entry point for Lidarr: one raw manifest plus an inflated Helm chart.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - app.yaml
helmCharts:
  - name: lidarr
    repo: https://k8s-home-lab.github.io/helm-charts/
    version: 15.3.0
    releaseName: lidarr
    namespace: lidarr
    valuesFile: lidarr-values.yaml
    includeCRDs: true

View File

@@ -1,27 +0,0 @@
---
# Helm values for the lidarr chart: timezone, resource bounds, node pinning,
# and hostPath-backed persistence for config and downloads.
env:
  TZ: Asia/Nicosia
resources:
  requests:
    memory: "512Mi"
    cpu: "200m"
  limits:
    memory: "2Gi"
    cpu: "1500m"
nodeSelector:
  kubernetes.io/hostname: master.tail2fe2d.ts.net
persistence:
  config:
    enabled: true
    type: hostPath
    hostPath: /k8s/lidarr
    mountPath: /config
  downloads:
    enabled: true
    type: hostPath
    hostPath: /k8s/media/downloads
    mountPath: /downloads
    # NOTE(review): accessMode assumed to belong to the `downloads` entry —
    # the pasted source lost its indentation; confirm against the chart schema.
    accessMode: ReadWriteOnce

View File

@@ -85,8 +85,6 @@ spec:
value: "queue"
- name: QUEUE_BULL_REDIS_HOST
value: "n8n-redis"
- name: QUEUE_BULL_REDIS_PORT
value: "6379"
- name: NODE_ENV
value: "production"
- name: WEBHOOK_URL
@@ -134,7 +132,7 @@ spec:
memory: 512Mi
limits:
cpu: 4000m
memory: 2048Mi
memory: 2Gi  # FIX(review): 2048Gi (2 TiB) was a typo for 2Gi (== old 2048Mi)
livenessProbe:
httpGet:
path: /healthz

View File

@@ -1,84 +0,0 @@
---
# n8n queue-mode worker: two replicas pulling executions from the Redis
# (bull) queue; shares DB credentials and the encryption key via secrets.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-worker
  labels:
    app: n8n
    component: worker
spec:
  replicas: 2
  selector:
    matchLabels:
      app: n8n
      component: worker
  template:
    metadata:
      labels:
        app: n8n
        component: worker
    spec:
      serviceAccountName: n8n
      containers:
        - name: n8n-worker
          image: n8nio/n8n:latest
          command:
            - n8n
            - worker
          env:
            - name: HOME
              value: "/home/node"
            - name: N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS
              value: "true"
            # Queue mode: executions are distributed via Redis.
            - name: EXECUTIONS_MODE
              value: "queue"
            - name: QUEUE_BULL_REDIS_HOST
              value: "n8n-redis"
            - name: QUEUE_BULL_REDIS_PORT
              value: "6379"
            - name: NODE_ENV
              value: "production"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            - name: DB_TYPE
              value: "postgresdb"
            - name: DB_POSTGRESDB_HOST
              value: "psql.psql.svc"
            - name: DB_POSTGRESDB_DATABASE
              value: "n8n"
            - name: DB_POSTGRESDB_USER
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: username
            - name: DB_POSTGRESDB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: password
            # Must match the main n8n instance so stored credentials decrypt.
            - name: N8N_ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: encryptionkey
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
      volumes:
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
      # Pod-level: run as the unprivileged node user; fsGroup fixes volume perms.
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -10,7 +10,7 @@ resources:
- paddleocr-deployment.yaml
- paddleocr-service.yaml
- deployment-main.yaml
- deployment-worker.yaml
# - deployment-worker.yaml
- deployment-runner.yaml
- service.yaml
- ingress.yaml

View File

@@ -3,23 +3,19 @@ kind: Kustomization
resources:
- external-secrets.yaml
- local-pv.yaml
helmCharts:
- name: ollama
repo: https://otwld.github.io/ollama-helm/
version: 1.49.0
version: 0.4.0
releaseName: ollama
namespace: ollama
valuesFile: ollama-values.yaml
includeCRDs: true
- name: open-webui
repo: https://helm.openwebui.com/
version: 12.8.1
version: 8.14.0
releaseName: openweb-ui
namespace: ollama
valuesFile: openweb-ui-values.yaml
includeCRDs: true
patches:
- path: patch-runtimeclass.yaml
includeCRDs: true

View File

@@ -1,22 +0,0 @@
---
# Local PersistentVolume for Ollama model storage on the GPU node;
# nodeAffinity pins consumers to that node.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ollama-local-pv
spec:
  capacity:
    storage: 100Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain  # keep models if the claim is deleted
  storageClassName: local-path
  local:
    path: /var/lib/ollama
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - uk-desktop.tail2fe2d.ts.net

View File

@@ -4,11 +4,6 @@ image:
tag: "latest"
nodeSelector:
kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
tolerations:
- key: workload
operator: Equal
value: desktop
effect: NoSchedule
ingress:
enabled: false
ollama:
@@ -16,7 +11,3 @@ ollama:
enabled: true
type: 'nvidia'
number: 1
persistentVolume:
enabled: true
size: 100Gi
storageClass: "local-path"

View File

@@ -1,4 +1,4 @@
clusterDomain: cluster.local
clusterDomain: ai.hexor.cy
extraEnvVars:
GLOBAL_LOG_LEVEL: debug
@@ -32,22 +32,12 @@ ollama:
pipelines:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
tika:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
websocket:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
redis:
master:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: true
@@ -56,5 +46,7 @@ ingress:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
host: "ai.hexor.cy"
tls: true
existingSecret: ollama-tls
tls:
- hosts:
- '*.hexor.cy'
secretName: ollama-tls

View File

@@ -1,9 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
namespace: ollama
spec:
template:
spec:
runtimeClassName: nvidia

View File

@@ -10,14 +10,3 @@ tolerations:
runtimeClassName: nvidia
setAsDefault: false
config:
default: any
map:
any: |-
version: v1
sharing:
timeSlicing:
resources:
- name: nvidia.com/gpu
replicas: 4

View File

@@ -260,7 +260,7 @@ data:
to: 0
datasourceUid: P76F38748CEC837F0
model:
expr: 'node_load15 / on(instance) group_left count by(instance)(node_cpu_seconds_total{mode="idle"})'
expr: 'node_load5 / on(instance) group_left count by(instance)(node_cpu_seconds_total{mode="idle"})'
refId: A
intervalMs: 1000
maxDataPoints: 43200
@@ -273,7 +273,7 @@ data:
conditions:
- evaluator:
params:
- 2
- 0.8
type: gt
operator:
type: and
@@ -283,16 +283,16 @@ data:
type: __expr__
uid: __expr__
expression: A
reducer: last
reducer: max
refId: B
type: reduce
noDataState: NoData
execErrState: Alerting
for: 15m
for: 5m
annotations:
node: '{{ $labels.instance }}'
load_average: '{{ printf "%.2f" $values.A }}'
summary: 'Node load average is critically high relative to CPU count'
summary: 'Node load average is high relative to CPU count'
labels:
severity: warning

View File

@@ -0,0 +1,85 @@
---
# Helm values for the standalone grafana chart: authentik OAuth login,
# Postgres backend, provisioned datasources, Traefik ingress and
# file-provisioned alerting mounted from the grafana-alerting ConfigMap.
envFromSecret: grafana-admin
nodeSelector:
  kubernetes.io/hostname: master.tail2fe2d.ts.net
admin:
  existingSecret: grafana-admin
  userKey: username
  passwordKey: password
grafana.ini:
  auth:
    signout_redirect_url: https://idm.hexor.cy/application/o/grafana/end-session/
    # oauth_auto_login: true
  auth.generic_oauth:
    name: authentik
    enabled: true
    scopes: "openid profile email"
    auth_url: https://idm.hexor.cy/application/o/authorize/
    token_url: https://idm.hexor.cy/application/o/token/
    api_url: https://idm.hexor.cy/application/o/userinfo/
    role_attribute_path: >-
      contains(groups, 'Grafana Admin') && 'Admin' ||
      contains(groups, 'Grafana Editors') && 'Editor' ||
      contains(groups, 'Grafana Viewer') && 'Viewer'
  database:
    type: postgres
    host: psql.psql.svc:5432
    name: grafana
    user: grafana
    ssl_mode: disable
    # NOTE(review): no password key here — presumably injected via
    # envFromSecret (grafana-admin); confirm the secret carries it.
datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
      - name: Prometheus Local
        type: prometheus
        url: http://prometheus-kube-prometheus-prometheus.prometheus.svc:9090
        access: proxy
        isDefault: true
      - name: Loki
        type: loki
        url: http://loki-gateway.prometheus.svc:80
        access: proxy
ingress:
  enabled: true
  ingressClassName: traefik
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
  hosts:
    - gf.hexor.cy
  tls:
    - secretName: grafana-tls
      hosts:
        - '*.hexor.cy'
extraConfigmapMounts:
  - name: grafana-alerting-rules
    mountPath: /etc/grafana/provisioning/alerting/rules.yaml
    configMap: grafana-alerting
    subPath: rules.yaml
    readOnly: true
  - name: grafana-alerting-contactpoints
    mountPath: /etc/grafana/provisioning/alerting/contactpoints.yaml
    configMap: grafana-alerting
    subPath: contactpoints.yaml
    readOnly: true
  - name: grafana-alerting-policies
    mountPath: /etc/grafana/provisioning/alerting/policies.yaml
    configMap: grafana-alerting
    subPath: policies.yaml
    readOnly: true
envValueFrom:
  TELEGRAM_BOT_TOKEN:
    secretKeyRef:
      name: grafana-telegram
      key: bot-token
  TELEGRAM_CHAT_ID:
    secretKeyRef:
      name: grafana-telegram
      key: chat-id

View File

@@ -16,6 +16,14 @@ helmCharts:
valuesFile: prom-values.yaml
includeCRDs: true
- name: grafana
repo: https://grafana.github.io/helm-charts
version: 10.2.0
releaseName: grafana
namespace: prometheus
valuesFile: grafana-values.yaml
includeCRDs: true
- name: loki
repo: https://grafana.github.io/helm-charts
version: 6.29.0

View File

@@ -1,4 +1,5 @@
grafana:
enabled: false
alertmanager:
config:
@@ -91,88 +92,3 @@ prometheus:
requests:
storage: 400Gi
grafana:
enabled: true
serviceAccount:
create: true
name: "prom-grafana-sa"
envFromSecret: grafana-admin
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
admin:
existingSecret: grafana-admin
userKey: username
passwordKey: password
grafana.ini:
auth:
signout_redirect_url: https://idm.hexor.cy/application/o/grafana/end-session/
auth.generic_oauth:
name: authentik
enabled: true
scopes: "openid profile email"
auth_url: https://idm.hexor.cy/application/o/authorize/
token_url: https://idm.hexor.cy/application/o/token/
api_url: https://idm.hexor.cy/application/o/userinfo/
role_attribute_path: >-
contains(groups, 'Grafana Admin') && 'Admin' ||
contains(groups, 'Grafana Editors') && 'Editor' ||
contains(groups, 'Grafana Viewer') && 'Viewer'
database:
type: postgres
host: psql.psql.svc:5432
name: grafana
user: grafana
ssl_mode: disable
# The Loki datasource config needs to be preserved,
# but instead of "datasources.datasources.yaml", we define it like this for the prometheus-stack chart:
additionalDataSources:
- name: Loki
type: loki
url: http://loki-gateway.prometheus.svc:80
access: proxy
orgId: 1
ingress:
enabled: true
ingressClassName: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
hosts:
- gf.hexor.cy
tls:
- secretName: grafana-tls
hosts:
- '*.hexor.cy'
extraConfigmapMounts:
- name: grafana-alerting-rules
mountPath: /etc/grafana/provisioning/alerting/rules.yaml
configMap: grafana-alerting
subPath: rules.yaml
readOnly: true
- name: grafana-alerting-contactpoints
mountPath: /etc/grafana/provisioning/alerting/contactpoints.yaml
configMap: grafana-alerting
subPath: contactpoints.yaml
readOnly: true
- name: grafana-alerting-policies
mountPath: /etc/grafana/provisioning/alerting/policies.yaml
configMap: grafana-alerting
subPath: policies.yaml
readOnly: true
envValueFrom:
TELEGRAM_BOT_TOKEN:
secretKeyRef:
name: grafana-telegram
key: bot-token
TELEGRAM_CHAT_ID:
secretKeyRef:
name: grafana-telegram
key: chat-id