Compare commits

..

1 commit

Author SHA1 Message Date
Gitea Actions Bot
977b7b5930 Auto-update README with current k8s applications
All checks were successful
Terraform / Terraform (pull_request) Successful in 42s
Generated by CI/CD workflow on 2025-11-24 16:34:28

This PR updates the README.md file with the current list of applications found in the k8s/ directory structure.
2025-11-24 16:34:28 +00:00
135 changed files with 506 additions and 89618 deletions

View File

@@ -25,34 +25,26 @@ jobs:
uses: actions/checkout@v3
- name: Setup Terraform
uses: hashicorp/setup-terraform@v4.0.0
uses: hashicorp/setup-terraform@v2
with:
cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
- name: Terraform Init
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform init
working-directory: ./terraform/authentik
- name: Terraform Format
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform fmt -check
continue-on-error: true
working-directory: ./terraform/authentik
- name: Terraform Apply
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: terraform apply -input=false -auto-approve -parallelism=100
run: terraform apply -var-file proxy-apps.tfvars -var-file oauth2-apps.tfvars -var-file terraform.tfvars -var-file groups.tfvars -input=false -auto-approve -parallelism=100
working-directory: ./terraform/authentik
- name: Generate Wiki Content
if: success()
continue-on-error: true
env:
TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
run: |
echo "📋 Starting Wiki generation..."
cd ./terraform/authentik

View File

@@ -22,13 +22,12 @@ jobs:
- name: Install Python dependencies
run: |
python3 -m venv .venv
.venv/bin/pip install pyyaml
pip install pyyaml
- name: Generate K8s Services Wiki
run: |
echo "📋 Starting K8s wiki generation..."
.venv/bin/python .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
python3 .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
if [ -f "Kubernetes-Services.md" ]; then
echo "✅ Wiki content generated successfully"

1
.gitignore vendored
View File

@@ -13,7 +13,6 @@ crash.*.log
*.tfvars
*.tfvars.json
!*terraform.tfvars
!*.auto.tfvars
# claude ai
.claude/

View File

@@ -16,10 +16,8 @@ ArgoCD homelab project
| **authentik** | [![authentik](https://ag.hexor.cy/api/badge?name=authentik&revision=true)](https://ag.hexor.cy/applications/argocd/authentik) |
| **cert-manager** | [![cert-manager](https://ag.hexor.cy/api/badge?name=cert-manager&revision=true)](https://ag.hexor.cy/applications/argocd/cert-manager) |
| **external-secrets** | [![external-secrets](https://ag.hexor.cy/api/badge?name=external-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/external-secrets) |
| **gpu** | [![gpu](https://ag.hexor.cy/api/badge?name=gpu&revision=true)](https://ag.hexor.cy/applications/argocd/gpu) |
| **kube-system-custom** | [![kube-system-custom](https://ag.hexor.cy/api/badge?name=kube-system-custom&revision=true)](https://ag.hexor.cy/applications/argocd/kube-system-custom) |
| **kubernetes-dashboard** | [![kubernetes-dashboard](https://ag.hexor.cy/api/badge?name=kubernetes-dashboard&revision=true)](https://ag.hexor.cy/applications/argocd/kubernetes-dashboard) |
| **longhorn** | [![longhorn](https://ag.hexor.cy/api/badge?name=longhorn&revision=true)](https://ag.hexor.cy/applications/argocd/longhorn) |
| **postgresql** | [![postgresql](https://ag.hexor.cy/api/badge?name=postgresql&revision=true)](https://ag.hexor.cy/applications/argocd/postgresql) |
| **prom-stack** | [![prom-stack](https://ag.hexor.cy/api/badge?name=prom-stack&revision=true)](https://ag.hexor.cy/applications/argocd/prom-stack) |
| **system-upgrade** | [![system-upgrade](https://ag.hexor.cy/api/badge?name=system-upgrade&revision=true)](https://ag.hexor.cy/applications/argocd/system-upgrade) |
@@ -38,8 +36,6 @@ ArgoCD homelab project
| Application | Status |
| :--- | :---: |
| **comfyui** | [![comfyui](https://ag.hexor.cy/api/badge?name=comfyui&revision=true)](https://ag.hexor.cy/applications/argocd/comfyui) |
| **furumi-server** | [![furumi-server](https://ag.hexor.cy/api/badge?name=furumi-server&revision=true)](https://ag.hexor.cy/applications/argocd/furumi-server) |
| **gitea** | [![gitea](https://ag.hexor.cy/api/badge?name=gitea&revision=true)](https://ag.hexor.cy/applications/argocd/gitea) |
| **greece-notifier** | [![greece-notifier](https://ag.hexor.cy/api/badge?name=greece-notifier&revision=true)](https://ag.hexor.cy/applications/argocd/greece-notifier) |
| **hexound** | [![hexound](https://ag.hexor.cy/api/badge?name=hexound&revision=true)](https://ag.hexor.cy/applications/argocd/hexound) |
@@ -48,9 +44,6 @@ ArgoCD homelab project
| **jellyfin** | [![jellyfin](https://ag.hexor.cy/api/badge?name=jellyfin&revision=true)](https://ag.hexor.cy/applications/argocd/jellyfin) |
| **k8s-secrets** | [![k8s-secrets](https://ag.hexor.cy/api/badge?name=k8s-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
| **khm** | [![khm](https://ag.hexor.cy/api/badge?name=khm&revision=true)](https://ag.hexor.cy/applications/argocd/khm) |
| **lidarr** | [![lidarr](https://ag.hexor.cy/api/badge?name=lidarr&revision=true)](https://ag.hexor.cy/applications/argocd/lidarr) |
| **mtproxy** | [![mtproxy](https://ag.hexor.cy/api/badge?name=mtproxy&revision=true)](https://ag.hexor.cy/applications/argocd/mtproxy) |
| **n8n** | [![n8n](https://ag.hexor.cy/api/badge?name=n8n&revision=true)](https://ag.hexor.cy/applications/argocd/n8n) |
| **ollama** | [![ollama](https://ag.hexor.cy/api/badge?name=ollama&revision=true)](https://ag.hexor.cy/applications/argocd/ollama) |
| **paperless** | [![paperless](https://ag.hexor.cy/api/badge?name=paperless&revision=true)](https://ag.hexor.cy/applications/argocd/paperless) |
| **pasarguard** | [![pasarguard](https://ag.hexor.cy/api/badge?name=pasarguard&revision=true)](https://ag.hexor.cy/applications/argocd/pasarguard) |
@@ -63,7 +56,6 @@ ArgoCD homelab project
| **tg-bots** | [![tg-bots](https://ag.hexor.cy/api/badge?name=tg-bots&revision=true)](https://ag.hexor.cy/applications/argocd/tg-bots) |
| **vaultwarden** | [![vaultwarden](https://ag.hexor.cy/api/badge?name=vaultwarden&revision=true)](https://ag.hexor.cy/applications/argocd/vaultwarden) |
| **vpn** | [![vpn](https://ag.hexor.cy/api/badge?name=vpn&revision=true)](https://ag.hexor.cy/applications/argocd/vpn) |
| **xandikos** | [![xandikos](https://ag.hexor.cy/api/badge?name=xandikos&revision=true)](https://ag.hexor.cy/applications/argocd/xandikos) |
</td>
</tr>

View File

@@ -1,20 +0,0 @@
---
# Argo CD Application: syncs k8s/apps/comfyui from the homelab repo into the
# comfyui namespace, with automated self-healing and pruning.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: comfyui
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: comfyui
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/comfyui
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,57 +0,0 @@
---
# ComfyUI on the GPU desktop node: single replica, NVIDIA runtime class,
# host-path-backed PVC for the /workspace user directory.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: comfyui
  namespace: comfyui
  labels:
    app: comfyui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: comfyui
  template:
    metadata:
      labels:
        app: comfyui
    spec:
      runtimeClassName: nvidia
      tolerations:
        - key: workload
          operator: Equal
          value: desktop
          effect: NoSchedule
      nodeSelector:
        kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
      # Runs as root to fix the ownership mismatch that host-path mounts
      # usually introduce; the init container then chowns the data dir.
      securityContext:
        runAsUser: 0
      initContainers:
        - name: create-data-dir
          image: busybox
          command: ["sh", "-c", "mkdir -p /host.data && chown -R 1000:1000 /host.data"]
          volumeMounts:
            - name: data
              mountPath: /host.data
      containers:
        - name: comfyui
          image: runpod/comfyui:latest-5090
          imagePullPolicy: IfNotPresent
          env:
            - name: COMFYUI_PORT
              value: "8188"
          ports:
            - containerPort: 8188
              name: http
              protocol: TCP
          resources:
            limits:
              nvidia.com/gpu: 1
          volumeMounts:
            - name: data
              # For ai-dock images, /workspace is the persistent user directory
              mountPath: /workspace
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: comfyui-data-pvc

View File

@@ -1,9 +0,0 @@
---
# Kustomize entry point for the comfyui app: plain manifests, no patches.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - local-pv.yaml
  - pvc.yaml
  - deployment.yaml
  - service.yaml

View File

@@ -1,22 +0,0 @@
---
# Local PersistentVolume pinned to the GPU desktop node; Retain policy so
# ComfyUI data survives PVC deletion.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: comfyui-data-pv
spec:
  capacity:
    storage: 200Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /data/comfyui
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - uk-desktop.tail2fe2d.ts.net

View File

@@ -1,4 +0,0 @@
---
# Namespace for the ComfyUI workload.
apiVersion: v1
kind: Namespace
metadata:
  name: comfyui

View File

@@ -1,12 +0,0 @@
---
# Claim bound (by storage class/size) to the comfyui-data-pv local volume.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: comfyui-data-pvc
  namespace: comfyui
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 200Gi

View File

@@ -1,15 +0,0 @@
---
# ClusterIP service exposing the ComfyUI HTTP port inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: comfyui
  namespace: comfyui
  labels:
    app: comfyui
spec:
  ports:
    - name: http
      port: 8188
      targetPort: 8188
      protocol: TCP
  selector:
    app: comfyui

View File

@@ -1,20 +0,0 @@
---
# Argo CD Application: syncs k8s/apps/furumi-server with automated
# self-healing and pruning.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: furumi-server
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: furumi-server
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/furumi-server
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,42 +0,0 @@
---
# furumi-server: gRPC media server pinned to the master node, serving music
# from a host-path mount and exposing Prometheus metrics on 9090.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: furumi-server
  labels:
    app: furumi-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: furumi-server
  template:
    metadata:
      labels:
        app: furumi-server
    spec:
      nodeSelector:
        kubernetes.io/hostname: master.tail2fe2d.ts.net
      containers:
        - name: furumi-server
          image: ultradesu/furumi-server:latest
          imagePullPolicy: Always
          env:
            # NOTE(review): plaintext credential committed in the manifest —
            # consider sourcing FURUMI_TOKEN from a Secret/ExternalSecret.
            - name: FURUMI_TOKEN
              value: "f38387266e75effe891b7953eb9c06b4"
            - name: FURUMI_ROOT
              value: "/media"
          ports:
            - name: grpc
              containerPort: 50051
              protocol: TCP
            - name: metrics
              containerPort: 9090
              protocol: TCP
          volumeMounts:
            - name: music
              mountPath: /media
      volumes:
        - name: music
          hostPath:
            path: /k8s/media/downloads/Lidarr_Music
            type: DirectoryOrCreate

View File

@@ -1,8 +0,0 @@
---
# Kustomize entry point for furumi-server.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - app.yaml
  - deployment.yaml
  - service.yaml
  - servicemonitor.yaml

View File

@@ -1,30 +0,0 @@
---
# External gRPC endpoint for furumi-server.
apiVersion: v1
kind: Service
metadata:
  name: furumi-server-grpc
spec:
  type: LoadBalancer
  selector:
    app: furumi-server
  ports:
    - name: grpc
      protocol: TCP
      port: 50051
      targetPort: 50051
---
# Internal metrics endpoint, scraped by the ServiceMonitor.
apiVersion: v1
kind: Service
metadata:
  name: furumi-server-metrics
  labels:
    app: furumi-server
spec:
  type: ClusterIP
  selector:
    app: furumi-server
  ports:
    - name: metrics
      protocol: TCP
      port: 9090
      targetPort: 9090

View File

@@ -1,21 +0,0 @@
---
# Prometheus-operator ServiceMonitor scraping furumi-server-metrics
# every 30s in the furumi-server namespace.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: furumi-server-metrics
  labels:
    app: furumi-server
    release: prometheus
spec:
  selector:
    matchLabels:
      app: furumi-server
  endpoints:
    - port: metrics
      path: /metrics
      interval: 30s
      scrapeTimeout: 10s
      honorLabels: true
  namespaceSelector:
    matchNames:
      - furumi-server

View File

@@ -77,8 +77,8 @@ spec:
labels:
app: gitea-runner
spec:
#nodeSelector:
# kubernetes.io/hostname: home.homenet
nodeSelector:
kubernetes.io/hostname: home.homenet
volumes:
- name: docker-sock
hostPath:
@@ -90,30 +90,27 @@ spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- home.homenet
- weight: 2
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- master.tail2fe2d.ts.net
- weight: 3
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- it.tail2fe2d.ts.net
- ch.tail2fe2d.ts.net
- us.tail2fe2d.ts.net
- home.homenet
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- master.tail2fe2d.ts.net
- weight: 2
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- nas.homenet
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
@@ -121,9 +118,7 @@ spec:
operator: In
values:
- home.homenet
- it.tail2fe2d.ts.net
- ch.tail2fe2d.ts.net
- us.tail2fe2d.ts.net
- nas.homenet
- master.tail2fe2d.ts.net
containers:
- name: gitea-runner

View File

@@ -74,14 +74,19 @@ spec:
- nas.homenet
volumes:
- name: upload-storage
persistentVolumeClaim:
claimName: immich-upload-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/immich/library/
readOnly: false
- name: gphoto-storage
persistentVolumeClaim:
claimName: immich-gphoto-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/immich/GPHOTO/
readOnly: false
- name: camera
persistentVolumeClaim:
claimName: immich-camera-pvc
nfs:
server: nas.homenet
path: /mnt/storage/Storage/Syncthing-repos/PhoneCamera/
readOnly: true
- name: localtime
hostPath:

View File

@@ -1,52 +1,79 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-upload-pv
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/library
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-upload-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-upload-pv
resources:
requests:
storage: 500Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-gphoto-pv
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/GPHOTO
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-gphoto-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-gphoto-pv
resources:
requests:
storage: 500Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-db-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
hostPath:
path: /mnt/storage/Storage/k8s/immich/db
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-db-pvc
namespace: immich
spec:
storageClassName: ""
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
- ReadWriteOnce
volumeName: immich-db-pv
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-camera-pvc
namespace: immich
spec:
accessModes:
- ReadOnlyMany
storageClassName: nfs-csi
resources:
requests:
storage: 100Gi

View File

@@ -1,5 +1,5 @@
image:
tag: 10.11.4
tag: 10.10.7
resources:
requests:
memory: "2Gi"
@@ -36,40 +36,8 @@ ingress:
paths:
- path: /
pathType: Prefix
- host: us.hexor.cy
paths:
- path: /
pathType: Prefix
- host: ch.hexor.cy
paths:
- path: /
pathType: Prefix
- host: jp.hexor.cy
paths:
- path: /
pathType: Prefix
- host: spb.hexor.cy
paths:
- path: /
pathType: Prefix
- host: cy.hexor.cy
paths:
- path: /
pathType: Prefix
- host: am.hexor.cy
paths:
- path: /
pathType: Prefix
- host: de.hexor.cy
paths:
- path: /
pathType: Prefix
- host: it.hexor.cy
paths:
- path: /
pathType: Prefix
tls:
- secretName: jellyfin-tls
hosts:
- '*.hexor.cy'
- 'jf.hexor.cy'

View File

@@ -19,7 +19,7 @@ spec:
kubernetes.io/os: linux
containers:
- name: secret-reader
image: ultradesu/k8s-secrets:0.2.1
image: ultradesu/k8s-secrets:0.1.1
imagePullPolicy: Always
args:
- "--secrets"
@@ -28,7 +28,6 @@ spec:
- "k8s-secret"
- "--port"
- "3000"
- "--webhook"
ports:
- containerPort: 3000
name: http

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: lidarr
namespace: argocd
spec:
project: apps
destination:
namespace: lidarr
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/lidarr
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,14 +0,0 @@
---
# Kustomize entry point for lidarr: the Argo CD app manifest plus the
# lidarr Helm chart rendered with lidarr-values.yaml.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - app.yaml
helmCharts:
  - name: lidarr
    repo: https://k8s-home-lab.github.io/helm-charts/
    version: 15.3.0
    releaseName: lidarr
    namespace: lidarr
    valuesFile: lidarr-values.yaml
    includeCRDs: true

View File

@@ -1,27 +0,0 @@
---
# Helm values for the lidarr chart: pinned to the master node with
# host-path persistence for config and downloads.
env:
  TZ: Asia/Nicosia
resources:
  requests:
    memory: "512Mi"
    cpu: "200m"
  limits:
    memory: "2Gi"
    cpu: "1500m"
nodeSelector:
  kubernetes.io/hostname: master.tail2fe2d.ts.net
persistence:
  config:
    enabled: true
    type: hostPath
    hostPath: /k8s/lidarr
    mountPath: /config
  downloads:
    enabled: true
    type: hostPath
    hostPath: /k8s/media/downloads
    mountPath: /downloads
    accessMode: ReadWriteOnce

View File

@@ -1,53 +0,0 @@
# Multi-stage image for Telegram MTProxy (mtproto-proxy).
# Stage 1 builds the binary, cross-compiling when the buildx TARGETARCH
# differs from the build platform; stage 2 is a slim runtime image.
FROM --platform=$BUILDPLATFORM debian:bookworm-slim AS builder
ARG TARGETARCH

# Native build tooling and libraries.
RUN apt-get update && apt-get install -y \
    git curl make gcc libssl-dev zlib1g-dev \
    && rm -rf /var/lib/apt/lists/*

# Cross toolchain + foreign-arch dev libraries, only when cross-compiling.
RUN if [ "$(dpkg --print-architecture)" != "$TARGETARCH" ]; then \
    dpkg --add-architecture $TARGETARCH && \
    apt-get update && \
    case "$TARGETARCH" in \
    arm64) apt-get install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 ;; \
    amd64) apt-get install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 ;; \
    esac && \
    rm -rf /var/lib/apt/lists/*; \
    fi

RUN git clone https://github.com/TelegramMessenger/MTProxy.git /src
WORKDIR /src
# Select the cross compiler via CC only when target != native arch.
RUN NATIVE=$(dpkg --print-architecture) && \
    if [ "$NATIVE" != "$TARGETARCH" ]; then \
    case "$TARGETARCH" in \
    arm64) export CC=aarch64-linux-gnu-gcc ;; \
    amd64) export CC=x86_64-linux-gnu-gcc ;; \
    esac; \
    fi && \
    make -j$(nproc)

FROM debian:bookworm-slim
# Runtime knobs, overridable at `docker run` time.
ENV PROXY_PORT=30443
ENV STATS_PORT=8888
ENV WORKERS=1
ENV RUN_USER=nobody
RUN apt-get update && apt-get install -y \
    curl libssl3 zlib1g xxd \
    && rm -rf /var/lib/apt/lists/*
COPY --from=builder /src/objs/bin/mtproto-proxy /usr/local/bin/mtproto-proxy
# Telegram config baked in at build time; these rotate upstream, so rebuild
# periodically (or fetch at container start) to stay current.
RUN curl -s https://core.telegram.org/getProxySecret -o /etc/mtproxy/proxy-secret --create-dirs && \
    curl -s https://core.telegram.org/getProxyConfig -o /etc/mtproxy/proxy-multi.conf
# Shell form is kept for ${VAR} expansion, but `exec` replaces the shell so
# mtproto-proxy becomes PID 1 and receives SIGTERM directly (previously
# /bin/sh stayed PID 1 and signals never reached the proxy).
ENTRYPOINT exec mtproto-proxy \
    -u ${RUN_USER} \
    -p ${STATS_PORT} \
    -H ${PROXY_PORT} \
    -M ${WORKERS} \
    --aes-pwd /etc/mtproxy/proxy-secret \
    /etc/mtproxy/proxy-multi.conf

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mtproxy
namespace: argocd
spec:
project: apps
destination:
namespace: mtproxy
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/mtproxy
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,117 +0,0 @@
---
# MTProxy DaemonSet: runs on every node carrying an `mtproxy` label, using
# host networking. An init container downloads the Telegram proxy config and
# registers this node's tg:// link into the mtproxy-links secret.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: mtproxy
  labels:
    app: mtproxy
spec:
  selector:
    matchLabels:
      app: mtproxy
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: mtproxy
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: mtproxy
                    operator: Exists
      serviceAccountName: mtproxy
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      initContainers:
        - name: register-proxy
          image: bitnami/kubectl:latest
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: SECRET
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: SECRET
            - name: PORT
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: PORT
          volumeMounts:
            - name: data
              mountPath: /data
          command:
            - /bin/bash
            - -c
            - |
              set -e
              curl -s https://core.telegram.org/getProxySecret -o /data/proxy-secret
              curl -s https://core.telegram.org/getProxyConfig -o /data/proxy-multi.conf
              NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
              SERVER=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.mtproxy}')
              if [ -z "${SERVER}" ]; then
                echo "ERROR: node ${NODE_NAME} has no mtproxy label"
                exit 1
              fi
              LINK="tg://proxy?server=${SERVER}&port=${PORT}&secret=${SECRET}"
              echo "Registering: ${SERVER} -> ${LINK}"
              if kubectl get secret mtproxy-links -n "${NAMESPACE}" &>/dev/null; then
                kubectl patch secret mtproxy-links -n "${NAMESPACE}" \
                  --type merge -p "{\"stringData\":{\"${SERVER}\":\"${LINK}\"}}"
              else
                kubectl create secret generic mtproxy-links -n "${NAMESPACE}" \
                  --from-literal="${SERVER}=${LINK}"
              fi
              echo "Done"
      containers:
        - name: mtproxy
          image: telegrammessenger/proxy:latest
          # image: ultradesu/mtproxy:v0.02
          imagePullPolicy: Always
          ports:
            - name: proxy
              containerPort: 30443
              protocol: TCP
          command:
            - /bin/sh
            - -c
            - >-
              mtproto-proxy
              -u nobody
              -p 8888
              -H $(PORT)
              -M 1
              -S $(SECRET)
              --aes-pwd /data/proxy-secret
              /data/proxy-multi.conf
          env:
            - name: SECRET
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: SECRET
            - name: PORT
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: PORT
          volumeMounts:
            - name: data
              mountPath: /data
          # resources:
          #   requests:
          #     memory: "128Mi"
          #     cpu: "100m"
          #   limits:
          #     memory: "256Mi"
          #     cpu: "500m"
      volumes:
        - name: data
          emptyDir: {}

View File

@@ -1,25 +0,0 @@
---
# ExternalSecret materialising tgproxy-secret from Vaultwarden: the proxy
# secret comes from the referenced item's first custom field; PORT is fixed.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: tgproxy-secret
spec:
  target:
    name: tgproxy-secret
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        SECRET: |-
          {{ .secret }}
        PORT: "30443"
  data:
    - secretKey: secret
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 58a37daf-72d8-430d-86bd-6152aa8f888d
        property: fields[0].value

View File

@@ -1,11 +0,0 @@
---
# Kustomize entry point for mtproxy.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./app.yaml
  - ./rbac.yaml
  - ./daemonset.yaml
  - ./external-secrets.yaml
  - ./service.yaml
  - ./secret-reader.yaml
  # - ./storage.yaml

View File

@@ -1,58 +0,0 @@
---
# RBAC for mtproxy: the service account needs cluster-wide node reads (to
# resolve each node's `mtproxy` label) and namespace-scoped secret writes
# (to publish the generated tg:// links).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mtproxy
  labels:
    app: mtproxy
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: mtproxy-node-reader
  labels:
    app: mtproxy
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: mtproxy-node-reader
  labels:
    app: mtproxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: mtproxy-node-reader
subjects:
  - kind: ServiceAccount
    name: mtproxy
    namespace: mtproxy
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: mtproxy-secret-manager
  labels:
    app: mtproxy
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: mtproxy-secret-manager
  labels:
    app: mtproxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: mtproxy-secret-manager
subjects:
  - kind: ServiceAccount
    name: mtproxy

View File

@@ -1,63 +0,0 @@
---
# secret-reader: serves the mtproxy-links secret over HTTP on port 3000,
# locked down with a non-root, read-only, no-capabilities security context.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: secret-reader
  labels:
    app: secret-reader
spec:
  replicas: 1
  selector:
    matchLabels:
      app: secret-reader
  template:
    metadata:
      labels:
        app: secret-reader
    spec:
      serviceAccountName: mtproxy
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: secret-reader
          image: ultradesu/k8s-secrets:0.2.1
          imagePullPolicy: Always
          args:
            - "--secrets"
            - "mtproxy-links"
            - "--namespace"
            - "mtproxy"
            - "--port"
            - "3000"
          ports:
            - containerPort: 3000
              name: http
          env:
            - name: RUST_LOG
              value: "info"
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "150m"
          livenessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
          securityContext:
            runAsNonRoot: true
            runAsUser: 1000
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL

View File

@@ -1,16 +0,0 @@
---
# ClusterIP service fronting secret-reader on port 80 -> container 3000.
apiVersion: v1
kind: Service
metadata:
  name: secret-reader
  labels:
    app: secret-reader
spec:
  type: ClusterIP
  selector:
    app: secret-reader
  ports:
    - port: 80
      targetPort: 3000
      protocol: TCP
      name: http

View File

@@ -1,12 +0,0 @@
---
# Longhorn-backed claim for mtproxy data (currently commented out of the
# kustomization resource list).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mtproxy-data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 1Gi

View File

@@ -1,21 +0,0 @@
---
# Argo CD Application: syncs k8s/apps/n8n with automated self-healing
# and pruning.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: n8n
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: n8n
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/n8n
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,165 +0,0 @@
---
# n8n main instance: queue-mode editor/API pod backed by Postgres and Redis,
# with external task runners on the broker port. An init container caches a
# kubectl binary onto the shared tools PVC for use by workflows.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-main
  labels:
    app: n8n
    component: main
spec:
  replicas: 1
  selector:
    matchLabels:
      app: n8n
      component: main
  template:
    metadata:
      labels:
        app: n8n
        component: main
    spec:
      serviceAccountName: n8n
      initContainers:
        - name: install-tools
          image: alpine:3.22
          command:
            - /bin/sh
            - -c
            - |
              set -e
              if [ -x /tools/kubectl ]; then
                echo "kubectl already exists, skipping download"
                /tools/kubectl version --client
                exit 0
              fi
              echo "Downloading kubectl..."
              ARCH=$(uname -m)
              case $ARCH in
                x86_64) ARCH="amd64" ;;
                aarch64) ARCH="arm64" ;;
              esac
              wget -O /tools/kubectl "https://dl.k8s.io/release/$(wget -qO- https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl"
              chmod +x /tools/kubectl
              /tools/kubectl version --client
          volumeMounts:
            - name: tools
              mountPath: /tools
          securityContext:
            runAsUser: 1000
            runAsGroup: 1000
            runAsNonRoot: true
      containers:
        - name: n8n
          image: n8nio/n8n:latest
          ports:
            - containerPort: 5678
              name: http
            - containerPort: 5679
              name: task-broker
          env:
            # /opt/tools first so the cached kubectl is found on PATH.
            - name: PATH
              value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            - name: HOME
              value: "/home/node"
            - name: N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS
              value: "true"
            - name: NODES_EXCLUDE
              value: "[]"
            - name: N8N_HOST
              value: "n8n.hexor.cy"
            - name: N8N_PORT
              value: "5678"
            - name: N8N_PROTOCOL
              value: "https"
            - name: N8N_RUNNERS_ENABLED
              value: "true"
            - name: N8N_RUNNERS_MODE
              value: "external"
            - name: N8N_RUNNERS_BROKER_LISTEN_ADDRESS
              value: "0.0.0.0"
            - name: N8N_LISTEN_ADDRESS
              value: "0.0.0.0"
            - name: N8N_RUNNERS_BROKER_PORT
              value: "5679"
            - name: EXECUTIONS_MODE
              value: "queue"
            - name: QUEUE_BULL_REDIS_HOST
              value: "n8n-redis"
            - name: QUEUE_BULL_REDIS_PORT
              value: "6379"
            - name: NODE_ENV
              value: "production"
            - name: WEBHOOK_URL
              value: "https://n8n.hexor.cy/"
            - name: N8N_PROXY_HOPS
              value: "1"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            - name: DB_TYPE
              value: "postgresdb"
            - name: DB_POSTGRESDB_HOST
              value: "psql.psql.svc"
            - name: DB_POSTGRESDB_DATABASE
              value: "n8n"
            - name: DB_POSTGRESDB_USER
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: username
            - name: DB_POSTGRESDB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: password
            - name: N8N_ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: encryptionkey
            - name: N8N_RUNNERS_AUTH_TOKEN
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: runnertoken
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
            - name: tools
              mountPath: /opt/tools
          resources:
            requests:
              cpu: 2000m
              memory: 512Mi
            limits:
              cpu: 4000m
              memory: 2048Mi
          livenessProbe:
            httpGet:
              path: /healthz
              port: http
            initialDelaySeconds: 240
            periodSeconds: 30
            timeoutSeconds: 20
            failureThreshold: 10
          readinessProbe:
            httpGet:
              path: /healthz/readiness
              port: http
            initialDelaySeconds: 120
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 15
      volumes:
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
        - name: tools
          persistentVolumeClaim:
            claimName: n8n-tools
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -1,87 +0,0 @@
---
# n8n external task runners: connect to the main pod's task broker on 5679
# and share the n8n-data/tools PVCs.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-runner
  labels:
    app: n8n
    component: runner
spec:
  replicas: 2
  selector:
    matchLabels:
      app: n8n
      component: runner
  template:
    metadata:
      labels:
        app: n8n
        component: runner
    spec:
      serviceAccountName: n8n
      containers:
        - name: n8n-runner
          image: n8nio/runners:latest
          ports:
            - containerPort: 5680
              name: health
          env:
            - name: PATH
              value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            - name: HOME
              value: "/home/node"
            - name: N8N_RUNNERS_TASK_BROKER_URI
              value: "http://n8n:5679"
            - name: N8N_RUNNERS_LAUNCHER_LOG_LEVEL
              value: "info"
            - name: N8N_RUNNERS_MAX_CONCURRENCY
              value: "10"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            - name: N8N_RUNNERS_AUTH_TOKEN
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: runnertoken
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
            - name: tools
              mountPath: /opt/tools
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 5680
            initialDelaySeconds: 30
            periodSeconds: 30
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /healthz
              port: 5680
            initialDelaySeconds: 15
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
      volumes:
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
        - name: tools
          persistentVolumeClaim:
            claimName: n8n-tools
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -1,84 +0,0 @@
---
# n8n queue workers: consume executions from Redis, share the Postgres
# database and encryption key with the main instance via the credentials
# secret.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-worker
  labels:
    app: n8n
    component: worker
spec:
  replicas: 2
  selector:
    matchLabels:
      app: n8n
      component: worker
  template:
    metadata:
      labels:
        app: n8n
        component: worker
    spec:
      serviceAccountName: n8n
      containers:
        - name: n8n-worker
          image: n8nio/n8n:latest
          command:
            - n8n
            - worker
          env:
            - name: HOME
              value: "/home/node"
            - name: N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS
              value: "true"
            - name: EXECUTIONS_MODE
              value: "queue"
            - name: QUEUE_BULL_REDIS_HOST
              value: "n8n-redis"
            - name: QUEUE_BULL_REDIS_PORT
              value: "6379"
            - name: NODE_ENV
              value: "production"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            - name: DB_TYPE
              value: "postgresdb"
            - name: DB_POSTGRESDB_HOST
              value: "psql.psql.svc"
            - name: DB_POSTGRESDB_DATABASE
              value: "n8n"
            - name: DB_POSTGRESDB_USER
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: username
            - name: DB_POSTGRESDB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: password
            - name: N8N_ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: encryptionkey
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
      volumes:
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -1,50 +0,0 @@
---
# ExternalSecret assembling the n8n `credentials` secret from Vaultwarden:
# Postgres password, encryption key, and runner auth token come from custom
# fields of the referenced items; the DB username is fixed to "n8n".
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: credentials
spec:
  target:
    name: credentials
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        password: "{{ .psql | trim }}"
        username: "n8n"
        encryptionkey: "{{ .enc_pass | trim }}"
        runnertoken: "{{ .runner_token | trim }}"
  data:
    - secretKey: psql
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        metadataPolicy: None
        key: 2a9deb39-ef22-433e-a1be-df1555625e22
        property: fields[13].value
    - secretKey: enc_pass
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        metadataPolicy: None
        key: 18c92d73-9637-4419-8642-7f7b308460cb
        property: fields[0].value
    - secretKey: runner_token
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        metadataPolicy: None
        key: 18c92d73-9637-4419-8642-7f7b308460cb
        property: fields[1].value

View File

@@ -1,28 +0,0 @@
---
# Traefik ingress for n8n.hexor.cy with cert-manager TLS and an
# HTTPS-redirect middleware.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: n8n
  labels:
    app: n8n
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  ingressClassName: traefik
  tls:
    - hosts:
        - n8n.hexor.cy
      secretName: n8n-tls
  rules:
    - host: n8n.hexor.cy
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: n8n
                port:
                  number: 80

View File

@@ -1,28 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- external-secrets.yaml
- storage.yaml
- rbac.yaml
- redis-deployment.yaml
- redis-service.yaml
- paddleocr-deployment.yaml
- paddleocr-service.yaml
- deployment-main.yaml
- deployment-worker.yaml
- deployment-runner.yaml
- service.yaml
- ingress.yaml
helmCharts:
- name: yacy
repo: https://gt.hexor.cy/api/packages/ab/helm
version: 0.1.2
releaseName: yacy
namespace: n8n
valuesFile: values-yacy.yaml
includeCRDs: true
commonLabels:
app.kubernetes.io/name: n8n

View File

@@ -1,43 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: paddleocr
labels:
app: paddleocr
component: n8n
spec:
replicas: 1
selector:
matchLabels:
app: paddleocr
component: n8n
template:
metadata:
labels:
app: paddleocr
component: n8n
spec:
containers:
- name: paddleocr
image: c403/paddleocr
ports:
- containerPort: 5000
name: http
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 1000m
memory: 2Gi
livenessProbe:
tcpSocket:
port: 5000
initialDelaySeconds: 60
periodSeconds: 30
readinessProbe:
tcpSocket:
port: 5000
initialDelaySeconds: 30
periodSeconds: 10

View File

@@ -1,18 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: paddleocr
labels:
app: paddleocr
component: n8n
spec:
selector:
app: paddleocr
component: n8n
ports:
- name: http
port: 80
targetPort: 5000
protocol: TCP
type: ClusterIP

View File

@@ -1,45 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: n8n
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: n8n-clusterrole
rules:
# Core API group ("")
- apiGroups: [""]
resources: ["*"]
verbs: ["get", "list", "watch"]
# Common built-in API groups
- apiGroups: ["apps", "batch", "autoscaling", "extensions", "policy"]
resources: ["*"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io", "rbac.authorization.k8s.io", "apiextensions.k8s.io"]
resources: ["*"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io", "discovery.k8s.io", "events.k8s.io"]
resources: ["*"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io", "admissionregistration.k8s.io", "authentication.k8s.io", "authorization.k8s.io"]
resources: ["*"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: n8n-clusterrolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: n8n-clusterrole
subjects:
- kind: ServiceAccount
name: n8n
namespace: n8n

View File

@@ -1,57 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: n8n-redis
labels:
app: redis
component: n8n
spec:
replicas: 1
selector:
matchLabels:
app: redis
component: n8n
template:
metadata:
labels:
app: redis
component: n8n
spec:
containers:
- name: redis
image: redis:7-alpine
ports:
- containerPort: 6379
name: redis
command:
- redis-server
- --appendonly
- "yes"
- --save
- "900 1"
volumeMounts:
- name: redis-data
mountPath: /data
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 200m
memory: 256Mi
livenessProbe:
tcpSocket:
port: 6379
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
exec:
command:
- redis-cli
- ping
initialDelaySeconds: 5
periodSeconds: 5
volumes:
- name: redis-data
emptyDir: {}

View File

@@ -1,18 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: n8n-redis
labels:
app: redis
component: n8n
spec:
selector:
app: redis
component: n8n
ports:
- name: redis
port: 6379
targetPort: 6379
protocol: TCP
type: ClusterIP

View File

@@ -1,21 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: n8n
labels:
app: n8n
spec:
selector:
app: n8n
component: main
ports:
- name: http
port: 80
targetPort: 5678
protocol: TCP
- name: task-broker
port: 5679
targetPort: 5679
protocol: TCP
type: ClusterIP

View File

@@ -1,24 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: n8n-data
spec:
accessModes:
- ReadWriteMany
storageClassName: longhorn
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: n8n-tools
spec:
accessModes:
- ReadWriteMany
storageClassName: longhorn
resources:
requests:
storage: 20Gi

View File

@@ -1,24 +0,0 @@
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
resources:
limits:
memory: 2Gi
requests:
memory: 1Gi
persistence:
enabled: true
size: 10Gi
yacy:
network:
mode: "intranet"
config:
network.unit.bootstrap.seedlist: ""
network.unit.remotecrawl: "false"
network.unit.dhtredundancy.junior: "1"
network.unit.dhtredundancy.senior: "1"
index.receive.allow: "false"
index.distribute.allow: "false"
crawl.response.timeout: "10000"

View File

@@ -3,23 +3,19 @@ kind: Kustomization
resources:
- external-secrets.yaml
- local-pv.yaml
helmCharts:
- name: ollama
repo: https://otwld.github.io/ollama-helm/
version: 1.49.0
version: 0.4.0
releaseName: ollama
namespace: ollama
valuesFile: ollama-values.yaml
includeCRDs: true
- name: open-webui
repo: https://helm.openwebui.com/
version: 12.8.1
version: 8.14.0
releaseName: openweb-ui
namespace: ollama
valuesFile: openweb-ui-values.yaml
includeCRDs: true
patches:
- path: patch-runtimeclass.yaml
includeCRDs: true

View File

@@ -1,22 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: ollama-local-pv
spec:
capacity:
storage: 100Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-path
local:
path: /var/lib/ollama
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- uk-desktop.tail2fe2d.ts.net

View File

@@ -3,20 +3,6 @@ image:
pullPolicy: Always
tag: "latest"
nodeSelector:
kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
tolerations:
- key: workload
operator: Equal
value: desktop
effect: NoSchedule
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
ollama:
gpu:
enabled: true
type: 'nvidia'
number: 1
persistentVolume:
enabled: true
size: 100Gi
storageClass: "local-path"

View File

@@ -1,4 +1,4 @@
clusterDomain: cluster.local
clusterDomain: ai.hexor.cy
extraEnvVars:
GLOBAL_LOG_LEVEL: debug
@@ -32,22 +32,12 @@ ollama:
pipelines:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
tika:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
websocket:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
redis:
master:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: true
@@ -56,5 +46,7 @@ ingress:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
host: "ai.hexor.cy"
tls: true
existingSecret: ollama-tls
tls:
- hosts:
- '*.hexor.cy'
secretName: ollama-tls

View File

@@ -1,9 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
namespace: ollama
spec:
template:
spec:
runtimeClassName: nvidia

View File

@@ -4,7 +4,6 @@ kind: Kustomization
resources:
- app.yaml
- external-secrets.yaml
- paperless-ai.yaml
helmCharts:
- name: paperless-ngx
@@ -28,11 +27,4 @@ helmCharts:
namespace: paperless
valuesFile: gotenberg-values.yaml
includeCRDs: true
#- name: redis
# repo: oci://registry-1.docker.io/bitnamicharts/redis
# version: 24.1.0
# releaseName: redis
# namespace: paperless
# includeCRDs: true
# valuesFile: bazarr-values.yaml

View File

@@ -1,101 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: paperless-ai
labels:
app: paperless-ai
spec:
replicas: 1
selector:
matchLabels:
app: paperless-ai
template:
metadata:
labels:
app: paperless-ai
spec:
nodeSelector:
kubernetes.io/hostname: nas.homenet
containers:
- name: paperless-ai
image: clusterzx/paperless-ai:latest
imagePullPolicy: Always
ports:
- containerPort: 3000
name: http
env:
- name: NODE_ENV
value: production
- name: PAPERLESS_AI_PORT
value: "3000"
resources:
requests:
memory: 512Mi
cpu: 500m
limits:
memory: 1024Mi
cpu: 2000m
#livenessProbe:
# httpGet:
# path: /
# port: 8000
# initialDelaySeconds: 30
# periodSeconds: 10
#readinessProbe:
# httpGet:
# path: /
# port: 8000
# initialDelaySeconds: 5
# periodSeconds: 5
volumeMounts:
- name: data
mountPath: /app/data
volumes:
- name: data
hostPath:
path: /mnt/storage/Storage/k8s/paperless/ai-data
type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
name: paperless-ai
namespace: paperless
labels:
app: paperless-ai
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app: paperless-ai
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: paperless-ai-ingress
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: ai-docs.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: paperless-ai
port:
number: 3000
tls:
- secretName: docs-tls
hosts:
- '*.hexor.cy'

View File

@@ -1,5 +1,5 @@
image:
tag: latest
tag: 2.19.3
resources:
requests:
memory: "1Gi"
@@ -9,7 +9,7 @@ resources:
cpu: "3000m"
initContainers:
install-tesseract-langs:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
image: ghcr.io/paperless-ngx/paperless-ngx:2.18.2
resources:
requests:
memory: "256Mi"
@@ -107,8 +107,6 @@ persistence:
- path: /usr/src/paperless/consume
redis:
enabled: true
image:
tag: latest
master:
nodeSelector:
kubernetes.io/hostname: nas.homenet

View File

@@ -112,8 +112,47 @@ spec:
- name: scripts
mountPath: /scripts
containers:
- name: xray-exporter
image: alpine:3.18
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- /scripts/exporter-start.sh
ports:
- name: metrics
containerPort: 9550
protocol: TCP
livenessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "150m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: true
- name: scripts
mountPath: /scripts
- name: pasarguard-node
image: 'pasarguard/node:v0.2.1'
image: 'pasarguard/node:v0.1.1'
imagePullPolicy: Always
command:
- /bin/sh
@@ -162,56 +201,16 @@ spec:
resources:
requests:
memory: "128Mi"
#cpu: "500m"
cpu: "100m"
limits:
memory: "512Mi"
#cpu: "1200m"
cpu: "750m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: false
- name: scripts
mountPath: /scripts
- name: xray-exporter
image: alpine:3.18
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- /scripts/exporter-start.sh
ports:
- name: metrics
containerPort: 9550
protocol: TCP
livenessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /scrape
port: metrics
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "500m"
volumeMounts:
- name: shared-data
mountPath: /shared
readOnly: true
- name: scripts
mountPath: /scripts
volumes:
- name: shared-data
emptyDir: {}

View File

@@ -34,7 +34,7 @@ spec:
mountPath: /templates/subscription
containers:
- name: pasarguard-web
image: 'pasarguard/panel:latest'
image: 'pasarguard/panel:v1.7.2'
imagePullPolicy: Always
envFrom:
- secretRef:

View File

@@ -8,5 +8,3 @@ resources:
- ./subscription-page-configmap.yaml
- ./subscription-page-deployment.yaml
- ./servicemonitor.yaml
- ./user-ui-ingress.yaml
- ./panel-ingress.yaml

View File

@@ -1,37 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: panel-ui
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: rw.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave
port:
number: 3000
- host: rw.hexor.ru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave
port:
number: 3000
tls:
- secretName: remnawave-panel-tls
hosts:
- rw.hexor.cy
- rw.hexor.ru

View File

@@ -7,7 +7,7 @@ metadata:
app: remnawave-subscription-page
data:
APP_PORT: "3010"
REMNAWAVE_PANEL_URL: "https://rw.hexor.cy"
REMNAWAVE_PANEL_URL: "http://remnawave.remnawave.svc:3000"
META_TITLE: "RemnaWave Subscription"
META_DESCRIPTION: "Your VPN subscription portal"
META_KEYWORDS: "vpn,subscription,remnawave"

View File

@@ -28,6 +28,22 @@ spec:
- name: http
containerPort: 3010
protocol: TCP
livenessProbe:
httpGet:
path: /
port: 3010
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /
port: 3010
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 3
resources:
requests:
memory: "64Mi"

View File

@@ -1,37 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: user-ui
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: sub.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave-subscription-page
port:
number: 3010
- host: sub.hexor.ru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: remnawave-subscription-page
port:
number: 3010
tls:
- secretName: remnawave-user-ui-tls
hosts:
- sub.hexor.cy
- sub.hexor.ru

View File

@@ -16,18 +16,18 @@ helmCharts:
valuesFile: syncthing-master.yaml
includeCRDs: true
- name: syncthing
repo: https://k8s-home-lab.github.io/helm-charts
version: 4.0.0
releaseName: syncthing-khv
namespace: syncthing
valuesFile: syncthing-khv.yaml
includeCRDs: true
- name: syncthing
repo: https://k8s-home-lab.github.io/helm-charts
version: 4.0.0
releaseName: syncthing-nas
namespace: syncthing
valuesFile: syncthing-nas.yaml
includeCRDs: true
# - name: syncthing
# repo: https://k8s-home-lab.github.io/helm-charts
# version: 4.0.0
# releaseName: syncthing-khv
# namespace: syncthing
# valuesFile: syncthing-khv.yaml
# includeCRDs: true
includeCRDs: true

View File

@@ -1,3 +1,4 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -22,7 +23,7 @@ spec:
kubernetes.io/hostname: home.homenet
containers:
- name: desubot
image: "ultradesu/desubot:latest"
image: 'ultradesu/desubot:latest'
imagePullPolicy: Always
envFrom:
- secretRef:
@@ -31,11 +32,11 @@ spec:
- name: RUST_LOG
value: "info"
volumeMounts:
- mountPath: /storage
name: storage
- mountPath: /storage
name: storage
volumes:
- name: storage
persistentVolumeClaim:
claimName: desubot-storage
readOnly: false
nfs:
server: nas.homenet
path: /mnt/storage/Storage/k8s/desubot/
readOnly: false

View File

@@ -30,7 +30,7 @@ spec:
name: get-id-bot
env:
- name: RUST_LOG
value: "info,teloxide::error_handlers=off"
value: "info"

View File

@@ -7,6 +7,3 @@ resources:
- get-id-bot.yaml
- external-secrets.yaml
- desubot.yaml
- restart-job.yaml
- storage.yaml

View File

@@ -1,56 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tg-bots-restart-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tg-bots-restart-role
rules:
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tg-bots-restart-rb
subjects:
- kind: ServiceAccount
name: tg-bots-restart-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: tg-bots-restart-role
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: tg-bots-daily-restart
spec:
schedule: "0 4 * * *" # every day at 04:00
jobTemplate:
spec:
template:
spec:
serviceAccountName: tg-bots-restart-sa
restartPolicy: OnFailure
containers:
- name: kubectl
image: bitnami/kubectl:latest
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /bin/sh
- -c
- |
kubectl -n "$POD_NAMESPACE" rollout restart deployment/desubot
kubectl -n "$POD_NAMESPACE" rollout restart deployment/get-id-bot

View File

@@ -1,12 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: desubot-storage
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-csi
resources:
requests:
storage: 200Gi

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: xandikos
namespace: argocd
spec:
project: apps
destination:
namespace: xandikos
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/apps/xandikos
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,70 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: xandikos
labels:
app: xandikos
spec:
selector:
matchLabels:
app: xandikos
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: xandikos
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
volumes:
- name: storage
hostPath:
path: /k8s/xandikos
type: Directory
containers:
- name: xandikos
image: ghcr.io/jelmer/xandikos:latest
imagePullPolicy: Always
command:
- "python3"
- "-m"
- "xandikos.web"
- "--port=8081"
- "-d/data"
- "--defaults"
- "--listen-address=0.0.0.0"
- "--route-prefix=/dav"
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /
port: 8081
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8081
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
ports:
- name: http
containerPort: 8081
protocol: TCP
volumeMounts:
- name: storage
mountPath: /data

View File

@@ -1,31 +0,0 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: mmdl-secrets
spec:
target:
name: mmdl-secrets
deletionPolicy: Delete
template:
type: Opaque
data:
DB_DIALECT: 'postgres'
DB_HOST: psql.psql.svc
DB_USER: mmdl
DB_NAME: mmdl
DB_PORT: "5432"
DB_PASS: |-
{{ .pg_pass }}
AES_PASSWORD: |-
{{ .pg_pass }}
data:
- secretKey: pg_pass
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[12].value

View File

@@ -1,47 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: xandikos
annotations:
ingressClassName: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
rules:
- host: cal.hexor.cy
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mmdl
port:
number: 3000
- path: /dav
pathType: Prefix
backend:
service:
name: xandikos
port:
number: 8081
- path: /.well-known/carddav
pathType: Exact
backend:
service:
name: xandikos
port:
number: 8081
- path: /.well-known/caldav
pathType: Exact
backend:
service:
name: xandikos
port:
number: 8081
tls:
- secretName: xandikos-tls
hosts:
- cal.hexor.cy

View File

@@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
- mmdl-deployment.yaml
- mmdl-service.yaml
- ingress.yaml
- external-secrets.yaml

View File

@@ -1,61 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mmdl
labels:
app: mmdl
spec:
selector:
matchLabels:
app: mmdl
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: mmdl
spec:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: mmdl
image: intriin/mmdl:latest
imagePullPolicy: Always
envFrom:
- secretRef:
name: mmdl-secrets
env:
- name: NEXTAUTH_URL
value: "https://cal.hexor.cy"
- name: CALDAV_SERVER_URL
value: "https://cal.hexor.cy/dav"
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
ports:
- name: http
containerPort: 3000
protocol: TCP

View File

@@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: mmdl
spec:
selector:
app: mmdl
type: ClusterIP
ports:
- name: http
port: 3000
protocol: TCP
targetPort: 3000

View File

@@ -1,16 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: xandikos
labels:
app: xandikos
spec:
selector:
app: xandikos
ports:
- protocol: TCP
port: 8081
targetPort: 8081
name: http
type: ClusterIP

View File

@@ -18,5 +18,4 @@ spec:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true

View File

@@ -47,20 +47,3 @@ spec:
server: https://kubernetes.default.svc
sourceRepos:
- ssh://git@gt.hexor.cy:30022/ab/homelab.git
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: desktop
namespace: argocd
spec:
clusterResourceWhitelist:
- group: '*'
kind: '*'
description: Hexor Home Lab Desktop Apps
destinations:
- namespace: '*'
server: https://kubernetes.default.svc
sourceRepos:
- ssh://git@gt.hexor.cy:30022/ab/homelab.git

View File

@@ -23,9 +23,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 1062e5b4-5380-49f1-97c3-340f26f3487e
property: fields[0].value
- secretKey: client_secret
@@ -34,9 +31,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 1062e5b4-5380-49f1-97c3-340f26f3487e
property: fields[1].value

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: argo-cd
repo: https://argoproj.github.io/argo-helm
version: 9.4.10
version: 9.1.4
releaseName: argocd
namespace: argocd
valuesFile: values.yaml

View File

@@ -2,7 +2,7 @@
global:
domain: ag.hexor.cy
nodeSelector: &nodeSelector
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
logging:
format: text
@@ -28,9 +28,8 @@ configs:
issuer: https://idm.hexor.cy/application/o/argocd/
clientID: $oidc-creds:id
clientSecret: $oidc-creds:secret
requestedScopes: ["openid", "profile", "email", "groups", "offline_access"]
requestedScopes: ["openid", "profile", "email", "groups"]
requestedIDTokenClaims: {"groups": {"essential": true}}
refreshTokenThreshold: 2m
rbac:
create: true
policy.default: ""
@@ -56,15 +55,15 @@ configs:
controller:
replicas: 1
nodeSelector:
<<: *nodeSelector
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# Dex OIDC provider
dex:
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
enabled: false
# Standard Redis disabled because Redis HA is enabled
@@ -87,7 +86,7 @@ redis-ha:
server:
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
@@ -100,11 +99,8 @@ server:
# Repository Server
repoServer:
replicas: 1
livenessProbe:
timeoutSeconds: 10
periodSeconds: 60
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# ApplicationSet Controller
@@ -112,7 +108,7 @@ applicationSet:
enabled: true # Enabled by default
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add resources (requests/limits), PDB etc. if needed
# Notifications Controller
@@ -120,5 +116,5 @@ notifications:
enabled: true # Enabled by default
replicas: 1
nodeSelector:
<<: *nodeSelector
kubernetes.io/hostname: master.tail2fe2d.ts.net
# Add notifiers, triggers, templates configurations if needed

View File

@@ -18,4 +18,4 @@ spec:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true

View File

@@ -19,14 +19,6 @@ spec:
{{ .password }}
AUTHENTIK_SECRET_KEY: |-
{{ .secret_key }}
POSTGRES_PASSWORD: |-
{{ .password }}
POSTGRES_USER: |-
{{ .username }}
username: |-
{{ .password }}
password: |-
{{ .username }}
data:
- secretKey: password
sourceRef:
@@ -34,9 +26,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 279c2c1f-c147-4b6b-a511-36c3cd764f9d
property: login.password
- secretKey: username
@@ -45,9 +34,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 279c2c1f-c147-4b6b-a511-36c3cd764f9d
property: login.username
- secretKey: secret_key
@@ -56,9 +42,6 @@ spec:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 279c2c1f-c147-4b6b-a511-36c3cd764f9d
property: fields[0].value

View File

@@ -5,12 +5,12 @@ resources:
- app.yaml
- external-secrets.yaml
- https-middleware.yaml
# - worker-restart.yaml
- worker-restart.yaml
helmCharts:
- name: authentik
repo: https://charts.goauthentik.io
version: 2026.2.1
version: 2025.10.1
releaseName: authentik
namespace: authentik
valuesFile: values.yaml

View File

@@ -1,6 +1,8 @@
global:
image:
tag: "2026.2.1"
tag: "2025.10.1"
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
authentik:
error_reporting:
@@ -13,35 +15,14 @@ worker:
envFrom:
- secretRef:
name: authentik-creds
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
# sizeLimit: 512Mi
# volumeMounts:
# - name: dshm
# mountPath: /dev/shm
# livenessProbe:
# exec:
# command: ["/bin/sh", "-c", "kill -0 1"]
# initialDelaySeconds: 5
# periodSeconds: 10
# failureThreshold: 3
# timeoutSeconds: 3
# readinessProbe:
# exec:
# command: ["/bin/sh", "-c", "kill -0 1"]
# initialDelaySeconds: 5
# periodSeconds: 10
# failureThreshold: 3
# timeoutSeconds: 3
# startupProbe:
# exec:
# command: ["/bin/sh", "-c", "kill -0 1"]
# initialDelaySeconds: 30
# periodSeconds: 10
# failureThreshold: 60
# timeoutSeconds: 3
volumes:
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 512Mi
volumeMounts:
- name: dshm
mountPath: /dev/shm
server:
envFrom:
- secretRef:
@@ -66,11 +47,11 @@ server:
- minecraft.hexor.cy # Minecraft UI and server
- pass.hexor.cy # k8s-secret for openai
- ps.hexor.cy # pasarguard UI
# - rw.hexor.cy # RemnaWave UI
- rw.hexor.cy # RemnaWave UI
tls:
- secretName: idm-tls
hosts:
- '*.hexor.cy'
redis:
enabled: false
enabled: true

View File

@@ -35,6 +35,5 @@ spec:
key: secretKey
selector:
dnsZones:
- "ps.hexor.cy"
- "of.hexor.cy"

View File

@@ -1,6 +1,6 @@
FROM debian:sid
ENV BW_CLI_VERSION=2025.12.1
ENV BW_CLI_VERSION=2025.5.0
RUN apt update && \
apt install -y wget unzip && \

View File

@@ -37,15 +37,15 @@ spec:
kubernetes.io/hostname: master.tail2fe2d.ts.net
containers:
- name: bitwarden-cli
image: ultradesu/bitwarden-client:2025.12.1
image: ultradesu/bitwarden-client:2025.5.0
imagePullPolicy: Always
resources:
requests:
memory: "128Mi"
cpu: "300m"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "1000m"
cpu: "500m"
env:
- name: BW_HOST
valueFrom:

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: gpu-system
namespace: argocd
spec:
project: core
destination:
namespace: gpu-system
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/core/gpu
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
- runtime-class.yaml
helmCharts:
- name: nvidia-device-plugin
repo: https://nvidia.github.io/k8s-device-plugin
version: 0.17.0
releaseName: nvidia-device-plugin
namespace: gpu-system
valuesFile: values.yaml
includeCRDs: true

View File

@@ -1,5 +0,0 @@
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
name: nvidia
handler: nvidia

View File

@@ -1,23 +0,0 @@
nodeSelector:
kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
tolerations:
- key: workload
operator: Equal
value: desktop
effect: NoSchedule
runtimeClassName: nvidia
setAsDefault: false
config:
default: any
map:
any: |-
version: v1
sharing:
timeSlicing:
resources:
- name: nvidia.com/gpu
replicas: 4

View File

@@ -3,15 +3,5 @@ kind: Kustomization
resources:
- app.yaml
- nfs-storage.yaml
- coredns-internal-resolve.yaml
helmCharts:
- name: csi-driver-nfs
repo: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
version: 4.12.0
releaseName: csi-driver-nfs
namespace: kube-system
#valuesFile: values.yaml
includeCRDs: true

View File

@@ -1,20 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
server: nas.homenet
share: /mnt/storage/Storage/PVC
reclaimPolicy: Retain
volumeBindingMode: Immediate
mountOptions:
- nfsvers=4.1
- rsize=1048576
- wsize=1048576
- timeo=14
- intr
- bg
- soft
- noatime

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: longhorn
namespace: argocd
spec:
project: core
destination:
namespace: longhorn
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/core/longhorn
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
#resources:
# - app.yaml
helmCharts:
- name: longhorn
repo: https://charts.longhorn.io
version: 1.11.0
releaseName: longhorn
namespace: longhorn
valuesFile: values.yaml
includeCRDs: true

View File

@@ -1,4 +0,0 @@
longhornUI:
replicas: 1
persistence:
reclaimPolicy: "Retain"

View File

@@ -121,12 +121,6 @@ spec:
{{ .pasarguard }}
USER_remnawave: |-
{{ .remnawave }}
USER_umami: |-
{{ .umami }}
USER_mmdl: |-
{{ .mmdl }}
USER_n8n: |-
{{ .n8n }}
data:
- secretKey: authentik
sourceRef:
@@ -238,37 +232,3 @@ spec:
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[10].value
- secretKey: umami
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[11].value
- secretKey: mmdl
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[12].value
- secretKey: n8n
sourceRef:
storeRef:
name: vaultwarden-login
kind: ClusterSecretStore
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
key: 2a9deb39-ef22-433e-a1be-df1555625e22
property: fields[13].value

View File

@@ -28,7 +28,7 @@ spec:
type: DirectoryOrCreate
containers:
- name: psql
image: 'bitnamilegacy/postgresql:17'
image: 'bitnami/postgresql:17'
env:
- name: POSTGRESQL_PASSWORD
valueFrom:
@@ -63,7 +63,7 @@ spec:
containerPort: 9187
protocol: TCP
- name: user-creation
image: 'bitnamilegacy/postgresql:17'
image: 'bitnami/postgresql:17'
command:
- /bin/bash
- -c

Some files were not shown because too many files have changed in this diff Show More