Compare commits

..

1 Commit

Author SHA1 Message Date
Gitea Actions Bot
7b0e1a297f Auto-update README with current k8s applications
All checks were successful
Terraform / Terraform (pull_request) Successful in 19s
Generated by CI/CD workflow on 2026-02-11 10:13:04

This PR updates the README.md file with the current list of applications found in the k8s/ directory structure.
2026-02-11 10:13:04 +00:00
37 changed files with 117 additions and 817 deletions

View File

@@ -16,7 +16,6 @@ ArgoCD homelab project
| **authentik** | [![authentik](https://ag.hexor.cy/api/badge?name=authentik&revision=true)](https://ag.hexor.cy/applications/argocd/authentik) |
| **cert-manager** | [![cert-manager](https://ag.hexor.cy/api/badge?name=cert-manager&revision=true)](https://ag.hexor.cy/applications/argocd/cert-manager) |
| **external-secrets** | [![external-secrets](https://ag.hexor.cy/api/badge?name=external-secrets&revision=true)](https://ag.hexor.cy/applications/argocd/external-secrets) |
| **gpu** | [![gpu](https://ag.hexor.cy/api/badge?name=gpu&revision=true)](https://ag.hexor.cy/applications/argocd/gpu) |
| **kube-system-custom** | [![kube-system-custom](https://ag.hexor.cy/api/badge?name=kube-system-custom&revision=true)](https://ag.hexor.cy/applications/argocd/kube-system-custom) |
| **kubernetes-dashboard** | [![kubernetes-dashboard](https://ag.hexor.cy/api/badge?name=kubernetes-dashboard&revision=true)](https://ag.hexor.cy/applications/argocd/kubernetes-dashboard) |
| **longhorn** | [![longhorn](https://ag.hexor.cy/api/badge?name=longhorn&revision=true)](https://ag.hexor.cy/applications/argocd/longhorn) |
@@ -38,7 +37,6 @@ ArgoCD homelab project
| Application | Status |
| :--- | :---: |
| **comfyui** | [![comfyui](https://ag.hexor.cy/api/badge?name=comfyui&revision=true)](https://ag.hexor.cy/applications/argocd/comfyui) |
| **gitea** | [![gitea](https://ag.hexor.cy/api/badge?name=gitea&revision=true)](https://ag.hexor.cy/applications/argocd/gitea) |
| **greece-notifier** | [![greece-notifier](https://ag.hexor.cy/api/badge?name=greece-notifier&revision=true)](https://ag.hexor.cy/applications/argocd/greece-notifier) |
| **hexound** | [![hexound](https://ag.hexor.cy/api/badge?name=hexound&revision=true)](https://ag.hexor.cy/applications/argocd/hexound) |

View File

@@ -1,20 +0,0 @@
# Argo CD Application: deploys the ComfyUI stack from k8s/apps/comfyui
# into the `comfyui` namespace of the local cluster, with auto-sync.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: comfyui
  namespace: argocd
spec:
  project: apps
  destination:
    namespace: comfyui
    server: https://kubernetes.default.svc
  source:
    repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
    targetRevision: HEAD
    path: k8s/apps/comfyui
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -1,61 +0,0 @@
# ComfyUI Deployment: a single GPU replica pinned to the desktop node,
# using the `nvidia` RuntimeClass and a local-path PVC for /workspace.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: comfyui
  namespace: comfyui
  labels:
    app: comfyui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: comfyui
  template:
    metadata:
      labels:
        app: comfyui
    spec:
      runtimeClassName: nvidia
      tolerations:
        - key: workload
          operator: Equal
          value: desktop
          effect: NoSchedule
      nodeSelector:
        kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
      # Fix permissions mismatch usually happening when mapping host paths
      securityContext:
        runAsUser: 0
      initContainers:
        # Pre-creates the data dir and chowns it to the container user (1000).
        - name: create-data-dir
          image: busybox
          command: ["sh", "-c", "mkdir -p /host.data && chown -R 1000:1000 /host.data"]
          volumeMounts:
            - name: data
              mountPath: /host.data
      containers:
        - name: comfyui
          image: ghcr.io/ai-dock/comfyui:latest-cuda
          imagePullPolicy: IfNotPresent
          env:
            - name: COMFYUI_FLAGS
              value: "--listen 0.0.0.0"
            - name: COMFYUI_PORT_LOCAL
              value: "8188"
            - name: COMFYUI_PORT_HOST
              value: "8189"
          ports:
            - containerPort: 8188
              name: http
              protocol: TCP
          resources:
            limits:
              nvidia.com/gpu: 1
          volumeMounts:
            - name: data
              # For ai-dock images, /workspace is the persistent user directory
              mountPath: /workspace
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: comfyui-data-pvc

View File

@@ -1,9 +0,0 @@
# Kustomization for the ComfyUI app: plain resource list, no patches.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - local-pv.yaml
  - pvc.yaml
  - deployment.yaml
  - service.yaml

View File

@@ -1,22 +0,0 @@
# Local PersistentVolume backing ComfyUI data: 200Gi at /data/comfyui,
# usable only on the desktop node (nodeAffinity pin).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: comfyui-data-pv
spec:
  capacity:
    storage: 200Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /data/comfyui
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - uk-desktop.tail2fe2d.ts.net

View File

@@ -1,4 +0,0 @@
# Namespace for all ComfyUI resources.
apiVersion: v1
kind: Namespace
metadata:
  name: comfyui

View File

@@ -1,12 +0,0 @@
# Claim bound to comfyui-data-pv (matching local-path class / 200Gi / RWO).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: comfyui-data-pvc
  namespace: comfyui
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 200Gi

View File

@@ -1,15 +0,0 @@
# ClusterIP Service exposing the ComfyUI HTTP port (8188) inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: comfyui
  namespace: comfyui
  labels:
    app: comfyui
spec:
  ports:
    - name: http
      port: 8188
      targetPort: 8188
      protocol: TCP
  selector:
    app: comfyui

View File

@@ -1,53 +0,0 @@
# Multi-stage build of Telegram MTProxy, with optional cross-compilation
# driven by BuildKit's BUILDPLATFORM/TARGETARCH args.
FROM --platform=$BUILDPLATFORM debian:bookworm-slim AS builder
ARG TARGETARCH
# Native toolchain plus MTProxy's OpenSSL/zlib build dependencies.
RUN apt-get update && apt-get install -y \
git curl make gcc libssl-dev zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
# Cross build only: register the foreign arch and install its cross-gcc
# and target-arch libraries. No-op when building natively.
RUN if [ "$(dpkg --print-architecture)" != "$TARGETARCH" ]; then \
dpkg --add-architecture $TARGETARCH && \
apt-get update && \
case "$TARGETARCH" in \
arm64) apt-get install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 ;; \
amd64) apt-get install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 ;; \
esac && \
rm -rf /var/lib/apt/lists/*; \
fi
RUN git clone https://github.com/TelegramMessenger/MTProxy.git /src
WORKDIR /src
# Select the matching cross compiler via CC when host != target arch.
RUN NATIVE=$(dpkg --print-architecture) && \
if [ "$NATIVE" != "$TARGETARCH" ]; then \
case "$TARGETARCH" in \
arm64) export CC=aarch64-linux-gnu-gcc ;; \
amd64) export CC=x86_64-linux-gnu-gcc ;; \
esac; \
fi && \
make -j$(nproc)
# Runtime stage: slim image with only the shared libraries the binary needs.
FROM debian:bookworm-slim
ENV PROXY_PORT=30443
ENV STATS_PORT=8888
ENV WORKERS=1
ENV RUN_USER=nobody
RUN apt-get update && apt-get install -y \
curl libssl3 zlib1g xxd \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /src/objs/bin/mtproto-proxy /usr/local/bin/mtproto-proxy
# NOTE(review): proxy secret/config are fetched at BUILD time and frozen into
# the image; they can go stale. Consider fetching them at container start.
RUN curl -s https://core.telegram.org/getProxySecret -o /etc/mtproxy/proxy-secret --create-dirs && \
curl -s https://core.telegram.org/getProxyConfig -o /etc/mtproxy/proxy-multi.conf
# Shell-form ENTRYPOINT: env vars expand, but PID 1 is the shell, so signals
# are not delivered to mtproto-proxy directly — confirm acceptable for k8s.
ENTRYPOINT mtproto-proxy \
-u ${RUN_USER} \
-p ${STATS_PORT} \
-H ${PROXY_PORT} \
-M ${WORKERS} \
--aes-pwd /etc/mtproxy/proxy-secret \
/etc/mtproxy/proxy-multi.conf

View File

@@ -1,117 +0,0 @@
---
# MTProto proxy DaemonSet: one pod per node carrying the `mtproxy` label,
# on host networking so clients connect straight to the node address.
# An init container registers each node's tg:// link into the
# `mtproxy-links` secret (read back by the secret-reader deployment).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: mtproxy
  labels:
    app: mtproxy
spec:
  selector:
    matchLabels:
      app: mtproxy
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: mtproxy
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # Schedule only on nodes explicitly labelled `mtproxy`.
                  - key: mtproxy
                    operator: Exists
      serviceAccountName: mtproxy
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      initContainers:
        # Downloads Telegram's proxy secret/config into the shared volume
        # and upserts this node's tg:// link into the mtproxy-links secret.
        - name: register-proxy
          image: bitnami/kubectl:latest
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: SECRET
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: SECRET
            - name: PORT
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: PORT
          volumeMounts:
            - name: data
              mountPath: /data
          command:
            - /bin/bash
            - -c
            - |
              set -e
              curl -s https://core.telegram.org/getProxySecret -o /data/proxy-secret
              curl -s https://core.telegram.org/getProxyConfig -o /data/proxy-multi.conf
              NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
              SERVER=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.mtproxy}')
              if [ -z "${SERVER}" ]; then
                echo "ERROR: node ${NODE_NAME} has no mtproxy label"
                exit 1
              fi
              LINK="tg://proxy?server=${SERVER}&port=${PORT}&secret=${SECRET}"
              echo "Registering: ${SERVER} -> ${LINK}"
              if kubectl get secret mtproxy-links -n "${NAMESPACE}" &>/dev/null; then
                kubectl patch secret mtproxy-links -n "${NAMESPACE}" \
                  --type merge -p "{\"stringData\":{\"${SERVER}\":\"${LINK}\"}}"
              else
                kubectl create secret generic mtproxy-links -n "${NAMESPACE}" \
                  --from-literal="${SERVER}=${LINK}"
              fi
              echo "Done"
      containers:
        - name: mtproxy
          image: telegrammessenger/proxy:latest
          # image: ultradesu/mtproxy:v0.02
          imagePullPolicy: Always
          ports:
            - name: proxy
              containerPort: 30443
              protocol: TCP
          # $(PORT)/$(SECRET) are expanded by Kubernetes from the env vars
          # below before the shell runs.
          command:
            - /bin/sh
            - -c
            - >-
              mtproto-proxy
              -u nobody
              -p 8888
              -H $(PORT)
              -M 1
              -S $(SECRET)
              --aes-pwd /data/proxy-secret
              /data/proxy-multi.conf
          env:
            - name: SECRET
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: SECRET
            - name: PORT
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: PORT
          volumeMounts:
            - name: data
              mountPath: /data
          # resources:
          #   requests:
          #     memory: "128Mi"
          #     cpu: "100m"
          #   limits:
          #     memory: "256Mi"
          #     cpu: "500m"
      volumes:
        - name: data
          emptyDir: {}

View File

@@ -0,0 +1,49 @@
---
# MTProto proxy Deployment: single replica scheduled onto a node that
# carries the `xray-node-address` label.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mtproxy
  labels:
    app: mtproxy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mtproxy
  template:
    metadata:
      labels:
        app: mtproxy
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: xray-node-address
                    operator: Exists
      containers:
        - name: mtproxy
          image: telegrammessenger/proxy:latest
          imagePullPolicy: Always
          ports:
            - name: proxy
              containerPort: 443
              protocol: TCP
          env:
            # NOTE(review): secret is hardcoded in a manifest committed to
            # VCS — move it to a Secret/ExternalSecret. The value also
            # contains a non-hex character ('s' in "...a51s..."); MTProxy
            # secrets are 32 hex chars — confirm this value actually works.
            - name: SECRET
              value: "00baadf00d15abad1deaa51sbaadcafe"
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: mtproxy-data

View File

@@ -1,25 +0,0 @@
---
# ExternalSecret: renders the tgproxy-secret Secret from the Vaultwarden
# ClusterSecretStore. Note the two `data:` keys live at different levels:
# spec.target.template.data (rendered output) vs spec.data (source fetch).
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: tgproxy-secret
spec:
  target:
    name: tgproxy-secret
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        SECRET: |-
          {{ .secret }}
        # Quoted so the rendered value stays a string.
        PORT: "30443"
  data:
    - secretKey: secret
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 58a37daf-72d8-430d-86bd-6152aa8f888d
        property: fields[0].value

View File

@@ -3,9 +3,6 @@ kind: Kustomization
resources:
- ./app.yaml
- ./rbac.yaml
- ./daemonset.yaml
- ./external-secrets.yaml
- ./deployment.yaml
- ./service.yaml
- ./secret-reader.yaml
# - ./storage.yaml
- ./storage.yaml

View File

@@ -1,58 +0,0 @@
---
# ServiceAccount shared by the mtproxy pods and the secret-reader.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mtproxy
  labels:
    app: mtproxy
---
# Cluster-wide read access to nodes (init container reads node labels).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: mtproxy-node-reader
  labels:
    app: mtproxy
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: mtproxy-node-reader
  labels:
    app: mtproxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: mtproxy-node-reader
subjects:
  - kind: ServiceAccount
    name: mtproxy
    namespace: mtproxy
---
# Namespaced permission to create/patch the mtproxy-links secret.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: mtproxy-secret-manager
  labels:
    app: mtproxy
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: mtproxy-secret-manager
  labels:
    app: mtproxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: mtproxy-secret-manager
subjects:
  - kind: ServiceAccount
    name: mtproxy

View File

@@ -1,63 +0,0 @@
# secret-reader: serves the contents of the mtproxy-links secret over HTTP
# (port 3000) using the mtproxy ServiceAccount's RBAC grants.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: secret-reader
  labels:
    app: secret-reader
spec:
  replicas: 1
  selector:
    matchLabels:
      app: secret-reader
  template:
    metadata:
      labels:
        app: secret-reader
    spec:
      serviceAccountName: mtproxy
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: secret-reader
          image: ultradesu/k8s-secrets:0.2.1
          imagePullPolicy: Always
          args:
            - "--secrets"
            - "mtproxy-links"
            - "--namespace"
            - "mtproxy"
            - "--port"
            - "3000"
          ports:
            - containerPort: 3000
              name: http
          env:
            - name: RUST_LOG
              value: "info"
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "150m"
          livenessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
          securityContext:
            runAsNonRoot: true
            runAsUser: 1000
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL

View File

@@ -2,15 +2,13 @@
apiVersion: v1
kind: Service
metadata:
name: secret-reader
labels:
app: secret-reader
name: mtproxy
spec:
type: ClusterIP
type: LoadBalancer
selector:
app: secret-reader
app: mtproxy
ports:
- port: 80
targetPort: 3000
protocol: TCP
name: http
- name: proxy
port: 30443
targetPort: 443
protocol: TCP

View File

@@ -50,12 +50,10 @@ spec:
runAsNonRoot: true
containers:
- name: n8n
image: n8nio/n8n:latest
image: docker.n8n.io/n8nio/n8n:latest
ports:
- containerPort: 5678
name: http
- containerPort: 5679
name: task-broker
env:
- name: PATH
value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -75,24 +73,14 @@ spec:
value: "true"
- name: N8N_RUNNERS_MODE
value: "external"
- name: N8N_RUNNERS_BROKER_LISTEN_ADDRESS
value: "0.0.0.0"
- name: N8N_LISTEN_ADDRESS
value: "0.0.0.0"
- name: N8N_RUNNERS_BROKER_PORT
value: "5679"
- name: EXECUTIONS_MODE
value: "queue"
- name: QUEUE_BULL_REDIS_HOST
value: "n8n-redis"
- name: QUEUE_BULL_REDIS_PORT
value: "6379"
- name: NODE_ENV
value: "production"
- name: WEBHOOK_URL
value: "https://n8n.hexor.cy/"
- name: N8N_PROXY_HOPS
value: "1"
- name: GENERIC_TIMEZONE
value: "Europe/Moscow"
- name: TZ
@@ -134,23 +122,23 @@ spec:
memory: 512Mi
limits:
cpu: 4000m
memory: 2048Mi
memory: 2048Gi
livenessProbe:
httpGet:
path: /healthz
port: http
initialDelaySeconds: 240
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
failureThreshold: 10
timeoutSeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /healthz/readiness
port: http
initialDelaySeconds: 120
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 15
failureThreshold: 10
volumes:
- name: n8n-data
persistentVolumeClaim:

View File

@@ -1,87 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: n8n-runner
labels:
app: n8n
component: runner
spec:
replicas: 2
selector:
matchLabels:
app: n8n
component: runner
template:
metadata:
labels:
app: n8n
component: runner
spec:
serviceAccountName: n8n
containers:
- name: n8n-runner
image: n8nio/runners:latest
ports:
- containerPort: 5680
name: health
env:
- name: PATH
value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- name: HOME
value: "/home/node"
- name: N8N_RUNNERS_TASK_BROKER_URI
value: "http://n8n:5679"
- name: N8N_RUNNERS_LAUNCHER_LOG_LEVEL
value: "info"
- name: N8N_RUNNERS_MAX_CONCURRENCY
value: "10"
- name: GENERIC_TIMEZONE
value: "Europe/Moscow"
- name: TZ
value: "Europe/Moscow"
- name: N8N_RUNNERS_AUTH_TOKEN
valueFrom:
secretKeyRef:
name: credentials
key: runnertoken
volumeMounts:
- name: n8n-data
mountPath: /home/node/.n8n
- name: tools
mountPath: /opt/tools
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 2000m
memory: 2048Mi
livenessProbe:
httpGet:
path: /healthz
port: 5680
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /healthz
port: 5680
initialDelaySeconds: 15
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
volumes:
- name: n8n-data
persistentVolumeClaim:
claimName: n8n-data
- name: tools
persistentVolumeClaim:
claimName: n8n-tools
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
fsGroup: 1000

View File

@@ -21,21 +21,29 @@ spec:
serviceAccountName: n8n
containers:
- name: n8n-worker
image: n8nio/n8n:latest
command:
- n8n
- worker
image: docker.n8n.io/n8nio/n8n:latest
command: ["n8n", "worker"]
env:
- name: PATH
value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- name: HOME
value: "/home/node"
- name: NODES_EXCLUDE
value: "[]"
- name: N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS
value: "true"
- name: N8N_RUNNERS_ENABLED
value: "true"
- name: N8N_RUNNERS_MODE
value: "external"
- name: N8N_PORT
value: "80"
- name: EXECUTIONS_MODE
value: "queue"
- name: QUEUE_BULL_REDIS_HOST
value: "n8n-redis"
- name: QUEUE_BULL_REDIS_PORT
value: "6379"
- name: N8N_RUNNERS_TASK_BROKER_URI
value: "http://n8n:80"
- name: NODE_ENV
value: "production"
- name: GENERIC_TIMEZONE
@@ -63,20 +71,40 @@ spec:
secretKeyRef:
name: credentials
key: encryptionkey
- name: N8N_RUNNERS_AUTH_TOKEN
valueFrom:
secretKeyRef:
name: credentials
key: runnertoken
volumeMounts:
- name: n8n-data
mountPath: /home/node/.n8n
- name: tools
mountPath: /opt/tools
resources:
requests:
cpu: 500m
cpu: 2000m
memory: 512Mi
limits:
cpu: 2000m
memory: 2048Mi
cpu: 4000m
memory: 2048Gi
livenessProbe:
exec:
command:
- /bin/sh
- -c
- "ps aux | grep '[n]8n worker' || exit 1"
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
volumes:
- name: n8n-data
persistentVolumeClaim:
claimName: n8n-data
- name: tools
persistentVolumeClaim:
claimName: n8n-tools
securityContext:
runAsUser: 1000
runAsGroup: 1000

View File

@@ -7,11 +7,8 @@ resources:
- rbac.yaml
- redis-deployment.yaml
- redis-service.yaml
- paddleocr-deployment.yaml
- paddleocr-service.yaml
- deployment-main.yaml
- deployment-worker.yaml
- deployment-runner.yaml
- service.yaml
- ingress.yaml

View File

@@ -1,43 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: paddleocr
labels:
app: paddleocr
component: n8n
spec:
replicas: 1
selector:
matchLabels:
app: paddleocr
component: n8n
template:
metadata:
labels:
app: paddleocr
component: n8n
spec:
containers:
- name: paddleocr
image: c403/paddleocr
ports:
- containerPort: 5000
name: http
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 1000m
memory: 2Gi
livenessProbe:
tcpSocket:
port: 5000
initialDelaySeconds: 60
periodSeconds: 30
readinessProbe:
tcpSocket:
port: 5000
initialDelaySeconds: 30
periodSeconds: 10

View File

@@ -1,18 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: paddleocr
labels:
app: paddleocr
component: n8n
spec:
selector:
app: paddleocr
component: n8n
ports:
- name: http
port: 80
targetPort: 5000
protocol: TCP
type: ClusterIP

View File

@@ -14,8 +14,4 @@ spec:
port: 80
targetPort: 5678
protocol: TCP
- name: task-broker
port: 5679
targetPort: 5679
protocol: TCP
type: ClusterIP

View File

@@ -3,23 +3,19 @@ kind: Kustomization
resources:
- external-secrets.yaml
- local-pv.yaml
helmCharts:
- name: ollama
repo: https://otwld.github.io/ollama-helm/
version: 1.49.0
version: 0.4.0
releaseName: ollama
namespace: ollama
valuesFile: ollama-values.yaml
includeCRDs: true
- name: open-webui
repo: https://helm.openwebui.com/
version: 12.8.1
version: 8.14.0
releaseName: openweb-ui
namespace: ollama
valuesFile: openweb-ui-values.yaml
includeCRDs: true
patches:
- path: patch-runtimeclass.yaml
includeCRDs: true

View File

@@ -1,22 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: ollama-local-pv
spec:
capacity:
storage: 100Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-path
local:
path: /var/lib/ollama
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- uk-desktop.tail2fe2d.ts.net

View File

@@ -3,20 +3,6 @@ image:
pullPolicy: Always
tag: "latest"
nodeSelector:
kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
tolerations:
- key: workload
operator: Equal
value: desktop
effect: NoSchedule
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: false
ollama:
gpu:
enabled: true
type: 'nvidia'
number: 1
persistentVolume:
enabled: true
size: 100Gi
storageClass: "local-path"

View File

@@ -1,4 +1,4 @@
clusterDomain: cluster.local
clusterDomain: ai.hexor.cy
extraEnvVars:
GLOBAL_LOG_LEVEL: debug
@@ -32,22 +32,12 @@ ollama:
pipelines:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
tika:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
websocket:
enabled: true
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
redis:
master:
nodeSelector:
kubernetes.io/hostname: master.tail2fe2d.ts.net
ingress:
enabled: true
@@ -56,5 +46,7 @@ ingress:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
host: "ai.hexor.cy"
tls: true
existingSecret: ollama-tls
tls:
- hosts:
- '*.hexor.cy'
secretName: ollama-tls

View File

@@ -1,9 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
namespace: ollama
spec:
template:
spec:
runtimeClassName: nvidia

View File

@@ -1,5 +1,5 @@
image:
tag: latest
tag: 2.20.3
resources:
requests:
memory: "1Gi"
@@ -9,7 +9,7 @@ resources:
cpu: "3000m"
initContainers:
install-tesseract-langs:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
image: ghcr.io/paperless-ngx/paperless-ngx:2.18.2
resources:
requests:
memory: "256Mi"

View File

@@ -19,14 +19,6 @@ spec:
{{ .password }}
AUTHENTIK_SECRET_KEY: |-
{{ .secret_key }}
POSTGRES_PASSWORD: |-
{{ .password }}
POSTGRES_USER: |-
{{ .username }}
username: |-
{{ .password }}
password: |-
{{ .username }}
data:
- secretKey: password
sourceRef:

View File

@@ -5,12 +5,12 @@ resources:
- app.yaml
- external-secrets.yaml
- https-middleware.yaml
# - worker-restart.yaml
- worker-restart.yaml
helmCharts:
- name: authentik
repo: https://charts.goauthentik.io
version: 2026.2.0
version: 2025.10.1
releaseName: authentik
namespace: authentik
valuesFile: values.yaml

View File

@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: gpu-system
namespace: argocd
spec:
project: core
destination:
namespace: gpu-system
server: https://kubernetes.default.svc
source:
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
targetRevision: HEAD
path: k8s/core/gpu
syncPolicy:
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app.yaml
- runtime-class.yaml
helmCharts:
- name: nvidia-device-plugin
repo: https://nvidia.github.io/k8s-device-plugin
version: 0.17.0
releaseName: nvidia-device-plugin
namespace: gpu-system
valuesFile: values.yaml
includeCRDs: true

View File

@@ -1,5 +0,0 @@
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
name: nvidia
handler: nvidia

View File

@@ -1,23 +0,0 @@
nodeSelector:
kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
tolerations:
- key: workload
operator: Equal
value: desktop
effect: NoSchedule
runtimeClassName: nvidia
setAsDefault: false
config:
default: any
map:
any: |-
version: v1
sharing:
timeSlicing:
resources:
- name: nvidia.com/gpu
replicas: 4

View File

@@ -152,7 +152,7 @@ data:
var d = "$";
var q = "'";
return d + 'f="' + d + 'env:TEMP\\mc-install.ps1"; iwr -useb https://minecraft.hexor.cy/clients/win-install.ps1 -OutFile '
+ d + 'f; powershell -ExecutionPolicy Bypass -File ' + d + 'f -Username ' + q + username + q + '; Remove-Item ' + d + 'f';
+ d + 'f; & ' + d + 'f -Username ' + q + username + q + '; Remove-Item ' + d + 'f';
}
document.getElementById("showInstallBtn").addEventListener("click", function(e) {

View File

@@ -60,23 +60,7 @@ EOT
create_group = true
access_groups = ["admins"]
}
"mtproxy-links" = {
name = "mtproxy-links"
slug = "mtproxy-links"
group = "Core"
external_host = "https://proxy.hexor.cy"
internal_host = "http://secret-reader.mtproxy.svc:80"
internal_host_ssl_validation = false
meta_description = ""
skip_path_regex = <<-EOT
/webhook
EOT
meta_icon = "https://img.icons8.com/ios-filled/50/password.png"
mode = "proxy"
outpost = "kubernetes-outpost"
create_group = true
access_groups = ["admins"]
}
# Tools applications
"vpn" = {
name = "VPN"