Compare commits

..

1 Commit

Author SHA1 Message Date
Gitea Actions Bot
f79dd14e43 Auto-update README with current k8s applications
All checks were successful
Terraform / Terraform (pull_request) Successful in 17s
Generated by CI/CD workflow on 2026-02-11 10:14:56

This PR updates the README.md file with the current list of applications found in the k8s/ directory structure.
2026-02-11 10:14:56 +00:00
16 changed files with 178 additions and 515 deletions

View File

@@ -1,53 +0,0 @@
# Multi-stage, multi-arch build of the Telegram MTProto proxy (MTProxy).
# Stage 1 runs on the build host's architecture (BUILDPLATFORM) and
# cross-compiles when TARGETARCH differs from the native dpkg arch.
FROM --platform=$BUILDPLATFORM debian:bookworm-slim AS builder
ARG TARGETARCH
# Native toolchain and build dependencies (same-arch build path).
RUN apt-get update && apt-get install -y \
git curl make gcc libssl-dev zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
# Cross-build path: register the foreign architecture and install a cross
# gcc plus target-arch OpenSSL/zlib dev packages. Only arm64/amd64 are
# handled; other TARGETARCH values fall through the case silently.
RUN if [ "$(dpkg --print-architecture)" != "$TARGETARCH" ]; then \
dpkg --add-architecture $TARGETARCH && \
apt-get update && \
case "$TARGETARCH" in \
arm64) apt-get install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 ;; \
amd64) apt-get install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 ;; \
esac && \
rm -rf /var/lib/apt/lists/*; \
fi
# NOTE(review): clones the default branch at build time, so builds are not
# reproducible — consider pinning a commit. TODO confirm this is intended.
RUN git clone https://github.com/TelegramMessenger/MTProxy.git /src
WORKDIR /src
# Pick the cross compiler via CC when cross-compiling, then build.
RUN NATIVE=$(dpkg --print-architecture) && \
if [ "$NATIVE" != "$TARGETARCH" ]; then \
case "$TARGETARCH" in \
arm64) export CC=aarch64-linux-gnu-gcc ;; \
amd64) export CC=x86_64-linux-gnu-gcc ;; \
esac; \
fi && \
make -j$(nproc)
# Stage 2: minimal runtime image with only the shared libs the binary needs.
FROM debian:bookworm-slim
# Defaults; all overridable at container start.
ENV PROXY_PORT=30443
ENV STATS_PORT=8888
ENV WORKERS=1
ENV RUN_USER=nobody
RUN apt-get update && apt-get install -y \
curl libssl3 zlib1g xxd \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /src/objs/bin/mtproto-proxy /usr/local/bin/mtproto-proxy
# NOTE(review): Telegram's proxy secret/config are fetched once at image
# build time and baked in; they can go stale — confirm whether a runtime
# refresh is needed.
RUN curl -s https://core.telegram.org/getProxySecret -o /etc/mtproxy/proxy-secret --create-dirs && \
curl -s https://core.telegram.org/getProxyConfig -o /etc/mtproxy/proxy-multi.conf
# Shell form is deliberate: it expands the ENV variables above (exec form
# would pass them literally). Trade-off: the proxy is not PID 1.
ENTRYPOINT mtproto-proxy \
-u ${RUN_USER} \
-p ${STATS_PORT} \
-H ${PROXY_PORT} \
-M ${WORKERS} \
--aes-pwd /etc/mtproxy/proxy-secret \
/etc/mtproxy/proxy-multi.conf

View File

@@ -1,117 +0,0 @@
---
# MTProxy DaemonSet: one proxy per node carrying the `mtproxy` label, on
# the host network. An init container downloads Telegram's proxy secret
# and config, then publishes this node's tg:// connection link into the
# `mtproxy-links` secret (served elsewhere by the secret-reader app).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: mtproxy
  labels:
    app: mtproxy
spec:
  selector:
    matchLabels:
      app: mtproxy
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: mtproxy
    spec:
      # Only schedule on nodes that have the `mtproxy` label; its value
      # is the public server name advertised in the link.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: mtproxy
                    operator: Exists
      serviceAccountName: mtproxy
      # Host networking so the proxy port is reachable on the node IP.
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      initContainers:
        # NOTE(review): assumes curl is present in bitnami/kubectl —
        # the image is minimal, confirm before relying on it.
        - name: register-proxy
          image: bitnami/kubectl:latest
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: SECRET
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: SECRET
            - name: PORT
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: PORT
          volumeMounts:
            - name: data
              mountPath: /data
          command:
            - /bin/bash
            - -c
            - |
              set -e
              curl -s https://core.telegram.org/getProxySecret -o /data/proxy-secret
              curl -s https://core.telegram.org/getProxyConfig -o /data/proxy-multi.conf
              NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
              SERVER=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.mtproxy}')
              if [ -z "${SERVER}" ]; then
                echo "ERROR: node ${NODE_NAME} has no mtproxy label"
                exit 1
              fi
              LINK="tg://proxy?server=${SERVER}&port=${PORT}&secret=${SECRET}"
              echo "Registering: ${SERVER} -> ${LINK}"
              if kubectl get secret mtproxy-links -n "${NAMESPACE}" &>/dev/null; then
                kubectl patch secret mtproxy-links -n "${NAMESPACE}" \
                  --type merge -p "{\"stringData\":{\"${SERVER}\":\"${LINK}\"}}"
              else
                kubectl create secret generic mtproxy-links -n "${NAMESPACE}" \
                  --from-literal="${SERVER}=${LINK}"
              fi
              echo "Done"
      containers:
        - name: mtproxy
          image: telegrammessenger/proxy:latest
          # image: ultradesu/mtproxy:v0.02
          imagePullPolicy: Always
          ports:
            - name: proxy
              containerPort: 30443
              protocol: TCP
          # $(PORT)/$(SECRET) are expanded by Kubernetes from the env
          # entries below before the shell runs.
          command:
            - /bin/sh
            - -c
            - >-
              mtproto-proxy
              -u nobody
              -p 8888
              -H $(PORT)
              -M 1
              -S $(SECRET)
              --aes-pwd /data/proxy-secret
              /data/proxy-multi.conf
          env:
            - name: SECRET
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: SECRET
            - name: PORT
              valueFrom:
                secretKeyRef:
                  name: tgproxy-secret
                  key: PORT
          volumeMounts:
            - name: data
              mountPath: /data
          #resources:
          #  requests:
          #    memory: "128Mi"
          #    cpu: "100m"
          #  limits:
          #    memory: "256Mi"
          #    cpu: "500m"
      volumes:
        - name: data
          emptyDir: {}

View File

@@ -0,0 +1,49 @@
---
# MTProxy as a single-replica Deployment (replaces the per-node
# DaemonSet), pinned to the node that carries the xray-node-address label.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mtproxy
  labels:
    app: mtproxy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mtproxy
  template:
    metadata:
      labels:
        app: mtproxy
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: xray-node-address
                    operator: Exists
      containers:
        - name: mtproxy
          image: telegrammessenger/proxy:latest
          imagePullPolicy: Always
          ports:
            - name: proxy
              containerPort: 443
              protocol: TCP
          env:
            # MTProxy secrets are 16 bytes encoded as 32 hex characters.
            # FIX: the previous value contained a non-hex 's'
            # ("...aa51s..."), which the proxy rejects; corrected to '5'.
            # NOTE(review): a secret hardcoded in the manifest will leak
            # via VCS — prefer a secretKeyRef (e.g. tgproxy-secret).
            - name: SECRET
              value: "00baadf00d15abad1deaa515baadcafe"
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: mtproxy-data

View File

@@ -1,25 +0,0 @@
---
# Pulls the MTProxy secret from Vaultwarden (via External Secrets
# Operator) and materializes it as the `tgproxy-secret` Secret with a
# fixed PORT alongside it.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: tgproxy-secret
spec:
  target:
    name: tgproxy-secret
    deletionPolicy: Delete
    template:
      type: Opaque
      data:
        # `.secret` is filled from the remoteRef below.
        SECRET: |-
          {{ .secret }}
        PORT: "30443"
  data:
    - secretKey: secret
      sourceRef:
        storeRef:
          name: vaultwarden-login
          kind: ClusterSecretStore
      remoteRef:
        key: 58a37daf-72d8-430d-86bd-6152aa8f888d
        property: fields[0].value

View File

@@ -3,9 +3,6 @@ kind: Kustomization
resources: resources:
- ./app.yaml - ./app.yaml
- ./rbac.yaml - ./deployment.yaml
- ./daemonset.yaml
- ./external-secrets.yaml
- ./service.yaml - ./service.yaml
- ./secret-reader.yaml - ./storage.yaml
# - ./storage.yaml

View File

@@ -1,58 +0,0 @@
---
# Identity shared by the mtproxy pods and the secret-reader deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mtproxy
  labels:
    app: mtproxy
---
# Cluster-wide read access to Node objects (the init container reads the
# node's `mtproxy` label to learn its public address).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: mtproxy-node-reader
  labels:
    app: mtproxy
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: mtproxy-node-reader
  labels:
    app: mtproxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: mtproxy-node-reader
subjects:
  - kind: ServiceAccount
    name: mtproxy
    namespace: mtproxy
---
# Namespace-scoped permission to create/patch the mtproxy-links Secret.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: mtproxy-secret-manager
  labels:
    app: mtproxy
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: mtproxy-secret-manager
  labels:
    app: mtproxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: mtproxy-secret-manager
subjects:
  - kind: ServiceAccount
    name: mtproxy

View File

@@ -1,63 +0,0 @@
---
# secret-reader: HTTP frontend that serves the contents of the
# mtproxy-links Secret (the per-node tg:// links) on port 3000.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: secret-reader
  labels:
    app: secret-reader
spec:
  replicas: 1
  selector:
    matchLabels:
      app: secret-reader
  template:
    metadata:
      labels:
        app: secret-reader
    spec:
      # Reuses the mtproxy ServiceAccount for Secret read access.
      serviceAccountName: mtproxy
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: secret-reader
          image: ultradesu/k8s-secrets:0.2.1
          imagePullPolicy: Always
          args:
            - "--secrets"
            - "mtproxy-links"
            - "--namespace"
            - "mtproxy"
            - "--port"
            - "3000"
          ports:
            - containerPort: 3000
              name: http
          env:
            - name: RUST_LOG
              value: "info"
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "150m"
          livenessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
          # Hardened container: non-root, read-only root FS, no caps.
          securityContext:
            runAsNonRoot: true
            runAsUser: 1000
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL

View File

@@ -2,15 +2,13 @@
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: secret-reader name: mtproxy
labels:
app: secret-reader
spec: spec:
type: ClusterIP type: LoadBalancer
selector: selector:
app: secret-reader app: mtproxy
ports: ports:
- port: 80 - name: proxy
targetPort: 3000 port: 30443
targetPort: 443
protocol: TCP protocol: TCP
name: http

View File

@@ -50,12 +50,10 @@ spec:
runAsNonRoot: true runAsNonRoot: true
containers: containers:
- name: n8n - name: n8n
image: n8nio/n8n:latest image: docker.n8n.io/n8nio/n8n:latest
ports: ports:
- containerPort: 5678 - containerPort: 5678
name: http name: http
- containerPort: 5679
name: task-broker
env: env:
- name: PATH - name: PATH
value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -75,10 +73,6 @@ spec:
value: "true" value: "true"
- name: N8N_RUNNERS_MODE - name: N8N_RUNNERS_MODE
value: "external" value: "external"
- name: N8N_RUNNERS_BROKER_LISTEN_ADDRESS
value: "0.0.0.0"
- name: N8N_RUNNERS_BROKER_PORT
value: "5679"
- name: EXECUTIONS_MODE - name: EXECUTIONS_MODE
value: "queue" value: "queue"
- name: QUEUE_BULL_REDIS_HOST - name: QUEUE_BULL_REDIS_HOST
@@ -133,18 +127,18 @@ spec:
httpGet: httpGet:
path: /healthz path: /healthz
port: http port: http
initialDelaySeconds: 240 initialDelaySeconds: 120
periodSeconds: 30 periodSeconds: 30
timeoutSeconds: 20 timeoutSeconds: 10
failureThreshold: 10 failureThreshold: 6
readinessProbe: readinessProbe:
httpGet: httpGet:
path: /healthz/readiness path: /healthz/readiness
port: http port: http
initialDelaySeconds: 120 initialDelaySeconds: 60
periodSeconds: 10 periodSeconds: 10
timeoutSeconds: 5 timeoutSeconds: 5
failureThreshold: 15 failureThreshold: 10
volumes: volumes:
- name: n8n-data - name: n8n-data
persistentVolumeClaim: persistentVolumeClaim:

View File

@@ -1,87 +0,0 @@
---
# External n8n task runners: connect to the main n8n deployment's task
# broker and execute workflow tasks out-of-process.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-runner
  labels:
    app: n8n
    component: runner
spec:
  replicas: 2
  selector:
    matchLabels:
      app: n8n
      component: runner
  template:
    metadata:
      labels:
        app: n8n
        component: runner
    spec:
      serviceAccountName: n8n
      containers:
        - name: n8n-runner
          image: n8nio/runners:latest
          ports:
            - containerPort: 5680
              name: health
          env:
            # /opt/tools first so mounted helper binaries win over system ones.
            - name: PATH
              value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            - name: HOME
              value: "/home/node"
            # Broker endpoint exposed by the main n8n service.
            - name: N8N_RUNNERS_TASK_BROKER_URI
              value: "http://n8n:5679"
            - name: N8N_RUNNERS_LAUNCHER_LOG_LEVEL
              value: "info"
            - name: N8N_RUNNERS_MAX_CONCURRENCY
              value: "10"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            - name: N8N_RUNNERS_AUTH_TOKEN
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: runnertoken
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
            - name: tools
              mountPath: /opt/tools
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 5680
            initialDelaySeconds: 30
            periodSeconds: 30
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /healthz
              port: 5680
            initialDelaySeconds: 15
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
      volumes:
        # NOTE(review): replicas: 2 share the same PVCs — confirm both
        # claims are ReadWriteMany (or node-colocated).
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
        - name: tools
          persistentVolumeClaim:
            claimName: n8n-tools
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -0,0 +1,112 @@
---
# n8n queue-mode workers: pull executions from the Bull/Redis queue and
# run them against the shared Postgres database.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-worker
  labels:
    app: n8n
    component: worker
spec:
  replicas: 2
  selector:
    matchLabels:
      app: n8n
      component: worker
  template:
    metadata:
      labels:
        app: n8n
        component: worker
    spec:
      serviceAccountName: n8n
      containers:
        - name: n8n-worker
          image: docker.n8n.io/n8nio/n8n:latest
          command: ["n8n", "worker"]
          env:
            # /opt/tools first so mounted helper binaries win over system ones.
            - name: PATH
              value: "/opt/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            - name: HOME
              value: "/home/node"
            - name: NODES_EXCLUDE
              value: "[]"
            - name: N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS
              value: "true"
            - name: N8N_RUNNERS_ENABLED
              value: "true"
            - name: N8N_RUNNERS_MODE
              value: "external"
            - name: N8N_PORT
              value: "80"
            - name: EXECUTIONS_MODE
              value: "queue"
            - name: QUEUE_BULL_REDIS_HOST
              value: "n8n-redis"
            # NOTE(review): broker URI targets port 80 while the runner
            # deployment uses http://n8n:5679 — confirm which is current.
            - name: N8N_RUNNERS_TASK_BROKER_URI
              value: "http://n8n:80"
            - name: NODE_ENV
              value: "production"
            - name: GENERIC_TIMEZONE
              value: "Europe/Moscow"
            - name: TZ
              value: "Europe/Moscow"
            - name: DB_TYPE
              value: "postgresdb"
            - name: DB_POSTGRESDB_HOST
              value: "psql.psql.svc"
            - name: DB_POSTGRESDB_DATABASE
              value: "n8n"
            - name: DB_POSTGRESDB_USER
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: username
            - name: DB_POSTGRESDB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: password
            - name: N8N_ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: encryptionkey
            - name: N8N_RUNNERS_AUTH_TOKEN
              valueFrom:
                secretKeyRef:
                  name: credentials
                  key: runnertoken
          volumeMounts:
            - name: n8n-data
              mountPath: /home/node/.n8n
            - name: tools
              mountPath: /opt/tools
          resources:
            requests:
              cpu: 2000m
              memory: 512Mi
            limits:
              cpu: 4000m
              # FIX: was 2048Gi (2 TiB) — clearly a unit typo given the
              # 512Mi request; intended limit is 2048Mi (2 GiB).
              memory: 2048Mi
          # Process-presence check; the worker serves no HTTP endpoint here.
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - "ps aux | grep '[n]8n worker' || exit 1"
            initialDelaySeconds: 30
            periodSeconds: 30
            timeoutSeconds: 5
            failureThreshold: 3
      volumes:
        # NOTE(review): replicas: 2 share the same PVCs — confirm both
        # claims are ReadWriteMany (or node-colocated).
        - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
        - name: tools
          persistentVolumeClaim:
            claimName: n8n-tools
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        fsGroup: 1000

View File

@@ -7,11 +7,8 @@ resources:
- rbac.yaml - rbac.yaml
- redis-deployment.yaml - redis-deployment.yaml
- redis-service.yaml - redis-service.yaml
- paddleocr-deployment.yaml
- paddleocr-service.yaml
- deployment-main.yaml - deployment-main.yaml
# - deployment-worker.yaml - deployment-worker.yaml
- deployment-runner.yaml
- service.yaml - service.yaml
- ingress.yaml - ingress.yaml

View File

@@ -1,43 +0,0 @@
---
# PaddleOCR HTTP service consumed by n8n workflows.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: paddleocr
  labels:
    app: paddleocr
    component: n8n
spec:
  replicas: 1
  selector:
    matchLabels:
      app: paddleocr
      component: n8n
  template:
    metadata:
      labels:
        app: paddleocr
        component: n8n
    spec:
      containers:
        # NOTE(review): untagged image — consider pinning a version.
        - name: paddleocr
          image: c403/paddleocr
          ports:
            - containerPort: 5000
              name: http
          resources:
            requests:
              cpu: 200m
              memory: 512Mi
            limits:
              cpu: 1000m
              memory: 2Gi
          # TCP probes only — presumably no HTTP health endpoint; confirm.
          livenessProbe:
            tcpSocket:
              port: 5000
            initialDelaySeconds: 60
            periodSeconds: 30
          readinessProbe:
            tcpSocket:
              port: 5000
            initialDelaySeconds: 30
            periodSeconds: 10

View File

@@ -1,18 +0,0 @@
---
# ClusterIP service fronting the paddleocr deployment on port 80.
apiVersion: v1
kind: Service
metadata:
  name: paddleocr
  labels:
    app: paddleocr
    component: n8n
spec:
  selector:
    app: paddleocr
    component: n8n
  ports:
    - name: http
      port: 80
      targetPort: 5000
      protocol: TCP
  type: ClusterIP

View File

@@ -14,8 +14,4 @@ spec:
port: 80 port: 80
targetPort: 5678 targetPort: 5678
protocol: TCP protocol: TCP
- name: task-broker
port: 5679
targetPort: 5679
protocol: TCP
type: ClusterIP type: ClusterIP

View File

@@ -60,23 +60,7 @@ EOT
create_group = true create_group = true
access_groups = ["admins"] access_groups = ["admins"]
} }
"mtproxy-links" = {
name = "mtproxy-links"
slug = "mtproxy-links"
group = "Core"
external_host = "https://proxy.hexor.cy"
internal_host = "http://secret-reader.mtproxy.svc:80"
internal_host_ssl_validation = false
meta_description = ""
skip_path_regex = <<-EOT
/webhook
EOT
meta_icon = "https://img.icons8.com/ios-filled/50/password.png"
mode = "proxy"
outpost = "kubernetes-outpost"
create_group = true
access_groups = ["admins"]
}
# Tools applications # Tools applications
"vpn" = { "vpn" = {
name = "VPN" name = "VPN"