Compare commits
27 Commits
auto-updat...4ea48f0f94

| Author | SHA1 | Date |
|---|---|---|
| | 4ea48f0f94 | |
| | 4bfc35d8e2 | |
| | 46c0fab78a | |
| | 6dc43149f4 | |
| | ca1efe6230 | |
| | e90d2c9dc5 | |
| | a884c2b969 | |
| | db92976872 | |
| | d924ebd3ee | |
| | 4b30185655 | |
| | a65b37f000 | |
| | f394b4f9da | |
| | 5d12fc854a | |
| | f415e0711e | |
| | 14dc69904c | |
| | f6dc7aa6e3 | |
| | badd82f9af | |
| | a5cb49471a | |
| | 79c23e14b0 | |
| | 5bc44e45b0 | |
| | 4a80f2f596 | |
| | b58461232c | |
| | be6e601275 | |
| | 063a4a502b | |
| | 22382b63a1 | |
| | 718709115f | |
| | df78728137 | |
@@ -22,12 +22,13 @@ jobs:
       - name: Install Python dependencies
         run: |
-          pip install pyyaml
+          python3 -m venv .venv
+          .venv/bin/pip install pyyaml

       - name: Generate K8s Services Wiki
         run: |
           echo "📋 Starting K8s wiki generation..."
-          python3 .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
+          .venv/bin/python .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md

           if [ -f "Kubernetes-Services.md" ]; then
             echo "✅ Wiki content generated successfully"
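Net effect of this hunk: the workflow stops installing pyyaml into the runner's system Python and drives everything through a workspace-local virtualenv. A sketch of the resulting steps (the motivation is my assumption — avoiding pip's "externally managed environment" refusal on Debian-based runner images — the commit itself doesn't say):

```yaml
# Sketch of the steps after this change; behavior inferred from the hunk above.
- name: Install Python dependencies
  run: |
    python3 -m venv .venv           # isolated interpreter inside the workspace
    .venv/bin/pip install pyyaml    # deps land in .venv, not the system Python

- name: Generate K8s Services Wiki
  run: |
    .venv/bin/python .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
```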
@@ -56,6 +56,7 @@ ArgoCD homelab project
 | **tg-bots** | [](https://ag.hexor.cy/applications/argocd/tg-bots) |
 | **vaultwarden** | [](https://ag.hexor.cy/applications/argocd/vaultwarden) |
 | **vpn** | [](https://ag.hexor.cy/applications/argocd/vpn) |
+| **xandikos** | [](https://ag.hexor.cy/applications/argocd/xandikos) |

 </td>
 </tr>
@@ -77,8 +77,8 @@ spec:
       labels:
         app: gitea-runner
     spec:
-      nodeSelector:
-        kubernetes.io/hostname: home.homenet
+      #nodeSelector:
+      #  kubernetes.io/hostname: home.homenet
       volumes:
       - name: docker-sock
         hostPath:
@@ -90,27 +90,30 @@ spec:
       affinity:
         nodeAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 3
             preference:
               matchExpressions:
               - key: kubernetes.io/hostname
                 operator: In
                 values:
                 - home.homenet
           - weight: 1
             preference:
               matchExpressions:
               - key: kubernetes.io/hostname
                 operator: In
                 values:
                 - master.tail2fe2d.ts.net
                 - home.homenet
           - weight: 2
             preference:
               matchExpressions:
               - key: kubernetes.io/hostname
                 operator: In
                 values:
                 - nas.homenet
                 - master.tail2fe2d.ts.net
           - weight: 3
             preference:
               matchExpressions:
               - key: kubernetes.io/hostname
                 operator: In
                 values:
                 - it.tail2fe2d.ts.net
                 - ch.tail2fe2d.ts.net
                 - us.tail2fe2d.ts.net

           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
@@ -118,7 +121,9 @@ spec:
                 operator: In
                 values:
                 - home.homenet
                 - nas.homenet
                 - it.tail2fe2d.ts.net
                 - ch.tail2fe2d.ts.net
                 - us.tail2fe2d.ts.net
                 - master.tail2fe2d.ts.net
       containers:
       - name: gitea-runner
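For readers skimming the affinity hunks: `requiredDuringSchedulingIgnoredDuringExecution` is a hard filter, while each `preferred...` term adds its `weight` to a candidate node's score and the scheduler places the pod on the highest total. A minimal sketch of the two roles (hostnames and weights here are illustrative, not the repo's final table):

```yaml
# Sketch of nodeAffinity semantics only — values are placeholders.
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:   # hard constraint: pod may only land on listed nodes
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values: [home.homenet, nas.homenet]
    preferredDuringSchedulingIgnoredDuringExecution:  # soft ranking: weights of all matching terms are summed
    - weight: 3                                       # a node matching this term scores +3
      preference:
        matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values: [home.homenet]
    - weight: 1                                       # fallback node scores only +1
      preference:
        matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values: [nas.homenet]
```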
@@ -19,7 +19,7 @@ spec:
         kubernetes.io/os: linux
       containers:
       - name: secret-reader
-        image: ultradesu/k8s-secrets:0.1.1
+        image: ultradesu/k8s-secrets:0.2.1
         imagePullPolicy: Always
         args:
         - "--secrets"
@@ -28,6 +28,7 @@ spec:
         - "k8s-secret"
         - "--port"
         - "3000"
+        - "--webhook"
         ports:
         - containerPort: 3000
           name: http
@@ -192,10 +192,10 @@ spec:
         resources:
           requests:
             memory: "128Mi"
-            cpu: "100m"
+            cpu: "300m"
           limits:
             memory: "512Mi"
-            cpu: "750m"
+            cpu: "1000m"
         volumeMounts:
         - name: shared-data
           mountPath: /shared
@@ -113,7 +113,7 @@ spec:
           mountPath: /scripts
       containers:
       - name: pasarguard-node
-        image: 'pasarguard/node:v0.1.3'
+        image: 'pasarguard/node:v0.1.4'
         imagePullPolicy: Always
         command:
         - /bin/sh
@@ -2,8 +2,8 @@

 global:
   domain: ag.hexor.cy
-  nodeSelector:
-    kubernetes.io/hostname: master.tail2fe2d.ts.net
+  nodeSelector: &nodeSelector
+    kubernetes.io/hostname: ch.tail2fe2d.ts.net
   logging:
     format: text
     level: info
@@ -56,14 +56,14 @@ configs:
 controller:
   replicas: 1
   nodeSelector:
-    kubernetes.io/hostname: master.tail2fe2d.ts.net
+    <<: *nodeSelector
   # Add resources (requests/limits), PDB etc. if needed

 # Dex OIDC provider
 dex:
   replicas: 1
   nodeSelector:
-    kubernetes.io/hostname: master.tail2fe2d.ts.net
+    <<: *nodeSelector
   enabled: false

 # Standard Redis disabled because Redis HA is enabled
@@ -86,7 +86,7 @@ redis-ha:
 server:
   replicas: 1
   nodeSelector:
-    kubernetes.io/hostname: master.tail2fe2d.ts.net
+    <<: *nodeSelector
   ingress:
     enabled: false

@@ -99,8 +99,11 @@ server:
 # Repository Server
 repoServer:
   replicas: 1
+  livenessProbe:
+    timeoutSeconds: 10
+    periodSeconds: 60
   nodeSelector:
-    kubernetes.io/hostname: master.tail2fe2d.ts.net
+    <<: *nodeSelector
   # Add resources (requests/limits), PDB etc. if needed

 # ApplicationSet Controller
@@ -108,7 +111,7 @@ applicationSet:
   enabled: true # Enabled by default
   replicas: 1
   nodeSelector:
-    kubernetes.io/hostname: master.tail2fe2d.ts.net
+    <<: *nodeSelector
   # Add resources (requests/limits), PDB etc. if needed

 # Notifications Controller
@@ -116,5 +119,5 @@ notifications:
   enabled: true # Enabled by default
   replicas: 1
   nodeSelector:
-    kubernetes.io/hostname: master.tail2fe2d.ts.net
+    <<: *nodeSelector
   # Add notifiers, triggers, templates configurations if needed
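The recurring `<<: *nodeSelector` replacements all dereference the single anchored mapping defined under `global:`, so repinning every ArgoCD component to another node becomes a one-line edit. A minimal sketch of the YAML anchor/merge-key mechanics this relies on (merge keys resolve at parse time, within one document):

```yaml
global:
  nodeSelector: &nodeSelector                 # anchor the mapping once
    kubernetes.io/hostname: ch.tail2fe2d.ts.net

controller:
  nodeSelector:
    <<: *nodeSelector                         # merge the anchored keys here

# After parsing: controller.nodeSelector == {kubernetes.io/hostname: ch.tail2fe2d.ts.net}
```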
@@ -42,10 +42,10 @@ spec:
         resources:
           requests:
             memory: "128Mi"
-            cpu: "100m"
+            cpu: "300m"
           limits:
             memory: "512Mi"
-            cpu: "500m"
+            cpu: "800m"
         env:
         - name: BW_HOST
           valueFrom:
@@ -79,3 +79,83 @@ spec:
         key: 2a9deb39-ef22-433e-a1be-df1555625e22
         property: fields[2].value

+---
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: alertmanager-telegram
+spec:
+  target:
+    name: alertmanager-telegram-secret
+    deletionPolicy: Delete
+    template:
+      type: Opaque
+      data:
+        TELEGRAM_BOT_TOKEN: |-
+          {{ .bot_token }}
+        TELEGRAM_CHAT_ID: |-
+          {{ .chat_id }}
+  data:
+    - secretKey: bot_token
+      sourceRef:
+        storeRef:
+          name: vaultwarden-login
+          kind: ClusterSecretStore
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        metadataPolicy: None
+        key: eca0fb0b-3939-40a8-890a-6294863e5a65
+        property: fields[0].value
+    - secretKey: chat_id
+      sourceRef:
+        storeRef:
+          name: vaultwarden-login
+          kind: ClusterSecretStore
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        metadataPolicy: None
+        key: eca0fb0b-3939-40a8-890a-6294863e5a65
+        property: fields[1].value
+
+---
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: grafana-telegram
+spec:
+  target:
+    name: grafana-telegram
+    deletionPolicy: Delete
+    template:
+      type: Opaque
+      data:
+        bot-token: |-
+          {{ .bot_token }}
+        chat-id: |-
+          {{ .chat_id }}
+  data:
+    - secretKey: bot_token
+      sourceRef:
+        storeRef:
+          name: vaultwarden-login
+          kind: ClusterSecretStore
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        metadataPolicy: None
+        key: eca0fb0b-3939-40a8-890a-6294863e5a65
+        property: fields[0].value
+    - secretKey: chat_id
+      sourceRef:
+        storeRef:
+          name: vaultwarden-login
+          kind: ClusterSecretStore
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        metadataPolicy: None
+        key: eca0fb0b-3939-40a8-890a-6294863e5a65
+        property: fields[1].value
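For orientation: the external-secrets operator resolves each `data[].remoteRef` against the Vaultwarden item and renders `spec.target.template` into a plain Secret. Roughly what it materializes for `grafana-telegram` — a sketch with placeholder values; the real ones are fields[0] and fields[1] of item eca0fb0b-3939-40a8-890a-6294863e5a65:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: grafana-telegram
type: Opaque
stringData:
  bot-token: "<value of fields[0] in the Vaultwarden item>"  # {{ .bot_token }}
  chat-id: "<value of fields[1] in the Vaultwarden item>"    # {{ .chat_id }}
```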
k8s/core/prom-stack/grafana-alerting.yaml (new file, 69 lines)
@@ -0,0 +1,69 @@
+rules.yaml: |
+  apiVersion: 1
+  groups:
+    - orgId: 1
+      name: pasarguard_alerts
+      folder: Kubernetes
+      interval: 1m
+      rules:
+        - uid: pasarguard_cpu_throttling
+          title: VPN CPU Throttle
+          condition: A
+          data:
+            - refId: A
+              relativeTimeRange:
+                from: 600
+                to: 0
+              datasourceUid: prometheus
+              model:
+                expr: 'rate(container_cpu_cfs_throttled_periods_total{container="pasarguard-node"}[5m]) > 0.1'
+                refId: A
+          noDataState: NoData
+          execErrState: Alerting
+          for: 5m
+          annotations:
+            description: 'Throttling rate: {{ printf "%.2f" $values.A.Value }}'
+            summary: 'VPN node throttling CPU on {{ $labels.node }}'
+          labels:
+            severity: warning
+
+contactpoints.yaml: |
+  apiVersion: 1
+  contactPoints:
+    - orgId: 1
+      name: telegram
+      receivers:
+        - uid: telegram_default
+          type: telegram
+          settings:
+            bottoken: $TELEGRAM_BOT_TOKEN
+            chatid: $TELEGRAM_CHAT_ID
+            message: |
+              {{ if eq .Status "firing" }}🔥 FIRING{{ else }}✅ RESOLVED{{ end }}
+
+              {{ range .Alerts }}
+              📊 <b>{{ .Labels.alertname }}</b>
+              {{ if .Annotations.summary }}{{ .Annotations.summary }}{{ end }}
+
+              🎯 <b>Details:</b>
+              • Pod: <code>{{ .Labels.pod }}</code>
+              • Node: <code>{{ .Labels.node }}</code>
+              • Namespace: <code>{{ .Labels.namespace }}</code>
+              {{ if .Annotations.description }}• {{ .Annotations.description }}{{ end }}
+
+              🔗 <a href="{{ .GeneratorURL }}">View in Grafana</a>
+              {{ end }}
+            parse_mode: HTML
+            disableResolveMessage: false
+
+policies.yaml: |
+  apiVersion: 1
+  policies:
+    - orgId: 1
+      receiver: telegram
+      group_by:
+        - grafana_folder
+        - alertname
+      group_wait: 10s
+      group_interval: 5m
+      repeat_interval: 4h
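The `rules.yaml` document above follows Grafana's file-provisioning schema for alert rules, so further rules slot into the same `rules:` list. A hypothetical second entry in the same shape — not in the commit; the uid, metric choice, and threshold are invented for illustration:

```yaml
# Hypothetical extra rule, same provisioning schema as pasarguard_cpu_throttling.
- uid: pasarguard_restarts
  title: VPN Pod Restarts
  condition: A
  data:
    - refId: A
      relativeTimeRange:
        from: 600
        to: 0
      datasourceUid: prometheus
      model:
        # kube-state-metrics counter; fires if the container restarted in the last 10m
        expr: 'increase(kube_pod_container_status_restarts_total{container="pasarguard-node"}[10m]) > 0'
        refId: A
  noDataState: NoData
  execErrState: Alerting
  for: 0m
  labels:
    severity: warning
```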
@@ -38,6 +38,10 @@ datasources:
       url: http://prometheus-kube-prometheus-prometheus.prometheus.svc:9090
       access: proxy
       isDefault: true
+    - name: Loki
+      type: loki
+      url: http://loki-gateway.prometheus.svc:80
+      access: proxy

 ingress:
   enabled: true
@@ -52,3 +56,19 @@ ingress:
   hosts:
     - '*.hexor.cy'

+extraConfigmapMounts:
+  - name: grafana-alerting
+    mountPath: /etc/grafana/provisioning/alerting
+    configMap: grafana-alerting
+    readOnly: true
+
+envValueFrom:
+  TELEGRAM_BOT_TOKEN:
+    secretKeyRef:
+      name: grafana-telegram
+      key: bot-token
+  TELEGRAM_CHAT_ID:
+    secretKeyRef:
+      name: grafana-telegram
+      key: chat-id
+
@@ -6,6 +6,12 @@ resources:
   - persistentVolume.yaml
   - external-secrets.yaml

+configMapGenerator:
+  - name: grafana-alerting
+    namespace: prometheus
+    files:
+      - grafana-alerting.yaml
+
 helmCharts:
   - name: kube-prometheus-stack
     repo: https://prometheus-community.github.io/helm-charts
@@ -23,3 +29,18 @@ helmCharts:
     valuesFile: grafana-values.yaml
     includeCRDs: true

+  - name: loki
+    repo: https://grafana.github.io/helm-charts
+    version: 6.29.0
+    releaseName: loki
+    namespace: prometheus
+    valuesFile: loki-values.yaml
+    includeCRDs: true
+
+  - name: promtail
+    repo: https://grafana.github.io/helm-charts
+    version: 6.16.6
+    releaseName: promtail
+    namespace: prometheus
+    valuesFile: promtail-values.yaml
+
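As a reading aid: `configMapGenerator` packs `grafana-alerting.yaml` into a ConfigMap keyed by the file name — the object that the `extraConfigmapMounts` stanza in grafana-values then mounts. Roughly what kustomize emits (a sketch; by default the name gains a content-hash suffix unless `disableNameSuffixHash: true` is set in generatorOptions):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-alerting-<hash>   # hash suffix varies with file content
  namespace: prometheus
data:
  grafana-alerting.yaml: |
    rules.yaml: |
      apiVersion: 1
      # ... rest of the provisioning file above
```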
k8s/core/prom-stack/loki-values.yaml (new file, 75 lines)
@@ -0,0 +1,75 @@
+# Loki SingleBinary mode - optimal for homelab
+deploymentMode: SingleBinary
+
+loki:
+  auth_enabled: false
+  commonConfig:
+    replication_factor: 1
+    path_prefix: /var/loki
+  schemaConfig:
+    configs:
+      - from: 2024-01-01
+        store: tsdb
+        object_store: filesystem
+        schema: v13
+        index:
+          prefix: index_
+          period: 24h
+  storage:
+    type: filesystem
+    filesystem:
+      chunks_directory: /var/loki/chunks
+      rules_directory: /var/loki/rules
+  limits_config:
+    reject_old_samples: false
+    ingestion_rate_mb: 16
+    ingestion_burst_size_mb: 32
+    max_query_parallelism: 32
+    volume_enabled: true
+
+singleBinary:
+  replicas: 1
+  nodeSelector:
+    kubernetes.io/hostname: master.tail2fe2d.ts.net
+  persistence:
+    enabled: true
+    size: 50Gi
+    storageClass: ""
+
+# Disable distributed mode components
+read:
+  replicas: 0
+write:
+  replicas: 0
+backend:
+  replicas: 0
+
+# Disable memcached (not needed for SingleBinary)
+chunksCache:
+  enabled: false
+resultsCache:
+  enabled: false
+
+# Gateway for Loki access
+gateway:
+  enabled: true
+  replicas: 1
+  service:
+    type: ClusterIP
+
+# Disable tests and canary
+test:
+  enabled: false
+lokiCanary:
+  enabled: false
+
+# Monitoring
+monitoring:
+  dashboards:
+    enabled: false
+  rules:
+    enabled: false
+  serviceMonitor:
+    enabled: false
+  selfMonitoring:
+    enabled: false
@@ -1,5 +1,35 @@
 grafana:
   enabled: false

+alertmanager:
+  config:
+    global:
+      telegram_api_url: "https://api.telegram.org"
+    route:
+      group_by: ['alertname', 'cluster', 'service']
+      group_wait: 10s
+      group_interval: 10s
+      repeat_interval: 12h
+      receiver: 'telegram'
+    receivers:
+      - name: 'telegram'
+        telegram_configs:
+          - bot_token: '${TELEGRAM_BOT_TOKEN}'
+            chat_id: ${TELEGRAM_CHAT_ID}
+            parse_mode: 'HTML'
+            message: |
+              {{ range .Alerts }}
+              <b>{{ .Labels.alertname }}</b>
+              {{ if .Labels.severity }}<b>Severity:</b> {{ .Labels.severity }}{{ end }}
+              <b>Status:</b> {{ .Status }}
+              {{ if .Annotations.summary }}<b>Summary:</b> {{ .Annotations.summary }}{{ end }}
+              {{ if .Annotations.description }}<b>Description:</b> {{ .Annotations.description }}{{ end }}
+              {{ end }}
+
+  alertmanagerSpec:
+    secrets:
+      - alertmanager-telegram-secret
+
 prometheus:
   prometheusSpec:
     enableRemoteWriteReceiver: true
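A hedged wiring note: `alertmanagerSpec.secrets` makes prometheus-operator mount the listed Secret into the Alertmanager pods under `/etc/alertmanager/secrets/<secret-name>/`, but whether the `${TELEGRAM_BOT_TOKEN}` placeholders in `config` actually get substituted depends on chart rendering this compare doesn't show. If they don't, Alertmanager v0.24+ can read the token straight from the mounted file — a sketch with a placeholder chat id:

```yaml
receivers:
  - name: 'telegram'
    telegram_configs:
      # read the token from the file mounted via alertmanagerSpec.secrets
      - bot_token_file: /etc/alertmanager/secrets/alertmanager-telegram-secret/TELEGRAM_BOT_TOKEN
        chat_id: -1000000000000   # must be a literal integer here (placeholder value)
        parse_mode: 'HTML'
```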
k8s/core/prom-stack/promtail-values.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
+# Promtail - log collection agent for all cluster pods
+config:
+  clients:
+    - url: http://loki-gateway.prometheus.svc:80/loki/api/v1/push
+
+# DaemonSet - runs on every node
+daemonset:
+  enabled: true
+
+# Tolerations for master/control-plane nodes
+tolerations:
+  - key: node-role.kubernetes.io/master
+    operator: Exists
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+
+# Init container to increase inotify limits
+initContainer:
+  - name: init-inotify
+    image: docker.io/busybox:1.36
+    imagePullPolicy: IfNotPresent
+    command:
+      - sh
+      - -c
+      - sysctl -w fs.inotify.max_user_instances=512
+    securityContext:
+      privileged: true
+
+resources:
+  requests:
+    cpu: 50m
+    memory: 64Mi
+  limits:
+    cpu: 200m
+    memory: 128Mi
@@ -102,3 +102,22 @@ spec:
       port: 80
       targetPort: 8080

+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: jf-local-ingress
+  annotations:
+    ingressClassName: traefik
+spec:
+  rules:
+    - host: tr.uk
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: qbittorrent
+                port:
+                  number: 80
@@ -9,6 +9,11 @@ resources:
     cpu: "6000m"
 nodeSelector:
   kubernetes.io/hostname: uk-desktop.tail2fe2d.ts.net
+tolerations:
+  - key: workload
+    operator: Equal
+    value: desktop
+    effect: NoSchedule
 persistence:
   config:
     enabled: true
@@ -28,11 +33,8 @@ persistence:
 ingress:
   enabled: true
   className: traefik
-  annotations:
-    cert-manager.io/cluster-issuer: letsencrypt
-    traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
   hosts:
-    - host: uk-desktop.uk
+    - host: jf.uk
       paths:
         - path: /
           pathType: Prefix
@@ -1,4 +1,3 @@
-
 data "authentik_flow" "default_authorization_flow" {
   slug = var.default_authorization_flow
 }
@@ -299,7 +298,7 @@ resource "authentik_outpost" "outposts" {
     kubernetes_ingress_class_name  = null
     kubernetes_disabled_components = []
     kubernetes_ingress_annotations = {}
-    kubernetes_ingress_secret_name = "authentik-outpost-tls"
+    kubernetes_ingress_secret_name = "idm-tls"
   })

   depends_on = [
@@ -51,6 +51,9 @@ proxy_applications = {
     internal_host                = "http://secret-reader.k8s-secret.svc:80"
     internal_host_ssl_validation = false
     meta_description             = ""
+    skip_path_regex              = <<-EOT
+      /webhook
+    EOT
     meta_icon                    = "https://img.icons8.com/ios-filled/50/password.png"
     mode                         = "proxy"
     outpost                      = "kubernetes-outpost"