---
# Helm values for kube-prometheus-stack: Alertmanager (Telegram receiver),
# Prometheus (extra scrape jobs, persistent storage) and Grafana (authentik
# OAuth, Postgres backend, Loki datasource, provisioned alerting).
alertmanager:
  config:
    global:
      telegram_api_url: "https://api.telegram.org"
    route:
      group_by: ['alertname', 'cluster', 'service']
      group_wait: 10s
      group_interval: 10s
      repeat_interval: 12h
      receiver: 'telegram'
    receivers:
      - name: 'telegram'
        telegram_configs:
          # Placeholders are substituted from the mounted secret at runtime;
          # chat_id stays unquoted because Alertmanager expects an integer.
          - bot_token: '${TELEGRAM_BOT_TOKEN}'
            chat_id: ${TELEGRAM_CHAT_ID}
            parse_mode: 'HTML'
            # Go-template message body rendered per alert group.
            message: |
              {{ range .Alerts }}
              {{ .Labels.alertname }}
              {{ if .Labels.severity }}Severity: {{ .Labels.severity }}{{ end }}
              Status: {{ .Status }}
              {{ if .Annotations.summary }}Summary: {{ .Annotations.summary }}{{ end }}
              {{ if .Annotations.description }}Description: {{ .Annotations.description }}{{ end }}
              {{ end }}
  ingress:
    enabled: true
    ingressClassName: traefik
    annotations:
      cert-manager.io/cluster-issuer: letsencrypt
      traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
    hosts:
      - prom.hexor.cy
    paths:
      - /alertmanager
    tls:
      - secretName: alertmanager-tls
        hosts:
          - prom.hexor.cy
  alertmanagerSpec:
    # Secret providing TELEGRAM_BOT_TOKEN / TELEGRAM_CHAT_ID for the config above.
    secrets:
      - alertmanager-telegram-secret
    # Served under a sub-path of the shared prom.hexor.cy host.
    externalUrl: https://prom.hexor.cy/alertmanager
    routePrefix: /alertmanager

prometheus:
  ingress:
    enabled: true
    ingressClassName: traefik
    annotations:
      cert-manager.io/cluster-issuer: letsencrypt
      traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
    hosts:
      - prom.hexor.cy
    paths:
      - /
    tls:
      - secretName: prometheus-tls
        hosts:
          - prom.hexor.cy
  prometheusSpec:
    enableRemoteWriteReceiver: true
    # Scrape targets outside the cluster, reached over the tailnet.
    additionalScrapeConfigs:
      - job_name: xray_vpn
        metrics_path: /scrape
        static_configs:
          - targets: ['cy.tail2fe2d.ts.net:9550']
            labels: {job: cy}
          - targets: ['x86.tail2fe2d.ts.net:9550']
            labels: {job: am}
          - targets: ['jp.tail2fe2d.ts.net:9550']
            labels: {job: jp}
      - job_name: cs_16_server
        static_configs:
          - targets: ['prom-a2s-exporter.counter-strike.svc:9841']
            labels: {instance: master}
    # Effectively unlimited time-based retention; size-based retention disabled.
    retention: "99999d"
    retentionSize: "0"
    nodeSelector:
      kubernetes.io/hostname: master.tail2fe2d.ts.net
    storageSpec:
      volumeClaimTemplate:
        spec:
          # Empty string pins the cluster's pre-provisioned (non-dynamic) PV.
          storageClassName: ""
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 400Gi

grafana:
  enabled: true
  envFromSecret: grafana-admin
  nodeSelector:
    kubernetes.io/hostname: master.tail2fe2d.ts.net
  admin:
    existingSecret: grafana-admin
    userKey: username
    passwordKey: password
  grafana.ini:
    auth:
      # Log out of authentik as well when signing out of Grafana.
      signout_redirect_url: https://idm.hexor.cy/application/o/grafana/end-session/
    auth.generic_oauth:
      name: authentik
      enabled: true
      scopes: "openid profile email"
      auth_url: https://idm.hexor.cy/application/o/authorize/
      token_url: https://idm.hexor.cy/application/o/token/
      api_url: https://idm.hexor.cy/application/o/userinfo/
      # JMESPath: map authentik group membership to a Grafana org role.
      role_attribute_path: >-
        contains(groups, 'Grafana Admin') && 'Admin' ||
        contains(groups, 'Grafana Editors') && 'Editor' ||
        contains(groups, 'Grafana Viewer') && 'Viewer'
    database:
      type: postgres
      host: psql.psql.svc:5432
      name: grafana
      user: grafana
      # Password is expected to come from envFromSecret (grafana-admin).
      ssl_mode: disable
  # The Loki datasource config needs to be preserved, but instead of
  # "datasources.datasources.yaml" we define it like this for the
  # prometheus-stack chart:
  additionalDataSources:
    - name: Loki
      type: loki
      url: http://loki-gateway.prometheus.svc:80
      access: proxy
  ingress:
    enabled: true
    ingressClassName: traefik
    annotations:
      cert-manager.io/cluster-issuer: letsencrypt
      traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
    hosts:
      - gf.hexor.cy
    tls:
      - secretName: grafana-tls
        hosts:
          # Quoted: a leading '*' would otherwise be read as a YAML alias.
          - '*.hexor.cy'
  # File-provisioned Grafana alerting (rules, contact points, routing policies).
  extraConfigmapMounts:
    - name: grafana-alerting-rules
      mountPath: /etc/grafana/provisioning/alerting/rules.yaml
      configMap: grafana-alerting
      subPath: rules.yaml
      readOnly: true
    - name: grafana-alerting-contactpoints
      mountPath: /etc/grafana/provisioning/alerting/contactpoints.yaml
      configMap: grafana-alerting
      subPath: contactpoints.yaml
      readOnly: true
    - name: grafana-alerting-policies
      mountPath: /etc/grafana/provisioning/alerting/policies.yaml
      configMap: grafana-alerting
      subPath: policies.yaml
      readOnly: true
  # Env vars consumed by the provisioned alerting contact points.
  envValueFrom:
    TELEGRAM_BOT_TOKEN:
      secretKeyRef:
        name: grafana-telegram
        key: bot-token
    TELEGRAM_CHAT_ID:
      secretKeyRef:
        name: grafana-telegram
        key: chat-id