forked from ab/homelab
Compare commits
440 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| c26fdfa914 | |||
| ddf438e903 | |||
| 3c95524bd1 | |||
| 6b7e35e7c8 | |||
| 7d17a5931c | |||
| 921cb50ddc | |||
| 598d2f80f6 | |||
| 9792dd799b | |||
| 9267f25278 | |||
| 812c37cfe7 | |||
|
|
a6a50c3c13 | ||
|
|
2ff8f9efe8 | ||
|
|
477708190d | ||
|
|
53d5723141 | ||
|
|
101d97eb51 | ||
|
|
4241c81fc0 | ||
|
|
bb9ce21bb4 | ||
|
|
95913c3f73 | ||
|
|
737604f466 | ||
|
|
2bb7ad6959 | ||
|
|
b2a77a6572 | ||
| fc1bc1a65e | |||
| a2114327cf | |||
| 5c886d7604 | |||
| ebcb6dde23 | |||
| c6f3528174 | |||
| a3c0c6bce0 | |||
| ae30d8e898 | |||
| 2dd9eb544b | |||
| 5d7882fb96 | |||
| 5065061eea | |||
| dccfa0ca7b | |||
|
|
883139964e | ||
| 74c2e2230c | |||
|
|
5d9fe8f832 | ||
|
|
af4a4c9216 | ||
|
|
43bd0b6ac3 | ||
|
|
c4a40fad9f | ||
|
|
e1d6a09f6f | ||
| e37c365630 | |||
|
|
6cf7a68ad7 | ||
|
|
b82407f31f | ||
| 117583e660 | |||
| 70d31aae17 | |||
| 9066b3f624 | |||
|
|
633c4eec94 | ||
|
|
89df3d985e | ||
|
|
8ef1242243 | ||
|
|
f07f0ea7c5 | ||
|
|
17321b5d61 | ||
| c9177d9878 | |||
| 06f8b620ae | |||
|
|
a3c02b3510 | ||
| b1acb37700 | |||
| a1288f4c99 | |||
|
|
1672cdf402 | ||
|
|
e96a75880e | ||
| 6c79543b84 | |||
| f41b86bad4 | |||
| d44ade2298 | |||
| 462cb9189d | |||
| 37bb407a77 | |||
| cae1c42004 | |||
| 663f2e7504 | |||
| 8679dbf47e | |||
|
|
1ace105757 | ||
|
|
ce4172b435 | ||
|
|
c841f95bd2 | ||
|
|
fb651bc6fe | ||
|
|
04330aa474 | ||
|
|
57d517af77 | ||
|
|
1f7d9e41c8 | ||
|
|
39a27c596f | ||
|
|
353bb877be | ||
|
|
e523bb8bea | ||
|
|
b433373725 | ||
|
|
3026e53746 | ||
|
|
63669c69ff | ||
|
|
fa98e553cd | ||
|
|
055ef8aa77 | ||
|
|
22b359a7ee | ||
| 611e3e31dd | |||
| ddbd53e476 | |||
| f8a9d91932 | |||
| 262fea115d | |||
|
|
1e1a015dc0 | ||
|
|
e76ebdd8c3 | ||
|
|
0c2ce55a41 | ||
|
|
6e9de5addf | ||
|
|
887a9a2306 | ||
|
|
776109d795 | ||
|
|
c998426b44 | ||
|
|
536be6a61f | ||
|
|
713481c726 | ||
| f6411b7b65 | |||
| 3af6d98be8 | |||
| a45af9d4bc | |||
| 76937930ce | |||
| d4ff8d4665 | |||
| e0cf9371ae | |||
| 1126cb25bc | |||
| 44250dc937 | |||
| d9db73e078 | |||
| 71ce9f15ef | |||
| 6b855294af | |||
| 8dd16e24e6 | |||
| 3df95f46a5 | |||
| c0151eb2c9 | |||
| 6d7e365058 | |||
| 0b5361323a | |||
| 56352fef4b | |||
| 7a1f792391 | |||
| defe0cbdf5 | |||
| 7285c62b37 | |||
| 60f8d86fca | |||
| 2387653edd | |||
| 78a639162b | |||
| 90b197bcbe | |||
|
|
156d26aaf9 | ||
| 700b9cf5ff | |||
| 84bd1fc05a | |||
| cb3defd28c | |||
| 47320b9df6 | |||
| 0e4204b2ea | |||
| 3230c67f16 | |||
| 60c764a657 | |||
| 869a75f6e4 | |||
| 26429f1d4c | |||
| 5718981b67 | |||
| 8c6ca27523 | |||
| 4334ea25b7 | |||
| 18e60344b3 | |||
| 113f1e668d | |||
| 9b1caa28ef | |||
|
|
bd927b7ea6 | ||
| 1688ac67c2 | |||
| eda944741e | |||
| e744629f85 | |||
|
|
e38ba5b70c | ||
| 860f83445a | |||
| 8e1e20f478 | |||
| 93afe8b187 | |||
| 54e83b0af9 | |||
| cbed0939fa | |||
| e545346fec | |||
| aae0255843 | |||
| c7fc3504ba | |||
| b2701a8435 | |||
| df39182fea | |||
| c9bee8c049 | |||
| 439ad751e9 | |||
| 4681a3b263 | |||
| 45f208a4ff | |||
| a19648aacc | |||
| 7d7906edc7 | |||
|
|
30a3b1cd60 | ||
|
|
8dfc8e2549 | ||
|
|
8e18775e3e | ||
| 1ee1b9a1c6 | |||
| 407b11f40d | |||
| 82cf742edb | |||
| 360f260cb5 | |||
| 31fe1bf95d | |||
| 7c8a3b2aaa | |||
| 792421ffc5 | |||
| 014db9ad81 | |||
| 602b729d97 | |||
| fa7b81c1b3 | |||
| 7deb5971a5 | |||
| dbfdc0f122 | |||
| 984f7030a3 | |||
| 3e2fe905bd | |||
| 45b49944b3 | |||
| bd8caa57bf | |||
| c93b8f2797 | |||
| 48fee5ceed | |||
| 75a9c140af | |||
| c8930bbebc | |||
| f52b3aab5b | |||
| 6f1b6dee5d | |||
| 5e1aa4cd37 | |||
| fef5303429 | |||
| ba389b8b1e | |||
| 79d106468a | |||
| ca19a1f46a | |||
| 280fbe41bf | |||
| 32c2ba2781 | |||
| 1587abfba8 | |||
| 704f9dc85e | |||
|
|
5ff4b8ecb2 | ||
|
|
1d8c0c1421 | ||
| 661c2c31aa | |||
| 6d76d20d02 | |||
| ab3e687cea | |||
|
|
6ade2bb13a | ||
| d49c382055 | |||
| 1b9775c63d | |||
| f7838be372 | |||
| ce74590719 | |||
|
|
280bdd3091 | ||
|
|
c34f5ed0a0 | ||
|
|
107782318b | ||
|
|
ed2a59948f | ||
| 03f6596262 | |||
| bed8f5b7c3 | |||
| 676a81852a | |||
| 73c09f80f7 | |||
| 104d67bfb3 | |||
|
|
71e5101604 | ||
|
|
5783db189a | ||
| 5659e4455b | |||
|
|
36e8c5c36b | ||
|
|
a6e0165027 | ||
|
|
09526f4e91 | ||
|
|
d1922019ab | ||
|
|
118a1c431a | ||
|
|
b9667ea5e7 | ||
|
|
b1446c53cd | ||
|
|
56fa6a5e05 | ||
|
|
aa19cd8e61 | ||
|
|
00837fb238 | ||
| 479a2a02ea | |||
|
|
95e12df43d | ||
| 5a33337aa1 | |||
| ce9ba3661b | |||
| 8bcba25f7e | |||
| be9f42afa7 | |||
| 656ec121d2 | |||
| 240fc4127f | |||
| 9b19d8ddd8 | |||
| 0b8fe99ee1 | |||
|
|
cff6c28b72 | ||
| 99a63eb840 | |||
| 4f3be5b14a | |||
| 9f5ec499dc | |||
|
|
7b169b8e6d | ||
| a79003740a | |||
|
|
6276d543bc | ||
|
|
18a9bfa22a | ||
|
|
4b6090910c | ||
|
|
cfa796cc94 | ||
|
|
3e4f5500d2 | ||
|
|
9dd761bc8e | ||
|
|
eb3b5183b0 | ||
|
|
c9c75c791b | ||
|
|
8b959fec49 | ||
|
|
1184ff9060 | ||
|
|
18c64ef812 | ||
|
|
993cf1985d | ||
|
|
3daf7cf79a | ||
|
|
caa3354b33 | ||
|
|
68ca195735 | ||
|
|
93d7cb6bf1 | ||
| 600a1dfb6e | |||
|
|
a8c089d9ec | ||
| e516f95f77 | |||
| 84dcdc343a | |||
| 094d80896a | |||
|
|
4ffc42af97 | ||
| b1183896f9 | |||
| 6f17dc23f1 | |||
| e353751031 | |||
| f3baf90672 | |||
|
|
d71935d063 | ||
| 7dde0d3f2a | |||
| 00cbd8830b | |||
| 3760908270 | |||
| 412d32aa68 | |||
| a6dd3638bb | |||
| 8e445f01ae | |||
| 3a8b14d3b4 | |||
| 7dde46f40d | |||
| 60fcf95476 | |||
| 7e95c361a8 | |||
| 0e5cb7686f | |||
| 15e1718861 | |||
| 902d630d09 | |||
| 9b7f953bd3 | |||
| 228a5e3b1c | |||
| 3a0bc6e0d2 | |||
|
|
39ac71e4ef | ||
|
|
fb80dfad0b | ||
|
|
8c3d29c5b6 | ||
| 1b69064a9a | |||
| 21fde3cfb4 | |||
| 201179f57d | |||
| 181b577255 | |||
| 10773abd14 | |||
| 049542f434 | |||
| ef6c7998a4 | |||
| ee703e8399 | |||
| 399833de34 | |||
| 4c571f63e7 | |||
| 56125caedf | |||
| 4aae2a87d6 | |||
| bbc1ba4c2a | |||
| 5924dcfc5f | |||
| 2cb9b5636c | |||
| a2d631d06f | |||
| aa61b78c47 | |||
|
|
27fe1d2f50 | ||
|
|
557d03eb32 | ||
|
|
f06c80162f | ||
| e8588a2d3b | |||
| 621871be93 | |||
| 91dcd9d5bf | |||
| e40357ad78 | |||
| fa544b3b7b | |||
| 8c112a1b0a | |||
| e1ffaa8ba2 | |||
| 1a2b09bcaa | |||
| 21f27adc80 | |||
| 142c828f2b | |||
| 5a01da21af | |||
| 269b2b5221 | |||
| 1417fa830d | |||
| 6b85343c9e | |||
| 0bcd23009c | |||
| 02b20c9fcc | |||
| dbe0fa9acf | |||
| 7d447163cb | |||
| b58a930052 | |||
| bf9b37b55f | |||
|
|
e093dd4f57 | ||
|
|
18a27dadcb | ||
|
|
288a4685d7 | ||
|
|
24d570e522 | ||
|
|
7541cee2eb | ||
|
|
c3fa6eb874 | ||
|
|
414d60edb4 | ||
|
|
364f5b38a9 | ||
|
|
e59215d2e9 | ||
|
|
3c6da4969c | ||
|
|
c08a3e745e | ||
|
|
00177d347f | ||
|
|
ca7fed506b | ||
|
|
2f0ada97cc | ||
|
|
ae516a79eb | ||
|
|
196d53a5a9 | ||
|
|
24d4d34733 | ||
| 74b7611ea0 | |||
| 91739d622e | |||
| 7730458061 | |||
| b482c2e768 | |||
| 7256c98046 | |||
| 56d315eb4a | |||
| 58a2cd4a74 | |||
| 0052a81389 | |||
| 34bd0f1ec4 | |||
| c1cedeaa13 | |||
| a37ccbe5ef | |||
| cc0a6559da | |||
| 88021e4bc0 | |||
| 81fa68af48 | |||
| 2a9c18cae0 | |||
| be5d76c1e8 | |||
| 229190f0e8 | |||
| d7adb966c4 | |||
| f8ef2a48f5 | |||
| a7cfc086d5 | |||
| dfba5434f7 | |||
|
|
4c68ddfc3a | ||
|
|
98a11199d0 | ||
| c9173fbcc3 | |||
| 4f91fdd26a | |||
|
|
b27d5594c5 | ||
| ae02f0fe2a | |||
|
|
b682b7f8ef | ||
| d7a425d005 | |||
| 422269f5e9 | |||
| a99b549e2e | |||
| a3c26117b3 | |||
| 5f8216cc7b | |||
| ceb405b069 | |||
| f53ea1976c | |||
| b9e1b73681 | |||
| 1b04222c3e | |||
| 3ed26f872c | |||
| aa615fe587 | |||
| 1be64f2f63 | |||
| 1212dfcaec | |||
| 28e06770c6 | |||
| 005cb0db72 | |||
| fd80f3ad65 | |||
| 5281d58fae | |||
| 4542d03bc5 | |||
| eb6a2e3e47 | |||
| 311ab269b6 | |||
| 5fa5843fa1 | |||
| 006f607e0d | |||
| 77371cd640 | |||
| e3373dfb5f | |||
| c3eb8ffc5c | |||
| c5eb2a80c2 | |||
| 46527d924a | |||
| 0c5076c649 | |||
| acf1f88412 | |||
| 01a88e21a2 | |||
| fbfbaf0826 | |||
| bf70cae59e | |||
| 95ea0c21fb | |||
| 816fa3662d | |||
| caeb350ece | |||
| ab184e559d | |||
| a6002e7cc3 | |||
| 03f61962f7 | |||
| 2ebc8e718e | |||
| a6cc4b067f | |||
| 37e79a1175 | |||
| 431f0df03d | |||
| bd91762c9d | |||
| e4c86235ae | |||
| 72a1154610 | |||
| 0beb0cd78b | |||
| e342aab9df | |||
| 26f811c3b7 | |||
| d1e834d175 | |||
| 02ec8fd4e1 | |||
| 7565c6c34f | |||
| a45c11f883 | |||
| cfc15d05eb | |||
| 3d1658f41d | |||
| 51a8cc1834 | |||
| 5dcbc9b11f | |||
| aed859b8e9 | |||
| 05f277c8cd | |||
| e25e9a8608 | |||
| 2ef7b23c69 | |||
| 4184534c8c | |||
| 145bdcaca1 | |||
| e0ef44d8bd | |||
| 628c250a0b | |||
| 2e0df4ad1b | |||
| 120d68bd57 | |||
| 6f7fc0b796 | |||
| a4f043c5c6 | |||
| 640447a4e0 | |||
| b55e1b936b | |||
| e939b14796 | |||
| a9d63a7c0c |
147
.gitea/scripts/README-update-readme.md
Normal file
147
.gitea/scripts/README-update-readme.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Auto-Update README Workflow
|
||||
|
||||
## Overview
|
||||
|
||||
This workflow automatically updates the `README.md` file with the current list of ArgoCD applications based on the directory structure in `k8s/`.
|
||||
|
||||
## How it works
|
||||
|
||||
1. **Trigger**: Workflow runs automatically when changes are pushed to `k8s/**` paths
|
||||
2. **Scan**: Python script scans `k8s/` directory structure and finds all applications
|
||||
3. **Generate**: Creates README.md with badges for all found applications
|
||||
4. **Create PR**: If changes detected, creates a Merge Request for manual review
|
||||
|
||||
## Files
|
||||
|
||||
- `.gitea/workflows/update-readme.yaml` - GitHub Actions workflow
|
||||
- `.gitea/scripts/generate-readme.py` - Python script for README generation
|
||||
|
||||
## Directory Structure
|
||||
|
||||
The script expects the following k8s directory structure:
|
||||
|
||||
```
|
||||
k8s/
|
||||
├── core/ # Core infrastructure applications
|
||||
│ ├── argocd/
|
||||
│ ├── authentik/
|
||||
│ └── ...
|
||||
├── apps/ # User applications
|
||||
│ ├── gitea/
|
||||
│ ├── immich/
|
||||
│ └── ...
|
||||
└── games/ # Game servers
|
||||
├── minecraft/
|
||||
└── ...
|
||||
```
|
||||
|
||||
Each subdirectory name becomes an application name in the README.
|
||||
|
||||
## Required Secrets
|
||||
|
||||
The workflow requires the following secrets to be configured in Gitea:
|
||||
|
||||
| Secret | Description | Example |
|
||||
|--------|-------------|---------|
|
||||
| `GT_URL` | Gitea instance URL | `https://gt.hexor.cy` |
|
||||
| `GT_TOKEN` | Gitea API token with repo write access | `glpat-xxxxx...` |
|
||||
| `GT_OWNER` | Repository owner (username or org) | `ab` |
|
||||
| `GT_REPO` | Repository name | `homelab` |
|
||||
|
||||
### How to create a Gitea Token
|
||||
|
||||
1. Go to Settings → Applications → Generate New Token
|
||||
2. Give it a name like "README Update Bot"
|
||||
3. Select scopes: `repo` (Full control of repositories)
|
||||
4. Generate and copy the token
|
||||
5. Add it as a secret in repository settings
|
||||
|
||||
## Badge Format
|
||||
|
||||
Badges are generated using a predictable pattern:
|
||||
|
||||
```markdown
|
||||
[](https://ag.hexor.cy/applications/argocd/app-name)
|
||||
```
|
||||
|
||||
This allows you to immediately see which applications are:
|
||||
- ✅ Healthy and synced (green badge)
|
||||
- ⚠️ Degraded or out of sync (yellow badge)
|
||||
- ❌ Unhealthy or failed (red badge)
|
||||
|
||||
## Manual Trigger
|
||||
|
||||
You can manually trigger the workflow from Gitea:
|
||||
|
||||
1. Go to Actions tab
|
||||
2. Select "Auto-update README" workflow
|
||||
3. Click "Run workflow"
|
||||
4. Select branch and run
|
||||
|
||||
## Example Output
|
||||
|
||||
The generated README will look like:
|
||||
|
||||
```markdown
|
||||
# homelab
|
||||
|
||||
ArgoCD homelab project
|
||||
|
||||
## ArgoCD Applications Status
|
||||
|
||||
| Application | Status |
|
||||
| :--- | :---: |
|
||||
| **argocd** | [](https://ag.hexor.cy/applications/argocd/argocd) |
|
||||
...
|
||||
```
|
||||
|
||||
## Reviewing Pull Requests
|
||||
|
||||
When the workflow creates a PR:
|
||||
|
||||
1. Check the Actions tab for the workflow run details
|
||||
2. Review the PR in the Pull Requests tab
|
||||
3. Verify the application list matches your k8s/ structure
|
||||
4. Merge when ready
|
||||
|
||||
The PR will include:
|
||||
- Updated application list
|
||||
- Timestamp of generation
|
||||
- Automatic commit message
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### No PR created
|
||||
|
||||
- Check if there are actually changes in README.md
|
||||
- Verify secrets are configured correctly
|
||||
- Check workflow logs in Actions tab
|
||||
|
||||
### Wrong applications listed
|
||||
|
||||
- Verify k8s/ directory structure
|
||||
- Ensure folder names match expected application names
|
||||
- Check for hidden directories (starting with `.`)
|
||||
|
||||
### Badge not loading
|
||||
|
||||
- Verify ArgoCD badge API is accessible at `https://ag.hexor.cy`
|
||||
- Check application name matches ArgoCD application name
|
||||
- Ensure application exists in ArgoCD
|
||||
|
||||
## Maintenance
|
||||
|
||||
### Update badge URL
|
||||
|
||||
If you need to change the badge URL pattern, edit:
|
||||
- `.gitea/scripts/generate-readme.py` - function `generate_badge_line()`
|
||||
|
||||
### Change workflow trigger
|
||||
|
||||
To modify when the workflow runs, edit:
|
||||
- `.gitea/workflows/update-readme.yaml` - `on:` section
|
||||
|
||||
### Add new categories
|
||||
|
||||
To add new categories (besides core/apps/games), edit:
|
||||
- `.gitea/scripts/generate-readme.py` - function `scan_k8s_directory()` and `generate_readme_content()`
|
||||
91
.gitea/scripts/README.md
Normal file
91
.gitea/scripts/README.md
Normal file
@@ -0,0 +1,91 @@
|
||||
# Wiki Generators for Homelab
|
||||
|
||||
Automated Wiki page generation scripts for documenting homelab infrastructure.
|
||||
|
||||
## 1. Authentik Applications Wiki Generator
|
||||
|
||||
Generates a Wiki page with all applications managed by Authentik from Terraform configuration.
|
||||
|
||||
### Files:
|
||||
- `generate-apps-wiki.py` - Generates Applications.md from Terraform output
|
||||
- `process-terraform-output.py` - Processes Terraform JSON output
|
||||
|
||||
### Workflow:
|
||||
- **Trigger**: Push to `main` branch with Terraform changes
|
||||
- **Workflow**: `.gitea/workflows/authentik-apps.yaml`
|
||||
- **Output**: Applications Wiki page
|
||||
|
||||
## 2. Kubernetes Services Wiki Generator
|
||||
|
||||
Analyzes k8s/ directory and generates comprehensive documentation for all Kubernetes services.
|
||||
|
||||
### Files:
|
||||
- `generate-k8s-wiki.py` - Main script for analyzing k8s services
|
||||
|
||||
### Features:
|
||||
- **Service Types**: Detects Helm Charts, Kustomize, and YAML manifests
|
||||
- **ArgoCD Integration**: Shows auto-sync status and project info
|
||||
- **Service Discovery**: Lists all services, ingresses, and external secrets
|
||||
- **Categorization**: Groups by apps, core, games categories
|
||||
- **Detailed Analysis**: Shows deployments, containers, files
|
||||
|
||||
### Workflow:
|
||||
- **Trigger**: Changes in `k8s/` directory
|
||||
- **Workflow**: `.gitea/workflows/k8s-wiki.yaml`
|
||||
- **Output**: Kubernetes-Services Wiki page
|
||||
|
||||
## GitHub Secrets Configuration
|
||||
|
||||
Required secrets in repository settings:
|
||||
|
||||
```
|
||||
GT_URL=https://gt.hexor.cy
|
||||
GT_WIKI_TOKEN=your_gitea_access_token
|
||||
GT_OWNER=your_username
|
||||
GT_REPO=homelab
|
||||
```
|
||||
|
||||
## Generated Wiki Pages Structure
|
||||
|
||||
### Applications Page
|
||||
- Table with icons (32x32), external/internal URLs
|
||||
- Statistics by type (Proxy vs OAuth2)
|
||||
- Grouping by categories (Core, Tools, Media, etc.)
|
||||
|
||||
### Kubernetes Services Page
|
||||
- Overview table with service types and status
|
||||
- Detailed sections by category
|
||||
- ArgoCD integration status
|
||||
- Service discovery information
|
||||
|
||||
## Local Testing
|
||||
|
||||
### Authentik Apps:
|
||||
```bash
|
||||
cd terraform/authentik
|
||||
terraform output -json > terraform-output.json
|
||||
python3 ../../.gitea/scripts/process-terraform-output.py terraform-output.json processed-output.json
|
||||
python3 ../../.gitea/scripts/generate-apps-wiki.py processed-output.json
|
||||
```
|
||||
|
||||
### K8s Services:
|
||||
```bash
|
||||
pip install pyyaml
|
||||
python3 .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues:
|
||||
|
||||
1. **Terraform output parsing errors**
|
||||
- Check for [command] prefix in output
|
||||
- Verify JSON structure with debug mode
|
||||
|
||||
2. **Wiki upload failures**
|
||||
- Verify Gitea token permissions
|
||||
- Check network connectivity to Gitea instance
|
||||
|
||||
3. **YAML parsing errors in k8s analysis**
|
||||
- Ensure valid YAML syntax in k8s files
|
||||
- Check PyYAML installation
|
||||
226
.gitea/scripts/generate-apps-wiki.py
Normal file
226
.gitea/scripts/generate-apps-wiki.py
Normal file
@@ -0,0 +1,226 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Script for generating Wiki page with applications list from Terraform outputs
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
from datetime import datetime
|
||||
|
||||
def generate_markdown_table(apps_data):
|
||||
"""Generates Markdown table for applications"""
|
||||
|
||||
# Combine all applications
|
||||
all_apps = []
|
||||
|
||||
if 'proxy_apps' in apps_data:
|
||||
for key, app in apps_data['proxy_apps'].items():
|
||||
all_apps.append({
|
||||
'key': key,
|
||||
'name': app['name'],
|
||||
'type': app['type'],
|
||||
'url': app['url'],
|
||||
'internal_url': app.get('internal_url', '-'),
|
||||
'group': app['group'],
|
||||
'description': app['description'],
|
||||
'icon': app['icon'],
|
||||
'slug': app['slug']
|
||||
})
|
||||
|
||||
if 'oauth_apps' in apps_data:
|
||||
for key, app in apps_data['oauth_apps'].items():
|
||||
all_apps.append({
|
||||
'key': key,
|
||||
'name': app['name'],
|
||||
'type': app['type'],
|
||||
'url': app['url'],
|
||||
'internal_url': '-', # OAuth apps don't have internal URLs
|
||||
'group': app['group'],
|
||||
'description': app['description'],
|
||||
'icon': app['icon'],
|
||||
'slug': app['slug']
|
||||
})
|
||||
|
||||
# Sort by groups, then by name
|
||||
all_apps.sort(key=lambda x: (x['group'], x['name']))
|
||||
|
||||
# Generate Markdown
|
||||
markdown = []
|
||||
markdown.append("# Authentik Applications")
|
||||
markdown.append("")
|
||||
markdown.append(f"*Automatically generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S UTC')}*")
|
||||
markdown.append("")
|
||||
markdown.append("## All Applications")
|
||||
markdown.append("")
|
||||
|
||||
# Table
|
||||
markdown.append("| Icon | Name | Type | External URL | Internal URL | Group | Description |")
|
||||
markdown.append("|:----:|------|------|--------------|--------------|-------|-------------|")
|
||||
|
||||
for app in all_apps:
|
||||
# Icon with size constraint
|
||||
if app['icon']:
|
||||
icon = f'<img src="{app["icon"]}" width="32" height="32" alt="{app["name"]}">'
|
||||
else:
|
||||
icon = "📱"
|
||||
|
||||
# External URL link
|
||||
external_link = f"[🔗 {app['url'].replace('https://', '').replace('http://', '')}]({app['url']})" if app.get('url') else "-"
|
||||
|
||||
# Internal URL (only for proxy apps)
|
||||
internal_url = app.get('internal_url', '-')
|
||||
if internal_url != '-':
|
||||
# Show full internal URL without shortening
|
||||
internal_url = f"`{internal_url}`"
|
||||
|
||||
description = app['description'] if app['description'] else "-"
|
||||
|
||||
markdown.append(f"| {icon} | **{app['name']}** | {app['type']} | {external_link} | {internal_url} | {app['group']} | {description} |")
|
||||
|
||||
markdown.append("")
|
||||
|
||||
# Statistics
|
||||
proxy_count = len(apps_data.get('proxy_apps', {}))
|
||||
oauth_count = len(apps_data.get('oauth_apps', {}))
|
||||
total_count = proxy_count + oauth_count
|
||||
|
||||
markdown.append("## Statistics")
|
||||
markdown.append("")
|
||||
markdown.append(f"- **Total applications**: {total_count}")
|
||||
markdown.append(f"- **Proxy applications**: {proxy_count}")
|
||||
markdown.append(f"- **OAuth2/OpenID applications**: {oauth_count}")
|
||||
markdown.append("")
|
||||
|
||||
# Grouping by types
|
||||
groups = {}
|
||||
for app in all_apps:
|
||||
group = app['group']
|
||||
if group not in groups:
|
||||
groups[group] = {'proxy': 0, 'oauth': 0}
|
||||
if app['type'] == 'Proxy':
|
||||
groups[group]['proxy'] += 1
|
||||
else:
|
||||
groups[group]['oauth'] += 1
|
||||
|
||||
markdown.append("## Applications by Groups")
|
||||
markdown.append("")
|
||||
for group, counts in sorted(groups.items()):
|
||||
total = counts['proxy'] + counts['oauth']
|
||||
markdown.append(f"- **{group}**: {total} applications (Proxy: {counts['proxy']}, OAuth: {counts['oauth']})")
|
||||
|
||||
markdown.append("")
|
||||
markdown.append("---")
|
||||
markdown.append("*This page is automatically generated via Terraform CI/CD*")
|
||||
|
||||
return "\n".join(markdown)
|
||||
|
||||
def parse_terraform_output(output_data):
|
||||
"""Parse Terraform output JSON structure"""
|
||||
# Check if this is a full terraform output (with value, type, sensitive fields)
|
||||
if isinstance(output_data, dict) and 'applications_for_wiki' in output_data:
|
||||
# This is full terraform output format
|
||||
app_output = output_data.get('applications_for_wiki', {})
|
||||
if isinstance(app_output, dict) and 'value' in app_output:
|
||||
return app_output['value']
|
||||
else:
|
||||
return app_output
|
||||
else:
|
||||
# This is already the value extracted
|
||||
return output_data
|
||||
|
||||
def main():
|
||||
if len(sys.argv) < 2:
|
||||
print("Usage: python3 generate-apps-wiki.py <terraform-output-json> [--debug]")
|
||||
sys.exit(1)
|
||||
|
||||
output_file = sys.argv[1]
|
||||
debug = "--debug" in sys.argv
|
||||
|
||||
try:
|
||||
# Check if file exists and has content
|
||||
if not os.path.exists(output_file):
|
||||
print(f"ERROR: File {output_file} not found")
|
||||
sys.exit(1)
|
||||
|
||||
file_size = os.path.getsize(output_file)
|
||||
if file_size == 0:
|
||||
print(f"ERROR: File {output_file} is empty")
|
||||
sys.exit(1)
|
||||
|
||||
print(f"📄 Reading Terraform output file: {output_file} ({file_size} bytes)")
|
||||
|
||||
# Read file content
|
||||
with open(output_file, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
if debug:
|
||||
print(f"🔍 File content preview: {content[:200]}...")
|
||||
|
||||
# Clean content - remove command line if present
|
||||
if content.startswith('[command]'):
|
||||
print("⚠️ Detected command prefix, removing...")
|
||||
lines = content.split('\n', 1)
|
||||
if len(lines) > 1:
|
||||
content = lines[1]
|
||||
if debug:
|
||||
print(f"🔍 Cleaned content preview: {content[:200]}...")
|
||||
else:
|
||||
print("ERROR: File contains only command line, no JSON data")
|
||||
sys.exit(1)
|
||||
|
||||
# Parse JSON
|
||||
try:
|
||||
terraform_output = json.loads(content)
|
||||
except json.JSONDecodeError as e:
|
||||
print(f"ERROR: Invalid JSON in {output_file}: {e}")
|
||||
print(f"Content starts with: {repr(content[:100])}")
|
||||
# Try to find where JSON starts
|
||||
json_start = content.find('{')
|
||||
if json_start > 0:
|
||||
print(f"Found JSON starting at position {json_start}, retrying...")
|
||||
content = content[json_start:]
|
||||
try:
|
||||
terraform_output = json.loads(content)
|
||||
except json.JSONDecodeError as e2:
|
||||
print(f"ERROR: Still invalid JSON: {e2}")
|
||||
sys.exit(1)
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
# Extract application data using helper function
|
||||
apps_data = parse_terraform_output(terraform_output)
|
||||
|
||||
if not apps_data:
|
||||
print("ERROR: No applications data found in Terraform output")
|
||||
if debug:
|
||||
print(f"Full output structure: {json.dumps(terraform_output, indent=2)[:500]}...")
|
||||
sys.exit(1)
|
||||
|
||||
# Check if we have correct structure
|
||||
if 'proxy_apps' not in apps_data and 'oauth_apps' not in apps_data:
|
||||
print("ERROR: Expected 'proxy_apps' or 'oauth_apps' in output")
|
||||
print(f"Available keys: {list(apps_data.keys())}")
|
||||
if debug and apps_data:
|
||||
print(f"Data structure: {json.dumps(apps_data, indent=2)[:500]}...")
|
||||
sys.exit(1)
|
||||
|
||||
print(f"📊 Found {len(apps_data.get('proxy_apps', {}))} proxy apps, {len(apps_data.get('oauth_apps', {}))} oauth apps")
|
||||
|
||||
# Generate Markdown
|
||||
markdown_content = generate_markdown_table(apps_data)
|
||||
|
||||
# Write result
|
||||
wiki_file = "Applications.md"
|
||||
with open(wiki_file, 'w', encoding='utf-8') as f:
|
||||
f.write(markdown_content)
|
||||
|
||||
print(f"✅ Wiki page generated: {wiki_file}")
|
||||
print(f"📊 Total applications: {len(apps_data.get('proxy_apps', {})) + len(apps_data.get('oauth_apps', {}))}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
298
.gitea/scripts/generate-k8s-wiki.py
Normal file
298
.gitea/scripts/generate-k8s-wiki.py
Normal file
@@ -0,0 +1,298 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Script for generating Wiki page with Kubernetes services from k8s/ directory
|
||||
"""
|
||||
|
||||
import os
|
||||
import yaml
|
||||
import json
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from collections import defaultdict
|
||||
|
||||
class K8sService:
|
||||
def __init__(self, name, category, path):
|
||||
self.name = name
|
||||
self.category = category
|
||||
self.path = path
|
||||
self.namespace = None
|
||||
self.deployment_type = "Unknown"
|
||||
self.helm_charts = []
|
||||
self.services = []
|
||||
self.ingresses = []
|
||||
self.external_secrets = []
|
||||
self.deployments = []
|
||||
self.pvcs = []
|
||||
self.argo_app = None
|
||||
self.files = []
|
||||
|
||||
def __repr__(self):
|
||||
return f"K8sService({self.name}, {self.deployment_type})"
|
||||
|
||||
def parse_yaml_file(filepath):
|
||||
"""Parse YAML file and return content"""
|
||||
try:
|
||||
with open(filepath, 'r') as f:
|
||||
# Load all documents in the file
|
||||
docs = list(yaml.safe_load_all(f))
|
||||
return docs if len(docs) > 1 else docs[0] if docs else None
|
||||
except Exception as e:
|
||||
print(f" ⚠️ Error parsing {filepath}: {e}")
|
||||
return None
|
||||
|
||||
def analyze_service_directory(service_path, service_name, category):
|
||||
"""Analyze a service directory and extract information"""
|
||||
service = K8sService(service_name, category, service_path)
|
||||
|
||||
# List all files
|
||||
for file in os.listdir(service_path):
|
||||
if file.endswith('.yaml') or file.endswith('.yml'):
|
||||
service.files.append(file)
|
||||
filepath = os.path.join(service_path, file)
|
||||
|
||||
# Parse YAML content
|
||||
content = parse_yaml_file(filepath)
|
||||
if not content:
|
||||
continue
|
||||
|
||||
# Handle multiple documents in one file
|
||||
documents = content if isinstance(content, list) else [content]
|
||||
|
||||
for doc in documents:
|
||||
if not isinstance(doc, dict) or 'kind' not in doc:
|
||||
continue
|
||||
|
||||
kind = doc['kind']
|
||||
metadata = doc.get('metadata', {})
|
||||
|
||||
# ArgoCD Application
|
||||
if kind == 'Application' and doc.get('apiVersion', '').startswith('argoproj.io'):
|
||||
service.argo_app = {
|
||||
'name': metadata.get('name', ''),
|
||||
'namespace': doc.get('spec', {}).get('destination', {}).get('namespace', ''),
|
||||
'project': doc.get('spec', {}).get('project', ''),
|
||||
'auto_sync': doc.get('spec', {}).get('syncPolicy', {}).get('automated') is not None
|
||||
}
|
||||
service.namespace = service.argo_app['namespace']
|
||||
|
||||
# Kustomization
|
||||
elif kind == 'Kustomization':
|
||||
if 'helmCharts' in doc:
|
||||
service.deployment_type = "Helm Chart"
|
||||
for chart in doc.get('helmCharts', []):
|
||||
service.helm_charts.append({
|
||||
'name': chart.get('name', ''),
|
||||
'repo': chart.get('repo', ''),
|
||||
'version': chart.get('version', ''),
|
||||
'namespace': chart.get('namespace', service.namespace)
|
||||
})
|
||||
else:
|
||||
service.deployment_type = "Kustomize"
|
||||
|
||||
# Deployment
|
||||
elif kind == 'Deployment':
|
||||
service.deployments.append({
|
||||
'name': metadata.get('name', ''),
|
||||
'namespace': metadata.get('namespace', service.namespace),
|
||||
'replicas': doc.get('spec', {}).get('replicas', 1),
|
||||
'containers': [c.get('name', '') for c in doc.get('spec', {}).get('template', {}).get('spec', {}).get('containers', [])]
|
||||
})
|
||||
if service.deployment_type == "Unknown":
|
||||
service.deployment_type = "YAML Manifests"
|
||||
|
||||
# Service
|
||||
elif kind == 'Service':
|
||||
svc_spec = doc.get('spec', {})
|
||||
service.services.append({
|
||||
'name': metadata.get('name', ''),
|
||||
'type': svc_spec.get('type', 'ClusterIP'),
|
||||
'ports': svc_spec.get('ports', [])
|
||||
})
|
||||
|
||||
# Ingress
|
||||
elif kind == 'Ingress':
|
||||
rules = doc.get('spec', {}).get('rules', [])
|
||||
hosts = []
|
||||
for rule in rules:
|
||||
if 'host' in rule:
|
||||
hosts.append(rule['host'])
|
||||
service.ingresses.append({
|
||||
'name': metadata.get('name', ''),
|
||||
'hosts': hosts
|
||||
})
|
||||
|
||||
# ExternalSecret
|
||||
elif kind == 'ExternalSecret':
|
||||
service.external_secrets.append({
|
||||
'name': metadata.get('name', ''),
|
||||
'store': doc.get('spec', {}).get('secretStoreRef', {}).get('name', '')
|
||||
})
|
||||
|
||||
# PersistentVolumeClaim
|
||||
elif kind == 'PersistentVolumeClaim':
|
||||
service.pvcs.append({
|
||||
'name': metadata.get('name', ''),
|
||||
'size': doc.get('spec', {}).get('resources', {}).get('requests', {}).get('storage', '')
|
||||
})
|
||||
|
||||
# If no specific deployment type found but has YAML files
|
||||
if service.deployment_type == "Unknown" and service.files:
|
||||
service.deployment_type = "YAML Manifests"
|
||||
|
||||
return service
|
||||
|
||||
def generate_markdown_table(services):
    """Render the Kubernetes-Services wiki page for `services` as markdown.

    Args:
        services: list of analyzed service objects; each must expose the
            attributes read below: name, category, deployment_type,
            namespace, path, files, ingresses, services, external_secrets,
            helm_charts, deployments and argo_app.

    Returns:
        The complete wiki page as a single newline-joined string.
    """
    # Local import keeps the UTC fix self-contained (the module-level
    # import only brings in `datetime`).
    from datetime import timezone

    markdown = []
    markdown.append("# Kubernetes Services")
    markdown.append("")
    # BUG FIX: datetime.now() without a tz returns *local* time, but the
    # label claimed UTC. Use an explicitly UTC-aware clock so the label
    # is truthful.
    markdown.append(f"*Automatically generated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}*")
    markdown.append("")

    # Group services by category for both the table and the detail sections.
    categories = defaultdict(list)
    for service in services:
        categories[service.category].append(service)

    # Statistics
    markdown.append("## Statistics")
    markdown.append("")
    markdown.append(f"- **Total Services**: {len(services)}")
    markdown.append(f"- **Categories**: {len(categories)}")
    helm_count = sum(1 for s in services if s.deployment_type == "Helm Chart")
    kustomize_count = sum(1 for s in services if s.deployment_type == "Kustomize")
    yaml_count = sum(1 for s in services if s.deployment_type == "YAML Manifests")
    markdown.append(f"- **Helm Charts**: {helm_count}")
    markdown.append(f"- **Kustomize**: {kustomize_count}")
    markdown.append(f"- **YAML Manifests**: {yaml_count}")
    markdown.append("")

    # Emoji per deployment type; hoisted out of the row loop (previously
    # this dict was rebuilt for every service).
    type_emoji = {
        "Helm Chart": "🎩",
        "Kustomize": "🔧",
        "YAML Manifests": "📄",
        "Unknown": "❓"
    }

    # Main table
    markdown.append("## All Services")
    markdown.append("")
    markdown.append("| Service | Category | Type | Namespace | Ingresses | Services | Secrets | Auto-Sync |")
    markdown.append("|---------|----------|------|-----------|-----------|----------|---------|-----------|")

    for category in sorted(categories.keys()):
        for service in sorted(categories[category], key=lambda x: x.name):
            # Service name linking to its directory in the repo
            name_link = f"[{service.name}](k8s/{service.category}/{service.name}/)"

            type_str = f"{type_emoji.get(service.deployment_type, '')} {service.deployment_type}"

            # One clickable host link per ingress rule host
            ingresses = []
            for ing in service.ingresses:
                for host in ing['hosts']:
                    ingresses.append(f"[{host}](https://{host})")
            ingress_str = "<br>".join(ingresses) if ingresses else "-"

            # `name:port1,port2` per k8s Service
            svc_list = []
            for svc in service.services:
                ports = [f"{p.get('port', '?')}" for p in svc['ports']]
                svc_list.append(f"`{svc['name']}:{','.join(ports)}`")
            svc_str = "<br>".join(svc_list) if svc_list else "-"

            # External Secrets count
            secrets_str = f"{len(service.external_secrets)} secrets" if service.external_secrets else "-"

            # Auto-sync flag from the ArgoCD Application, when one was found
            auto_sync = "✅" if service.argo_app and service.argo_app.get('auto_sync') else "❌"

            markdown.append(f"| **{name_link}** | {category} | {type_str} | {service.namespace or '-'} | {ingress_str} | {svc_str} | {secrets_str} | {auto_sync} |")

    markdown.append("")

    # Detailed sections by category
    for category in sorted(categories.keys()):
        markdown.append(f"## {category.title()} Services")
        markdown.append("")

        for service in sorted(categories[category], key=lambda x: x.name):
            markdown.append(f"### {service.name}")
            markdown.append("")

            # Basic info
            markdown.append(f"- **Type**: {service.deployment_type}")
            markdown.append(f"- **Namespace**: {service.namespace or 'Not specified'}")
            markdown.append(f"- **Path**: `{service.path}`")

            # Helm charts
            if service.helm_charts:
                markdown.append("- **Helm Charts**:")
                for chart in service.helm_charts:
                    markdown.append(f"  - {chart['name']} v{chart['version']} from {chart['repo']}")

            # Deployments
            if service.deployments:
                markdown.append("- **Deployments**:")
                for dep in service.deployments:
                    containers = ', '.join(dep['containers'])
                    markdown.append(f"  - {dep['name']} ({dep['replicas']} replicas) - Containers: {containers}")

            # Files
            if service.files:
                markdown.append(f"- **Files**: {', '.join(sorted(service.files))}")

            markdown.append("")

    markdown.append("---")
    markdown.append("*This page is automatically generated from k8s/ directory via CI/CD*")

    return "\n".join(markdown)
|
||||
|
||||
def main():
    """CLI entry point: scan a k8s/ directory tree and emit the wiki page.

    Usage: generate-k8s-wiki.py <k8s-directory> [output-file]
    Exits 1 on missing arguments or a non-existent directory.
    """
    if len(sys.argv) < 2:
        print("Usage: generate-k8s-wiki.py <k8s-directory> [output-file]")
        sys.exit(1)

    root = sys.argv[1]
    target = sys.argv[2] if len(sys.argv) > 2 else "Kubernetes-Services.md"

    if not os.path.exists(root):
        print(f"❌ Directory {root} not found")
        sys.exit(1)

    print(f"📂 Scanning {root}...")

    services = []

    # Only these three category folders are scanned; anything else is ignored.
    for bucket in ('apps', 'core', 'games'):
        bucket_dir = os.path.join(root, bucket)
        if not os.path.exists(bucket_dir):
            print(f" ⚠️ Category {bucket} not found")
            continue

        print(f"📁 Processing {bucket}/...")

        # Every sub-directory of a category folder is treated as one service.
        for entry in os.listdir(bucket_dir):
            candidate = os.path.join(bucket_dir, entry)
            if not os.path.isdir(candidate):
                continue
            print(f" 🔍 Analyzing {entry}...")
            services.append(analyze_service_directory(candidate, entry, bucket))

    print(f"\n✅ Found {len(services)} services")

    # Render the wiki page and persist it.
    page = generate_markdown_table(services)
    with open(target, 'w', encoding='utf-8') as fh:
        fh.write(page)

    print(f"📄 Wiki page generated: {target}")
    print(f"📊 Total services: {len(services)}")


if __name__ == "__main__":
    main()
|
||||
161
.gitea/scripts/generate-readme.py
Normal file
161
.gitea/scripts/generate-readme.py
Normal file
@@ -0,0 +1,161 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate README.md with ArgoCD application status badges.
|
||||
Scans k8s/ directory structure to find all applications and generates badges for them.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
|
||||
|
||||
def scan_k8s_directory(k8s_path: str) -> Dict[str, List[str]]:
    """
    Scan k8s/ directory and return applications grouped by category.

    Only the fixed categories 'core', 'apps' and 'games' are inspected;
    each non-hidden sub-directory of a category counts as one application.

    Args:
        k8s_path: Path to k8s directory

    Returns:
        Dictionary with categories as keys and sorted lists of app names
        as values (empty list when the category folder is absent).
    """
    base = Path(k8s_path)
    catalog: Dict[str, List[str]] = {}

    for bucket in ('core', 'apps', 'games'):
        folder = base / bucket
        names: List[str] = []
        # is_dir() implies existence, so a single check suffices.
        if folder.is_dir():
            names = sorted(
                entry.name
                for entry in folder.iterdir()
                if entry.is_dir() and not entry.name.startswith('.')
            )
        catalog[bucket] = names

    return catalog
|
||||
|
||||
|
||||
def generate_badge_line(app_name: str) -> str:
    """
    Generate markdown table row with a status badge for an application.

    Args:
        app_name: Name of the application

    Returns:
        Markdown row of the form
        `| **name** | [![name](badge_url)](app_url) |` — the badge image
        wrapped in a link to the ArgoCD application page.
    """
    badge_url = f"https://ag.hexor.cy/api/badge?name={app_name}&revision=true"
    app_url = f"https://ag.hexor.cy/applications/argocd/{app_name}"
    # BUG FIX: badge_url was computed but never used, so the status cell
    # rendered an empty link `[](...)`. Embed the badge image inside the link.
    return f"| **{app_name}** | [![{app_name}]({badge_url})]({app_url}) |"
|
||||
|
||||
|
||||
def generate_readme_content(apps_by_category: Dict[str, List[str]]) -> str:
    """
    Generate README.md content with all applications.

    Lays out a two-column HTML table: core + games applications in the
    left cell, regular applications in the right cell, one badge row per
    application.

    Args:
        apps_by_category: Dictionary with apps grouped by category

    Returns:
        Complete README.md content, terminated by a single newline.
    """
    # Shared markdown table header reused by all three sections.
    table_header = ["| Application | Status |", "| :--- | :---: |"]

    out: List[str] = [
        "# homelab",
        "",
        "ArgoCD homelab project",
        "",
        "## ArgoCD Applications Status",
        "",
        "<table>",
        "<tr>",
        "<td valign=\"top\" width=\"50%\">",
        "",
        "### Core Applications",
        "",
    ]
    out += table_header
    out += [generate_badge_line(app) for app in apps_by_category.get('core', [])]

    out += ["", "### Games", ""]
    out += table_header
    out += [generate_badge_line(app) for app in apps_by_category.get('games', [])]

    # Right-hand column holds the general applications.
    out += ["</td>", "<td valign=\"top\" width=\"50%\">", "", "### Applications", ""]
    out += table_header
    out += [generate_badge_line(app) for app in apps_by_category.get('apps', [])]

    out += ["", "</td>", "</tr>", "</table>"]

    return '\n'.join(out) + '\n'
|
||||
|
||||
|
||||
def main():
    """CLI entry point: scan k8s/ and regenerate README.md.

    Usage: generate-readme.py <k8s-directory> [output-file]
    Exits 1 on missing arguments or a non-existent directory.
    """
    if len(sys.argv) < 2:
        print("Usage: generate-readme.py <k8s-directory> [output-file]")
        print("Example: generate-readme.py k8s/ README.md")
        sys.exit(1)

    root = sys.argv[1]
    target = sys.argv[2] if len(sys.argv) > 2 else "README.md"

    if not os.path.exists(root):
        print(f"Error: Directory {root} does not exist")
        sys.exit(1)

    print(f"📁 Scanning {root}...")
    catalog = scan_k8s_directory(root)

    # Print per-category statistics for non-empty categories only.
    total_apps = sum(len(apps) for apps in catalog.values())
    print(f"✅ Found {total_apps} applications:")
    for category, apps in catalog.items():
        if apps:
            print(f" - {category}: {len(apps)} apps")

    print(f"📝 Generating {target}...")
    content = generate_readme_content(catalog)

    with open(target, 'w', encoding='utf-8') as fh:
        fh.write(content)

    print(f"✅ {target} generated successfully")
    print(f" Total lines: {len(content.splitlines())}")
    print(f" File size: {len(content)} bytes")


if __name__ == "__main__":
    main()
|
||||
105
.gitea/scripts/process-terraform-output.py
Normal file
105
.gitea/scripts/process-terraform-output.py
Normal file
@@ -0,0 +1,105 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Process Terraform output to extract applications_for_wiki data
|
||||
Handles various output formats and cleans up invalid JSON
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
|
||||
def clean_command_prefix(content):
    """Strip a leading '[command]' line (left by CI runners) from output.

    Returns everything after the first newline when the content starts
    with '[command]'; otherwise (or when there is no newline to split on)
    the content is returned unchanged.
    """
    if not content.startswith('[command]'):
        return content
    _, newline, remainder = content.partition('\n')
    # No newline means the whole content is the prefixed line — keep it.
    return remainder if newline else content
|
||||
|
||||
def extract_valid_json(content):
    """Extract the first balanced top-level JSON object from `content`.

    Scans from the first '{' and returns the substring up to and including
    the matching closing brace, or None when no balanced object is found.

    BUG FIX: the original counted every '{'/'}' character, so braces inside
    JSON string values (e.g. {"a": "}"}) unbalanced the count and truncated
    the object mid-string. We now track string/escape state so that only
    structural braces are counted.
    """
    start = content.find('{')
    if start < 0:
        return None

    depth = 0
    in_string = False   # currently inside a double-quoted JSON string
    escaped = False     # previous char inside the string was a backslash
    for i in range(start, len(content)):
        ch = content[i]
        if in_string:
            if escaped:
                escaped = False
            elif ch == '\\':
                escaped = True
            elif ch == '"':
                in_string = False
        elif ch == '"':
            in_string = True
        elif ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
            if depth == 0:
                return content[start:i + 1]

    # Ran off the end without closing the object.
    return None
|
||||
|
||||
def extract_value(data):
    """Unwrap Terraform's {"value": ...} output envelope when present.

    Non-dict inputs and dicts without a 'value' key pass through untouched.
    """
    wrapped = isinstance(data, dict) and 'value' in data
    return data['value'] if wrapped else data
|
||||
|
||||
def main():
    """CLI entry point: read raw terraform output, salvage JSON, write it out.

    Usage: process-terraform-output.py <input-file> <output-file>
    Exits 1 on bad arguments, unreadable input, or unrecoverable JSON.
    """
    if len(sys.argv) != 3:
        print("Usage: process-terraform-output.py <input-file> <output-file>")
        sys.exit(1)

    input_file, output_file = sys.argv[1], sys.argv[2]

    try:
        # Read input file
        with open(input_file, 'r') as fh:
            raw = fh.read()

        # Strip a runner-injected '[command]' prefix line, if any.
        raw = clean_command_prefix(raw)

        # First attempt: the content may already be clean JSON.
        try:
            data = json.loads(raw)
            print("✅ Valid JSON parsed successfully")
        except json.JSONDecodeError as e:
            print(f"⚠️ Initial JSON parse failed: {e}")
            print("🔍 Attempting to extract valid JSON portion...")

            candidate = extract_valid_json(raw)
            if not candidate:
                print("❌ Could not extract valid JSON from content")
                sys.exit(1)
            try:
                data = json.loads(candidate)
                print("✅ Extracted valid JSON successfully")
            except json.JSONDecodeError as e2:
                print(f"❌ Failed to parse extracted JSON: {e2}")
                sys.exit(1)

        # Unwrap Terraform's {"value": ...} envelope if present.
        result = extract_value(data)

        with open(output_file, 'w') as fh:
            json.dump(result, fh, indent=2)

        print(f"✅ Processed output written to {output_file}")

        # Show a short preview of what was written.
        preview = json.dumps(result, indent=2)[:200]
        print(f"📄 Preview: {preview}...")

    except FileNotFoundError:
        print(f"❌ Input file {input_file} not found")
        sys.exit(1)
    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
147
.gitea/workflows/authentik-apps.yaml
Normal file
147
.gitea/workflows/authentik-apps.yaml
Normal file
@@ -0,0 +1,147 @@
|
||||
name: 'Terraform'

on:
  push:
    branches: [ "main" ]
    paths:
      - 'terraform/authentik/**'
  pull_request:

permissions:
  contents: read

jobs:
  terraform:
    name: 'Terraform'
    runs-on: ubuntu-latest
    environment: production

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Setup Terraform
        uses: hashicorp/setup-terraform@v2
        with:
          cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}

      - name: Terraform Init
        env:
          TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
        run: terraform init
        working-directory: ./terraform/authentik

      - name: Terraform Format
        env:
          TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
        run: terraform fmt -check
        continue-on-error: true
        working-directory: ./terraform/authentik

      # BUG FIX: this workflow also triggers on pull_request, but apply was
      # unconditional — any PR would auto-apply unreviewed changes with
      # -auto-approve. Restrict apply (and the post-apply wiki steps) to
      # push events on main.
      - name: Terraform Apply
        if: github.event_name == 'push'
        env:
          TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
        run: terraform apply -var-file proxy-apps.tfvars -var-file oauth2-apps.tfvars -var-file terraform.tfvars -var-file groups.tfvars -input=false -auto-approve -parallelism=100
        working-directory: ./terraform/authentik

      - name: Generate Wiki Content
        if: success() && github.event_name == 'push'
        continue-on-error: true
        env:
          TF_VAR_authentik_token: ${{ secrets.AUTHENTIK_TOKEN }}
        run: |
          echo "📋 Starting Wiki generation..."
          cd ./terraform/authentik

          # Get terraform output (stderr is folded in on purpose; the
          # processing script below salvages the JSON portion)
          echo "🔍 Generating Terraform output..."
          terraform output -json applications_for_wiki > terraform-raw-output.json 2>&1

          # Process output to extract clean JSON
          echo "📤 Processing Terraform output..."
          python3 ../../.gitea/scripts/process-terraform-output.py terraform-raw-output.json terraform-output.json

          # Run wiki generation
          echo "📊 Running wiki generation script..."
          if python3 ../../.gitea/scripts/generate-apps-wiki.py terraform-output.json; then
            echo "✅ Wiki content generated successfully"
          else
            echo "⚠️ Wiki generation failed, retrying with debug..."
            python3 ../../.gitea/scripts/generate-apps-wiki.py terraform-output.json --debug || echo "⚠️ Wiki generation failed"
          fi

          # Check results (wiki generation is best-effort: exit 0 either way)
          if [ -f "Applications.md" ]; then
            echo "✅ Wiki file created: $(wc -l < Applications.md) lines"
          else
            echo "⚠️ Wiki content not generated"
            exit 0
          fi
        working-directory: ./

      - name: Upload Wiki to Gitea
        if: success() && github.event_name == 'push'
        continue-on-error: true
        run: |
          cd ./terraform/authentik

          # Set variables
          GITEA_URL="${{ secrets.GT_URL }}"
          GITEA_TOKEN="${{ secrets.GT_WIKI_TOKEN }}"
          GITEA_OWNER="${{ secrets.GT_OWNER }}"
          GITEA_REPO="${{ secrets.GT_REPO }}"

          # Debug variables (without exposing token)
          echo "🔍 Checking variables..."
          echo "GITEA_URL: ${GITEA_URL:-NOT SET}"
          echo "GITEA_OWNER: ${GITEA_OWNER:-NOT SET}"
          echo "GITEA_REPO: ${GITEA_REPO:-NOT SET}"
          echo "GITEA_TOKEN: $(if [ -n "$GITEA_TOKEN" ]; then echo "SET"; else echo "NOT SET"; fi)"

          # Check if file exists
          if [ ! -f "Applications.md" ]; then
            echo "⚠️ Applications.md not found, skipping wiki update"
            exit 0
          fi

          echo "📤 Uploading to Gitea Wiki..."

          # Encode content to base64
          CONTENT=$(base64 -w 0 Applications.md)

          # Check if wiki page exists
          WIKI_PAGE_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" \
            -H "Authorization: token $GITEA_TOKEN" \
            "$GITEA_URL/api/v1/repos/$GITEA_OWNER/$GITEA_REPO/wiki/page/Applications" || echo "000")

          if [ "$WIKI_PAGE_EXISTS" = "200" ]; then
            echo "📝 Updating existing wiki page..."
            curl -X PATCH \
              -H "Authorization: token $GITEA_TOKEN" \
              -H "Content-Type: application/json" \
              -d "{
                \"title\": \"Applications\",
                \"content_base64\": \"$CONTENT\",
                \"message\": \"Update applications list from CI/CD [$(date)]\"
              }" \
              "$GITEA_URL/api/v1/repos/$GITEA_OWNER/$GITEA_REPO/wiki/page/Applications" || echo "⚠️ Wiki update failed"
          else
            echo "📄 Creating new wiki page..."
            curl -X POST \
              -H "Authorization: token $GITEA_TOKEN" \
              -H "Content-Type: application/json" \
              -d "{
                \"title\": \"Applications\",
                \"content_base64\": \"$CONTENT\",
                \"message\": \"Create applications list from CI/CD [$(date)]\"
              }" \
              "$GITEA_URL/api/v1/repos/$GITEA_OWNER/$GITEA_REPO/wiki/new" || echo "⚠️ Wiki creation failed"
          fi

          echo "✅ Wiki update process completed"
        working-directory: ./
|
||||
|
||||
111
.gitea/workflows/k8s-wiki.yaml
Normal file
111
.gitea/workflows/k8s-wiki.yaml
Normal file
@@ -0,0 +1,111 @@
|
||||
# Regenerates the Kubernetes-Services wiki page from the k8s/ tree and
# pushes it to the Gitea wiki via the REST API.
name: 'Update Kubernetes Services Wiki'

on:
  push:
    branches: [ "main" ]
    # paths:
    #   - 'k8s/**'
    #   - '.gitea/scripts/generate-k8s-wiki.py'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  update-k8s-wiki:
    name: 'Generate and Update K8s Wiki'
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Install Python dependencies
        run: |
          pip install pyyaml

      - name: Generate K8s Services Wiki
        run: |
          echo "📋 Starting K8s wiki generation..."
          python3 .gitea/scripts/generate-k8s-wiki.py k8s/ Kubernetes-Services.md

          if [ -f "Kubernetes-Services.md" ]; then
            echo "✅ Wiki content generated successfully"
            echo "📄 File size: $(wc -c < Kubernetes-Services.md) bytes"
            echo "📄 Lines: $(wc -l < Kubernetes-Services.md)"
          else
            echo "❌ Wiki content not generated"
            exit 1
          fi

      # Best-effort upload: a wiki failure must not fail the build.
      - name: Upload Wiki to Gitea
        continue-on-error: true
        run: |
          # Set variables
          GITEA_URL="${{ secrets.GT_URL }}"
          GITEA_TOKEN="${{ secrets.GT_WIKI_TOKEN }}"
          GITEA_OWNER="${{ secrets.GT_OWNER }}"
          GITEA_REPO="${{ secrets.GT_REPO }}"

          # Debug variables (without exposing token)
          echo "🔍 Checking variables..."
          echo "GITEA_URL: ${GITEA_URL:-NOT SET}"
          echo "GITEA_OWNER: ${GITEA_OWNER:-NOT SET}"
          echo "GITEA_REPO: ${GITEA_REPO:-NOT SET}"
          echo "GITEA_TOKEN: $(if [ -n "$GITEA_TOKEN" ]; then echo "SET"; else echo "NOT SET"; fi)"

          if [ ! -f "Kubernetes-Services.md" ]; then
            echo "❌ Kubernetes-Services.md not found"
            exit 1
          fi

          echo "📤 Uploading to Gitea Wiki..."

          # Encode content to base64
          CONTENT=$(base64 -w 0 Kubernetes-Services.md)

          # Check if wiki page exists
          WIKI_PAGE_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" \
            -H "Authorization: token $GITEA_TOKEN" \
            "$GITEA_URL/api/v1/repos/$GITEA_OWNER/$GITEA_REPO/wiki/page/Kubernetes-Services" || echo "000")

          if [ "$WIKI_PAGE_EXISTS" = "200" ]; then
            echo "📝 Updating existing wiki page..."
            curl -X PATCH \
              -H "Authorization: token $GITEA_TOKEN" \
              -H "Content-Type: application/json" \
              -d "{
                \"title\": \"Kubernetes-Services\",
                \"content_base64\": \"$CONTENT\",
                \"message\": \"Update K8s services list from CI/CD [$(date)]\"
              }" \
              "$GITEA_URL/api/v1/repos/$GITEA_OWNER/$GITEA_REPO/wiki/page/Kubernetes-Services" || echo "⚠️ Wiki update failed"
          else
            echo "📄 Creating new wiki page..."
            curl -X POST \
              -H "Authorization: token $GITEA_TOKEN" \
              -H "Content-Type: application/json" \
              -d "{
                \"title\": \"Kubernetes-Services\",
                \"content_base64\": \"$CONTENT\",
                \"message\": \"Create K8s services list from CI/CD [$(date)]\"
              }" \
              "$GITEA_URL/api/v1/repos/$GITEA_OWNER/$GITEA_REPO/wiki/new" || echo "⚠️ Wiki creation failed"
          fi

          echo "✅ Wiki update process completed"
          echo "🔗 Wiki URL: $GITEA_URL/$GITEA_OWNER/$GITEA_REPO/wiki/Kubernetes-Services"

      - name: Summary
        if: always()
        run: |
          echo "## 📊 K8s Wiki Update Summary" >> $GITHUB_STEP_SUMMARY
          if [ -f "Kubernetes-Services.md" ]; then
            echo "- ✅ K8s services analyzed" >> $GITHUB_STEP_SUMMARY
            echo "- ✅ Wiki page generated" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "**Services found:** $(grep -c '^|' Kubernetes-Services.md || echo 0)" >> $GITHUB_STEP_SUMMARY
          else
            echo "- ❌ Wiki generation failed" >> $GITHUB_STEP_SUMMARY
          fi
          echo "**Generated at:** $(date)" >> $GITHUB_STEP_SUMMARY
|
||||
@@ -1,9 +1,9 @@
|
||||
name: Check with kubeconform
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
|
||||
paths:
|
||||
- 'k8s/**'
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -16,22 +16,53 @@ jobs:
|
||||
- name: Kubeconform validation
|
||||
id: kubeconform
|
||||
run: |
|
||||
# Create exclusion list - add files that should be skipped from validation
|
||||
EXCLUSIONS=(
|
||||
"./k8s/core/system-upgrade/crd.yaml"
|
||||
# Add more files here as needed
|
||||
# "./path/to/another/file.yaml"
|
||||
)
|
||||
|
||||
# Create a temporary file for storing validation output
|
||||
VALIDATION_OUTPUT=$(mktemp)
|
||||
|
||||
# Run kubeconform and capture output
|
||||
find . -name '*.yaml' \
|
||||
# Function to check if file is in exclusions
|
||||
is_excluded() {
|
||||
local file="$1"
|
||||
for exclusion in "${EXCLUSIONS[@]}"; do
|
||||
if [[ "$file" == "$exclusion" ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# Find all yaml files and filter out exclusions
|
||||
YAML_FILES=()
|
||||
while IFS= read -r -d '' file; do
|
||||
if ! is_excluded "$file"; then
|
||||
YAML_FILES+=("$file")
|
||||
else
|
||||
echo "⚠️ Skipping excluded file: $file"
|
||||
fi
|
||||
done < <(find . -name '*.yaml' \
|
||||
! -name '*values.yaml' \
|
||||
! -path './.gitea/*' \
|
||||
-print0 \
|
||||
| xargs -0 kubeconform \
|
||||
-summary \
|
||||
-verbose \
|
||||
-output pretty \
|
||||
-ignore-missing-schemas \
|
||||
-schema-location default \
|
||||
-schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' \
|
||||
-schema-location 'https://raw.githubusercontent.com/SchemaStore/schemastore/refs/heads/master/src/schemas/json/kustomization.json' > $VALIDATION_OUTPUT 2>&1 || true
|
||||
-print0)
|
||||
|
||||
# Run kubeconform only if there are files to validate
|
||||
if [ ${#YAML_FILES[@]} -gt 0 ]; then
|
||||
printf '%s\0' "${YAML_FILES[@]}" | xargs -0 kubeconform \
|
||||
-summary \
|
||||
-verbose \
|
||||
-output pretty \
|
||||
-ignore-missing-schemas \
|
||||
-schema-location default \
|
||||
-schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' \
|
||||
-schema-location 'https://raw.githubusercontent.com/SchemaStore/schemastore/refs/heads/master/src/schemas/json/kustomization.json' > $VALIDATION_OUTPUT 2>&1 || true
|
||||
else
|
||||
echo "No files to validate after applying exclusions" > $VALIDATION_OUTPUT
|
||||
fi
|
||||
|
||||
# Display output in logs
|
||||
cat $VALIDATION_OUTPUT
|
||||
@@ -44,7 +75,7 @@ jobs:
|
||||
cat invalid_files.txt
|
||||
exit 1
|
||||
else
|
||||
echo "All manifests are valid!"
|
||||
echo "✅ All manifests are valid!"
|
||||
fi
|
||||
continue-on-error: true
|
||||
|
||||
@@ -69,4 +100,4 @@ jobs:
|
||||
|
||||
Invalid files:
|
||||
${{ env.INVALID_FILES }}
|
||||
<a href="https://gt.hexor.cy/${{ github.repository }}/actions/runs/${{ github.run_number }}">🔗 Check details</a>
|
||||
<a href="https://gt.hexor.cy/${{ github.repository }}/actions/runs/${{ github.run_number }}">🔗 Check details</a>
|
||||
|
||||
164
.gitea/workflows/update-readme.yaml
Normal file
164
.gitea/workflows/update-readme.yaml
Normal file
@@ -0,0 +1,164 @@
|
||||
# Regenerates README.md from the k8s/ tree and opens a PR when it changed.
name: 'Auto-update README'

on:
  push:
    branches: [ "main" ]
    paths:
      - 'k8s/**'
  workflow_dispatch:

permissions:
  contents: write
  pull-requests: write

jobs:
  update-readme:
    name: 'Generate README and Create MR'
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config --global user.name "Gitea Actions Bot"
          git config --global user.email "actions@gitea.local"

      - name: Generate README
        run: |
          echo "📋 Starting README generation..."
          python3 .gitea/scripts/generate-readme.py k8s/ README.md

          if [ -f "README.md" ]; then
            echo "✅ README generated successfully"
            echo "📄 File size: $(wc -c < README.md) bytes"
            echo "📄 Lines: $(wc -l < README.md)"
          else
            echo "❌ README not generated"
            exit 1
          fi

      - name: Check for changes
        id: check_changes
        run: |
          if git diff --quiet README.md; then
            echo "No changes detected in README.md"
            echo "has_changes=false" >> $GITHUB_OUTPUT
          else
            echo "Changes detected in README.md"
            echo "has_changes=true" >> $GITHUB_OUTPUT
          fi

      # BUG FIX: this step writes pr_number/pr_url to $GITHUB_OUTPUT but had
      # no `id:`, so the Summary step's `steps.create_pr.outputs.*`
      # references were always empty. Give it the id the Summary expects.
      - name: Create Pull Request
        id: create_pr
        if: steps.check_changes.outputs.has_changes == 'true'
        run: |
          # Set variables
          GITEA_URL="${{ secrets.GT_URL }}"
          GITEA_TOKEN="${{ secrets.GT_TOKEN }}"
          GITEA_OWNER="${{ secrets.GT_OWNER }}"
          GITEA_REPO="${{ secrets.GT_REPO }}"
          BRANCH_NAME="auto-update-readme-$(date +%Y%m%d-%H%M%S)"

          echo "🔍 Configuration:"
          echo "GITEA_URL: ${GITEA_URL:-NOT SET}"
          echo "GITEA_OWNER: ${GITEA_OWNER:-NOT SET}"
          echo "GITEA_REPO: ${GITEA_REPO:-NOT SET}"
          echo "BRANCH_NAME: $BRANCH_NAME"

          # Create and push new branch
          echo "🌿 Creating branch: $BRANCH_NAME"
          git checkout -b "$BRANCH_NAME"
          git add README.md
          git commit -m "Auto-update README with current k8s applications" \
            -m "Generated by CI/CD workflow on $(date +%Y-%m-%d\ %H:%M:%S)" \
            -m "This PR updates the README.md file with the current list of applications found in the k8s/ directory structure."

          # Push branch to remote
          echo "📤 Pushing branch to remote..."
          git push origin "$BRANCH_NAME"

          # Create Pull Request using Gitea API
          echo "🔀 Creating Pull Request..."

          PR_TITLE="Auto-update README with k8s applications"

          # Create PR body (heredoc terminator must stay at script column 0)
          cat > /tmp/pr_body.json <<EOF
          {
            "title": "$PR_TITLE",
            "body": "This PR automatically updates README.md based on the current k8s/ directory structure.\n\n## Changes\n- Updated application list in README.md\n- Applications are now synced with k8s/ folders\n\n## Review\nPlease review and merge if everything looks correct.\n\n---\n🤖 This PR was automatically generated by CI/CD workflow\n⏰ Generated at: $(date '+%Y-%m-%d %H:%M:%S')",
            "head": "$BRANCH_NAME",
            "base": "main"
          }
          EOF

          # Create PR via API
          echo "Making API request to: $GITEA_URL/api/v1/repos/$GITEA_OWNER/$GITEA_REPO/pulls"
          echo "Request body:"
          cat /tmp/pr_body.json

          RESPONSE=$(curl -s -w "\nHTTP_CODE:%{http_code}" -X POST \
            -H "Authorization: token $GITEA_TOKEN" \
            -H "Content-Type: application/json" \
            -d @/tmp/pr_body.json \
            "$GITEA_URL/api/v1/repos/$GITEA_OWNER/$GITEA_REPO/pulls")

          # Extract HTTP code and response body
          HTTP_CODE=$(echo "$RESPONSE" | grep "HTTP_CODE:" | cut -d':' -f2)
          RESPONSE_BODY=$(echo "$RESPONSE" | sed '/HTTP_CODE:/d')

          echo "API Response (HTTP $HTTP_CODE):"
          echo "$RESPONSE_BODY"

          # Extract PR number and URL from response
          PR_NUMBER=$(echo "$RESPONSE_BODY" | grep -o '"number":[0-9]*' | head -1 | cut -d':' -f2)

          if [ -n "$PR_NUMBER" ] && [ "$HTTP_CODE" = "201" ]; then
            echo "✅ Pull Request created successfully!"
            echo "📝 PR #$PR_NUMBER"
            echo "🔗 URL: $GITEA_URL/$GITEA_OWNER/$GITEA_REPO/pulls/$PR_NUMBER"

            # Save PR info for summary
            echo "pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT
            echo "pr_url=$GITEA_URL/$GITEA_OWNER/$GITEA_REPO/pulls/$PR_NUMBER" >> $GITHUB_OUTPUT
          else
            echo "⚠️ Failed to create Pull Request (HTTP $HTTP_CODE)"
            echo "Response: $RESPONSE_BODY"

            # Check if PR already exists
            if echo "$RESPONSE_BODY" | grep -q "already exists"; then
              echo "ℹ️ PR already exists for this branch"
              exit 0
            fi

            exit 1
          fi

      - name: Summary
        if: always()
        run: |
          echo "## 📊 README Update Summary" >> $GITHUB_STEP_SUMMARY

          if [ -f "README.md" ]; then
            echo "- ✅ README generated successfully" >> $GITHUB_STEP_SUMMARY

            if [ "${{ steps.check_changes.outputs.has_changes }}" = "true" ]; then
              echo "- ✅ Changes detected" >> $GITHUB_STEP_SUMMARY
              echo "- ✅ Pull Request created" >> $GITHUB_STEP_SUMMARY

              if [ -n "${{ steps.create_pr.outputs.pr_number }}" ]; then
                echo "" >> $GITHUB_STEP_SUMMARY
                echo "**PR:** [#${{ steps.create_pr.outputs.pr_number }}](${{ steps.create_pr.outputs.pr_url }})" >> $GITHUB_STEP_SUMMARY
              fi
            else
              echo "- ℹ️ No changes detected - README already up to date" >> $GITHUB_STEP_SUMMARY
            fi
          else
            echo "- ❌ README generation failed" >> $GITHUB_STEP_SUMMARY
          fi

          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Generated at:** $(date)" >> $GITHUB_STEP_SUMMARY
|
||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -10,13 +10,12 @@
|
||||
crash.log
|
||||
crash.*.log
|
||||
|
||||
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
|
||||
# password, private keys, and other secrets. These should not be part of version
|
||||
# control as they are data points which are potentially sensitive and subject
|
||||
# to change depending on the environment.
|
||||
*.tfvars
|
||||
*.tfvars.json
|
||||
!*terraform.tfvars
|
||||
|
||||
# claude ai
|
||||
.claude/
|
||||
# Ignore override files as they are usually used to override resources locally and so
|
||||
# are not checked in
|
||||
override.tf
|
||||
@@ -58,3 +57,4 @@ tags
|
||||
# Persistent undo
|
||||
[._]*.un~
|
||||
|
||||
.DS_Store
|
||||
|
||||
61
README.md
61
README.md
@@ -1,3 +1,62 @@
|
||||
# homelab
|
||||
|
||||
ArgoCD homelab project
|
||||
ArgoCD homelab project
|
||||
|
||||
## ArgoCD Applications Status
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td valign="top" width="50%">
|
||||
|
||||
### Core Applications
|
||||
|
||||
| Application | Status |
|
||||
| :--- | :---: |
|
||||
| **argocd** | [](https://ag.hexor.cy/applications/argocd/argocd) |
|
||||
| **authentik** | [](https://ag.hexor.cy/applications/argocd/authentik) |
|
||||
| **cert-manager** | [](https://ag.hexor.cy/applications/argocd/cert-manager) |
|
||||
| **external-secrets** | [](https://ag.hexor.cy/applications/argocd/external-secrets) |
|
||||
| **kube-system-custom** | [](https://ag.hexor.cy/applications/argocd/kube-system-custom) |
|
||||
| **kubernetes-dashboard** | [](https://ag.hexor.cy/applications/argocd/kubernetes-dashboard) |
|
||||
| **postgresql** | [](https://ag.hexor.cy/applications/argocd/postgresql) |
|
||||
| **prom-stack** | [](https://ag.hexor.cy/applications/argocd/prom-stack) |
|
||||
| **system-upgrade** | [](https://ag.hexor.cy/applications/argocd/system-upgrade) |
|
||||
|
||||
### Games
|
||||
|
||||
| Application | Status |
|
||||
| :--- | :---: |
|
||||
| **beam-ng** | [](https://ag.hexor.cy/applications/argocd/beam-ng) |
|
||||
| **counter-strike-16** | [](https://ag.hexor.cy/applications/argocd/counter-strike-16) |
|
||||
| **minecraft** | [](https://ag.hexor.cy/applications/argocd/minecraft) |
|
||||
</td>
|
||||
<td valign="top" width="50%">
|
||||
|
||||
### Applications
|
||||
|
||||
| Application | Status |
|
||||
| :--- | :---: |
|
||||
| **gitea** | [](https://ag.hexor.cy/applications/argocd/gitea) |
|
||||
| **greece-notifier** | [](https://ag.hexor.cy/applications/argocd/greece-notifier) |
|
||||
| **hexound** | [](https://ag.hexor.cy/applications/argocd/hexound) |
|
||||
| **immich** | [](https://ag.hexor.cy/applications/argocd/immich) |
|
||||
| **iperf3** | [](https://ag.hexor.cy/applications/argocd/iperf3) |
|
||||
| **jellyfin** | [](https://ag.hexor.cy/applications/argocd/jellyfin) |
|
||||
| **k8s-secrets** | [](https://ag.hexor.cy/applications/argocd/k8s-secrets) |
|
||||
| **khm** | [](https://ag.hexor.cy/applications/argocd/khm) |
|
||||
| **ollama** | [](https://ag.hexor.cy/applications/argocd/ollama) |
|
||||
| **paperless** | [](https://ag.hexor.cy/applications/argocd/paperless) |
|
||||
| **pasarguard** | [](https://ag.hexor.cy/applications/argocd/pasarguard) |
|
||||
| **qbittorent-nas** | [](https://ag.hexor.cy/applications/argocd/qbittorent-nas) |
|
||||
| **remnawave** | [](https://ag.hexor.cy/applications/argocd/remnawave) |
|
||||
| **rustdesk** | [](https://ag.hexor.cy/applications/argocd/rustdesk) |
|
||||
| **sonarr-stack** | [](https://ag.hexor.cy/applications/argocd/sonarr-stack) |
|
||||
| **stirling-pdf** | [](https://ag.hexor.cy/applications/argocd/stirling-pdf) |
|
||||
| **syncthing** | [](https://ag.hexor.cy/applications/argocd/syncthing) |
|
||||
| **tg-bots** | [](https://ag.hexor.cy/applications/argocd/tg-bots) |
|
||||
| **vaultwarden** | [](https://ag.hexor.cy/applications/argocd/vaultwarden) |
|
||||
| **vpn** | [](https://ag.hexor.cy/applications/argocd/vpn) |
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
@@ -30,6 +30,27 @@ spec:
|
||||
containers:
|
||||
- name: gitea
|
||||
image: 'gitea/gitea:latest'
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "200m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1500m"
|
||||
env:
|
||||
- name: GITEA__service__REGISTER_MANUAL_CONFIRM
|
||||
value: "true"
|
||||
- name: GITEA__service__ENABLE_CAPTCHA
|
||||
value: "false"
|
||||
- name: GITEA__service__REQUIRE_CAPTCHA_FOR_LOGIN
|
||||
value: "true"
|
||||
- name: GITEA__service__REQUIRE_EXTERNAL_REGISTRATION_CAPTCHA
|
||||
value: "true"
|
||||
- name: GITEA__service__CAPTCHA_TYPE
|
||||
value: "hcaptcha"
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: gitea-recapcha-creds
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 3000
|
||||
@@ -40,6 +61,7 @@ spec:
|
||||
volumeMounts:
|
||||
- name: storage
|
||||
mountPath: /data
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -56,20 +78,65 @@ spec:
|
||||
app: gitea-runner
|
||||
spec:
|
||||
nodeSelector:
|
||||
#kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
kubernetes.io/hostname: nas.homenet
|
||||
kubernetes.io/hostname: home.homenet
|
||||
volumes:
|
||||
- name: docker-sock
|
||||
hostPath:
|
||||
#path: /var/run/k3s/containerd/containerd.sock
|
||||
path: /var/run/docker.sock
|
||||
type: Socket
|
||||
- name: runner-data
|
||||
emptyDir:
|
||||
sizeLimit: 30Gi
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 3
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- home.homenet
|
||||
- weight: 1
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- master.tail2fe2d.ts.net
|
||||
- weight: 2
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- nas.homenet
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- home.homenet
|
||||
- nas.homenet
|
||||
- master.tail2fe2d.ts.net
|
||||
containers:
|
||||
- name: gitea-runner
|
||||
image: gitea/act_runner:nightly
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "256Mi"
|
||||
ephemeral-storage: "1Gi" # reserve ephemeral storage
|
||||
limits:
|
||||
cpu: "3000m"
|
||||
memory: "4Gi"
|
||||
ephemeral-storage: "28Gi" # hard cap for /data usage
|
||||
volumeMounts:
|
||||
- name: docker-sock
|
||||
mountPath: /var/run/docker.sock
|
||||
- name: runner-data
|
||||
mountPath: /data
|
||||
env:
|
||||
- name: GITEA_INSTANCE_URL
|
||||
value: "https://gt.hexor.cy"
|
||||
@@ -82,4 +149,3 @@ spec:
|
||||
value: "k8s-runner"
|
||||
- name: GITEA_RUNNER_LABELS
|
||||
value: "ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-latest,ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04,ubuntu-20.04:docker://ghcr.io/catthehacker/ubuntu:act-20.04"
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: gitea-runner-token
|
||||
@@ -23,3 +23,37 @@ spec:
|
||||
key: e475b5ab-ea3c-48a5-bb4c-a6bc552fc064
|
||||
property: login.password
|
||||
|
||||
---
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: gitea-recapcha-creds
|
||||
spec:
|
||||
refreshInterval: 1m
|
||||
target:
|
||||
name: gitea-recapcha-creds
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
GITEA__service__HCAPTCHA_SITEKEY: |-
|
||||
{{ .HCAPTCHA_SITEKEY }}
|
||||
GITEA__service__HCAPTCHA_SECRET: |-
|
||||
{{ .HCAPTCHA_SECRET }}
|
||||
data:
|
||||
- secretKey: HCAPTCHA_SITEKEY
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 89c8d8d2-6b53-42c5-805f-38a341ef163e
|
||||
property: login.username
|
||||
- secretKey: HCAPTCHA_SECRET
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 89c8d8d2-6b53-42c5-805f-38a341ef163e
|
||||
property: login.password
|
||||
21
k8s/apps/greece-notifier/app.yaml
Normal file
21
k8s/apps/greece-notifier/app.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: greece-notifier
|
||||
namespace: argocd
|
||||
spec:
|
||||
project: apps
|
||||
destination:
|
||||
namespace: greece-notifier
|
||||
server: https://kubernetes.default.svc
|
||||
source:
|
||||
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
|
||||
targetRevision: HEAD
|
||||
path: k8s/apps/greece-notifier
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
|
||||
51
k8s/apps/greece-notifier/deployment.yaml
Normal file
51
k8s/apps/greece-notifier/deployment.yaml
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: greece-notifier
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: greece-notifier
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: greece-notifier
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: nas.homenet
|
||||
volumes:
|
||||
- name: data
|
||||
nfs:
|
||||
server: nas.homenet
|
||||
path: /mnt/storage/Storage/k8s/greece-notifier/
|
||||
readOnly: false
|
||||
containers:
|
||||
- name: greece-notifier
|
||||
image: ultradesu/greece-notifier:master
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "256Mi"
|
||||
limits:
|
||||
cpu: "3000m"
|
||||
memory: "1Gi"
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "info"
|
||||
- name: ENDPOINT_BID
|
||||
value: "56" # Cyprus id
|
||||
- name: UPDATE_INTERVAL_MIN_SECS
|
||||
value: "270"
|
||||
- name: UPDATE_INTERVAL_MAX_SECS
|
||||
value: "350"
|
||||
- name: TELOXIDE_TOKEN
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: greece-notifier-creds
|
||||
key: TELOXIDE_TOKEN
|
||||
23
k8s/apps/greece-notifier/external-secrets.yaml
Normal file
23
k8s/apps/greece-notifier/external-secrets.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: greece-notifier-creds
|
||||
spec:
|
||||
target:
|
||||
name: greece-notifier-creds
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
TELOXIDE_TOKEN: |-
|
||||
{{ .token }}
|
||||
data:
|
||||
- secretKey: token
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 34e8f207-27ad-4b21-b114-84d3f7460a51
|
||||
property: login.password
|
||||
6
k8s/apps/greece-notifier/kustomization.yaml
Normal file
6
k8s/apps/greece-notifier/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- ./external-secrets.yaml
|
||||
- ./deployment.yaml
|
||||
@@ -24,6 +24,13 @@ spec:
|
||||
initContainers:
|
||||
- name: git-cloner
|
||||
image: alpine/git
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "300m"
|
||||
command:
|
||||
- git
|
||||
- clone
|
||||
@@ -36,6 +43,13 @@ spec:
|
||||
containers:
|
||||
- name: hexound
|
||||
image: trafex/php-nginx:3.8.0
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "300m"
|
||||
volumeMounts:
|
||||
- name: hexound-repo
|
||||
mountPath: /var/www/html
|
||||
|
||||
37
k8s/apps/hexound/ingress.yaml
Normal file
37
k8s/apps/hexound/ingress.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: hexound-tls-ingress
|
||||
annotations:
|
||||
ingressClassName: traefik
|
||||
cert-manager.io/cluster-issuer: letsencrypt
|
||||
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
|
||||
acme.cert-manager.io/http01-edit-in-place: "true"
|
||||
spec:
|
||||
rules:
|
||||
- host: hexound.hexor.cy
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: hexound
|
||||
port:
|
||||
number: 80
|
||||
- host: hexound.hexor.ru
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: hexound
|
||||
port:
|
||||
number: 80
|
||||
tls:
|
||||
- secretName: hexound-tls
|
||||
hosts:
|
||||
- hexound.hexor.cy
|
||||
- hexound.hexor.ru
|
||||
@@ -5,4 +5,5 @@ resources:
|
||||
- app.yaml
|
||||
- deployment.yaml
|
||||
- service.yaml
|
||||
- ingress.yaml
|
||||
|
||||
|
||||
@@ -17,6 +17,13 @@ spec:
|
||||
- name: immich-server
|
||||
image: ghcr.io/immich-app/immich-server:release
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "3000m"
|
||||
ports:
|
||||
- containerPort: 2283
|
||||
env:
|
||||
@@ -43,13 +50,20 @@ spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
- weight: 90
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- home.homenet
|
||||
- weight: 10
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- nas.homenet
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
@@ -72,7 +86,7 @@ spec:
|
||||
- name: camera
|
||||
nfs:
|
||||
server: nas.homenet
|
||||
path: /mnt/storage/Storage/Photos/Phone/
|
||||
path: /mnt/storage/Storage/Syncthing-repos/PhoneCamera/
|
||||
readOnly: true
|
||||
- name: localtime
|
||||
hostPath:
|
||||
@@ -127,7 +141,7 @@ spec:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- home.homenet
|
||||
- nas.homenet
|
||||
|
||||
topologySpreadConstraints:
|
||||
- maxSkew: 2
|
||||
@@ -140,6 +154,13 @@ spec:
|
||||
- name: immich-ml
|
||||
image: ghcr.io/immich-app/immich-machine-learning:release
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
requests:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
limits:
|
||||
memory: "8Gi"
|
||||
cpu: "6000m"
|
||||
env:
|
||||
- name: TZ
|
||||
value: Asia/Nicosia
|
||||
@@ -174,6 +195,13 @@ spec:
|
||||
containers:
|
||||
- name: redis
|
||||
image: redis:6.2-alpine
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "750m"
|
||||
readinessProbe:
|
||||
exec:
|
||||
command: ["redis-cli", "ping"]
|
||||
|
||||
21
k8s/apps/iperf3/app.yaml
Normal file
21
k8s/apps/iperf3/app.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: iperf3
|
||||
namespace: argocd
|
||||
spec:
|
||||
project: apps
|
||||
destination:
|
||||
namespace: iperf3
|
||||
server: https://kubernetes.default.svc
|
||||
source:
|
||||
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
|
||||
targetRevision: HEAD
|
||||
path: k8s/apps/iperf3
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
|
||||
92
k8s/apps/iperf3/daemonset.yaml
Normal file
92
k8s/apps/iperf3/daemonset.yaml
Normal file
@@ -0,0 +1,92 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: iperf3-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: iperf3-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: iperf3-server
|
||||
spec:
|
||||
serviceAccountName: iperf3-server
|
||||
subdomain: iperf3
|
||||
initContainers:
|
||||
- name: create-service
|
||||
image: bitnami/kubectl:latest
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
command:
|
||||
- /bin/bash
|
||||
- -c
|
||||
- |
|
||||
# Clean node name for service name
|
||||
NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
|
||||
SERVICE_NAME="iperf3-${NODE_CLEAN}"
|
||||
|
||||
# Create service for this pod
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ${SERVICE_NAME}
|
||||
namespace: iperf3
|
||||
labels:
|
||||
app: iperf3-node-service
|
||||
target-node: "${NODE_NAME}"
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: iperf3
|
||||
port: 5201
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
metadata:
|
||||
name: ${SERVICE_NAME}
|
||||
namespace: iperf3
|
||||
labels:
|
||||
app: iperf3-node-service
|
||||
target-node: "${NODE_NAME}"
|
||||
subsets:
|
||||
- addresses:
|
||||
- ip: ${POD_IP}
|
||||
ports:
|
||||
- name: iperf3
|
||||
port: 5201
|
||||
protocol: TCP
|
||||
EOF
|
||||
containers:
|
||||
- name: iperf3-server
|
||||
image: networkstatic/iperf3:latest
|
||||
args: ["-s"]
|
||||
ports:
|
||||
- containerPort: 5201
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "750m"
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
92
k8s/apps/iperf3/iperf3-exporter-daemonset.yaml
Normal file
92
k8s/apps/iperf3/iperf3-exporter-daemonset.yaml
Normal file
@@ -0,0 +1,92 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: iperf3-exporter
|
||||
labels:
|
||||
app: iperf3-exporter
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: iperf3-exporter
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: iperf3-exporter
|
||||
spec:
|
||||
serviceAccountName: iperf3-server
|
||||
initContainers:
|
||||
- name: create-exporter-service
|
||||
image: bitnami/kubectl:latest
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
command:
|
||||
- /bin/bash
|
||||
- -c
|
||||
- |
|
||||
NODE_CLEAN=$(echo "$NODE_NAME" | cut -d'.' -f1 | tr '[:upper:]' '[:lower:]' | tr '_' '-')
|
||||
SERVICE_NAME="iperf3-exporter-${NODE_CLEAN}"
|
||||
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ${SERVICE_NAME}
|
||||
namespace: iperf3
|
||||
labels:
|
||||
app: iperf3-exporter-service
|
||||
target-node: "${NODE_NAME}"
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: metrics
|
||||
port: 9579
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
metadata:
|
||||
name: ${SERVICE_NAME}
|
||||
namespace: iperf3
|
||||
labels:
|
||||
app: iperf3-exporter-service
|
||||
target-node: "${NODE_NAME}"
|
||||
subsets:
|
||||
- addresses:
|
||||
- ip: ${POD_IP}
|
||||
ports:
|
||||
- name: metrics
|
||||
port: 9579
|
||||
protocol: TCP
|
||||
EOF
|
||||
containers:
|
||||
- name: iperf3-exporter
|
||||
image: ghcr.io/edgard/iperf3_exporter:1.2.2
|
||||
ports:
|
||||
- containerPort: 9579
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "300m"
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
15
k8s/apps/iperf3/iperf3-exporter-service.yaml
Normal file
15
k8s/apps/iperf3/iperf3-exporter-service.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: iperf3-exporter
|
||||
labels:
|
||||
app: iperf3-exporter
|
||||
spec:
|
||||
selector:
|
||||
app: iperf3-exporter
|
||||
ports:
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 9579
|
||||
targetPort: 9579
|
||||
11
k8s/apps/iperf3/kustomization.yaml
Normal file
11
k8s/apps/iperf3/kustomization.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- rbac.yaml
|
||||
- daemonset.yaml
|
||||
- service-headless.yaml
|
||||
- iperf3-exporter-daemonset.yaml
|
||||
- iperf3-exporter-service.yaml
|
||||
- servicemonitor.yaml
|
||||
|
||||
36
k8s/apps/iperf3/rbac.yaml
Normal file
36
k8s/apps/iperf3/rbac.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: iperf3-server
|
||||
namespace: iperf3
|
||||
labels:
|
||||
app: iperf3-server
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: iperf3-service-manager
|
||||
namespace: iperf3
|
||||
labels:
|
||||
app: iperf3-server
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services", "endpoints"]
|
||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: iperf3-service-manager
|
||||
namespace: iperf3
|
||||
labels:
|
||||
app: iperf3-server
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: iperf3-service-manager
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: iperf3-server
|
||||
namespace: iperf3
|
||||
14
k8s/apps/iperf3/service-headless.yaml
Normal file
14
k8s/apps/iperf3/service-headless.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: iperf3
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
app: iperf3-server
|
||||
ports:
|
||||
- name: iperf3
|
||||
protocol: TCP
|
||||
port: 5201
|
||||
targetPort: 5201
|
||||
122
k8s/apps/iperf3/servicemonitor.yaml
Normal file
122
k8s/apps/iperf3/servicemonitor.yaml
Normal file
@@ -0,0 +1,122 @@
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: iperf3-exporter
|
||||
labels:
|
||||
app: iperf3-exporter
|
||||
release: prometheus
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: iperf3-exporter
|
||||
endpoints:
|
||||
- port: metrics
|
||||
path: /probe
|
||||
interval: 5m
|
||||
scrapeTimeout: 30s
|
||||
params:
|
||||
target: ['iperf3-ch.iperf3.svc.cluster.local:5201']
|
||||
period: ['10s']
|
||||
streams: ['4']
|
||||
relabelings:
|
||||
- sourceLabels: [__param_target]
|
||||
targetLabel: instance
|
||||
- targetLabel: __address__
|
||||
replacement: iperf3-exporter-ch.iperf3.svc:9579
|
||||
- port: metrics
|
||||
path: /probe
|
||||
interval: 5m
|
||||
scrapeTimeout: 30s
|
||||
params:
|
||||
target: ['iperf3-us.iperf3.svc.cluster.local:5201']
|
||||
period: ['10s']
|
||||
streams: ['4']
|
||||
relabelings:
|
||||
- sourceLabels: [__param_target]
|
||||
targetLabel: instance
|
||||
- targetLabel: __address__
|
||||
replacement: iperf3-exporter-us.iperf3.svc:9579
|
||||
- port: metrics
|
||||
path: /probe
|
||||
interval: 5m
|
||||
scrapeTimeout: 30s
|
||||
params:
|
||||
target: ['iperf3-iris.iperf3.svc.cluster.local:5201']
|
||||
period: ['10s']
|
||||
streams: ['4']
|
||||
relabelings:
|
||||
- sourceLabels: [__param_target]
|
||||
targetLabel: instance
|
||||
- targetLabel: __address__
|
||||
replacement: iperf3-exporter-iris.iperf3.svc:9579
|
||||
- port: metrics
|
||||
path: /probe
|
||||
interval: 5m
|
||||
scrapeTimeout: 30s
|
||||
params:
|
||||
target: ['iperf3-home.iperf3.svc.cluster.local:5201']
|
||||
period: ['10s']
|
||||
streams: ['4']
|
||||
relabelings:
|
||||
- sourceLabels: [__param_target]
|
||||
targetLabel: instance
|
||||
- targetLabel: __address__
|
||||
replacement: iperf3-exporter-home.iperf3.svc:9579
|
||||
- port: metrics
|
||||
path: /probe
|
||||
interval: 5m
|
||||
scrapeTimeout: 30s
|
||||
params:
|
||||
target: ['iperf3-master.iperf3.svc.cluster.local:5201']
|
||||
period: ['10s']
|
||||
streams: ['4']
|
||||
relabelings:
|
||||
- sourceLabels: [__param_target]
|
||||
targetLabel: instance
|
||||
- targetLabel: __address__
|
||||
replacement: iperf3-exporter-master.iperf3.svc:9579
|
||||
- port: metrics
|
||||
path: /probe
|
||||
interval: 5m
|
||||
scrapeTimeout: 30s
|
||||
params:
|
||||
target: ['iperf3-it.iperf3.svc.cluster.local:5201']
|
||||
period: ['10s']
|
||||
streams: ['4']
|
||||
relabelings:
|
||||
- sourceLabels: [__param_target]
|
||||
targetLabel: instance
|
||||
- targetLabel: __address__
|
||||
replacement: iperf3-exporter-it.iperf3.svc:9579
|
||||
- port: metrics
|
||||
path: /probe
|
||||
interval: 5m
|
||||
scrapeTimeout: 30s
|
||||
params:
|
||||
target: ['iperf3-nas.iperf3.svc.cluster.local:5201']
|
||||
period: ['10s']
|
||||
streams: ['4']
|
||||
relabelings:
|
||||
- sourceLabels: [__param_target]
|
||||
targetLabel: instance
|
||||
- targetLabel: __address__
|
||||
replacement: iperf3-exporter-nas.iperf3.svc:9579
|
||||
- port: metrics
|
||||
path: /probe
|
||||
interval: 5m
|
||||
scrapeTimeout: 30s
|
||||
params:
|
||||
target: ['iperf3-spb.iperf3.svc.cluster.local:5201']
|
||||
period: ['10s']
|
||||
streams: ['4']
|
||||
relabelings:
|
||||
- sourceLabels: [__param_target]
|
||||
targetLabel: instance
|
||||
- targetLabel: __address__
|
||||
replacement: iperf3-exporter-spb.iperf3.svc:9579
|
||||
metricRelabelings:
|
||||
- sourceLabels: [__name__]
|
||||
regex: iperf3_(.+)
|
||||
targetLabel: __name__
|
||||
replacement: network_${1}
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: vpn-creds
|
||||
@@ -76,11 +76,14 @@ spec:
|
||||
secretKeyRef:
|
||||
name: vpn-creds
|
||||
key: ss_link
|
||||
command: ["/bin/bash", "-c", "rm /etc/shadowsocks-rust/config.json && sslocal --online-config-url $SS_LINK --local-addr 127.0.0.1:8081 -U --protocol http"]
|
||||
command: ["/bin/bash", "-c", "rm /etc/shadowsocks-rust/config.json && sslocal --server-url $SS_LINK --local-addr 127.0.0.1:8081 -U --protocol http"]
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "200m"
|
||||
cpu: "300m"
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "300m"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
|
||||
@@ -1,5 +1,12 @@
|
||||
image:
|
||||
tag: 10.10.7
|
||||
tag: 10.11.4
|
||||
resources:
|
||||
requests:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
limits:
|
||||
memory: "8Gi"
|
||||
cpu: "6000m"
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
persistence:
|
||||
@@ -29,8 +36,40 @@ ingress:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
- host: us.hexor.cy
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
- host: ch.hexor.cy
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
- host: jp.hexor.cy
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
- host: spb.hexor.cy
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
- host: cy.hexor.cy
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
- host: am.hexor.cy
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
- host: de.hexor.cy
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
- host: it.hexor.cy
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
tls:
|
||||
- secretName: jellyfin-tls
|
||||
hosts:
|
||||
- 'jf.hexor.cy'
|
||||
- '*.hexor.cy'
|
||||
|
||||
|
||||
21
k8s/apps/k8s-secrets/app.yaml
Normal file
21
k8s/apps/k8s-secrets/app.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: k8s-secrets
|
||||
namespace: argocd
|
||||
spec:
|
||||
project: apps
|
||||
destination:
|
||||
namespace: k8s-secret
|
||||
server: https://kubernetes.default.svc
|
||||
source:
|
||||
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
|
||||
targetRevision: HEAD
|
||||
path: k8s/apps/k8s-secrets
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
|
||||
63
k8s/apps/k8s-secrets/deployment.yaml
Normal file
63
k8s/apps/k8s-secrets/deployment.yaml
Normal file
@@ -0,0 +1,63 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: secret-reader
|
||||
labels:
|
||||
app: secret-reader
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: secret-reader
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: secret-reader
|
||||
spec:
|
||||
serviceAccountName: secret-reader
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
containers:
|
||||
- name: secret-reader
|
||||
image: ultradesu/k8s-secrets:0.1.1
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
- "--secrets"
|
||||
- "openai-creds"
|
||||
- "--namespace"
|
||||
- "k8s-secret"
|
||||
- "--port"
|
||||
- "3000"
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
name: http
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "info"
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "150m"
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: http
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: http
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
44
k8s/apps/k8s-secrets/external-secret.yaml
Normal file
44
k8s/apps/k8s-secrets/external-secret.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: openai-creds
|
||||
spec:
|
||||
target:
|
||||
name: openai-creds
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
USER: |-
|
||||
{{ .user }}
|
||||
PASS: |-
|
||||
{{ .pass }}
|
||||
TOTP: |-
|
||||
{{ .totp }}
|
||||
data:
|
||||
- secretKey: user
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: a485f323-fd47-40ee-a5cf-40891b1f963c
|
||||
property: login.username
|
||||
- secretKey: pass
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: a485f323-fd47-40ee-a5cf-40891b1f963c
|
||||
property: login.password
|
||||
- secretKey: totp
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: a485f323-fd47-40ee-a5cf-40891b1f963c
|
||||
property: login.totp
|
||||
|
||||
20
k8s/apps/k8s-secrets/rbac.yaml
Normal file
20
k8s/apps/k8s-secrets/rbac.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: secret-reader
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: secret-reader
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: secret-reader
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: secret-reader
|
||||
6
k8s/apps/k8s-secrets/service-account.yaml
Normal file
6
k8s/apps/k8s-secrets/service-account.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: secret-reader
|
||||
labels:
|
||||
app: secret-reader
|
||||
15
k8s/apps/k8s-secrets/service.yaml
Normal file
15
k8s/apps/k8s-secrets/service.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: secret-reader
|
||||
labels:
|
||||
app: secret-reader
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: secret-reader
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 3000
|
||||
protocol: TCP
|
||||
name: http
|
||||
@@ -23,6 +23,13 @@ spec:
|
||||
- name: khm
|
||||
image: 'ultradesu/khm:latest'
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "750m"
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: khm-pg-creds
|
||||
|
||||
21
k8s/apps/ollama/app.yaml
Normal file
21
k8s/apps/ollama/app.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: ollama
|
||||
namespace: argocd
|
||||
spec:
|
||||
project: apps
|
||||
destination:
|
||||
namespace: ollama
|
||||
server: https://kubernetes.default.svc
|
||||
source:
|
||||
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
|
||||
targetRevision: HEAD
|
||||
path: k8s/apps/ollama
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
|
||||
33
k8s/apps/ollama/external-secrets.yaml
Normal file
33
k8s/apps/ollama/external-secrets.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: oidc-secret
|
||||
spec:
|
||||
target:
|
||||
name: oidc-secret
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
OAUTH_CLIENT_SECRET: |-
|
||||
{{ .OAUTH_CLIENT_SECRET }}
|
||||
OAUTH_CLIENT_ID: |-
|
||||
{{ .OAUTH_CLIENT_ID }}
|
||||
data:
|
||||
- secretKey: OAUTH_CLIENT_SECRET
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
|
||||
property: fields[0].value
|
||||
- secretKey: OAUTH_CLIENT_ID
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 97959a8b-e3b2-4b34-bc54-ddb6476a12ea
|
||||
property: fields[1].value
|
||||
21
k8s/apps/ollama/kustomization.yaml
Normal file
21
k8s/apps/ollama/kustomization.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- external-secrets.yaml
|
||||
|
||||
helmCharts:
|
||||
- name: ollama
|
||||
repo: https://otwld.github.io/ollama-helm/
|
||||
version: 0.4.0
|
||||
releaseName: ollama
|
||||
namespace: ollama
|
||||
valuesFile: ollama-values.yaml
|
||||
includeCRDs: true
|
||||
- name: open-webui
|
||||
repo: https://helm.openwebui.com/
|
||||
version: 8.14.0
|
||||
releaseName: openweb-ui
|
||||
namespace: ollama
|
||||
valuesFile: openweb-ui-values.yaml
|
||||
includeCRDs: true
|
||||
8
k8s/apps/ollama/ollama-values.yaml
Normal file
8
k8s/apps/ollama/ollama-values.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
image:
|
||||
repository: ollama/ollama
|
||||
pullPolicy: Always
|
||||
tag: "latest"
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
ingress:
|
||||
enabled: false
|
||||
52
k8s/apps/ollama/openweb-ui-values.yaml
Normal file
52
k8s/apps/ollama/openweb-ui-values.yaml
Normal file
@@ -0,0 +1,52 @@
|
||||
clusterDomain: ai.hexor.cy
|
||||
|
||||
extraEnvVars:
|
||||
GLOBAL_LOG_LEVEL: debug
|
||||
OAUTH_PROVIDER_NAME: authentik
|
||||
OPENID_PROVIDER_URL: https://idm.hexor.cy/application/o/openwebui/.well-known/openid-configuration
|
||||
OPENID_REDIRECT_URI: https://ai.hexor.cy/oauth/oidc/callback
|
||||
WEBUI_URL: https://ai.hexor.cy
|
||||
# Allows auto-creation of new users using OAuth. Must be paired with ENABLE_LOGIN_FORM=false.
|
||||
ENABLE_OAUTH_SIGNUP: true
|
||||
# Disables user/password login form. Required when ENABLE_OAUTH_SIGNUP=true.
|
||||
ENABLE_LOGIN_FORM: false
|
||||
OAUTH_MERGE_ACCOUNTS_BY_EMAIL: true
|
||||
|
||||
extraEnvFrom:
|
||||
- secretRef:
|
||||
name: oidc-secret
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
ollamaUrls:
|
||||
- http://ollama.ollama.svc:11434
|
||||
ollama:
|
||||
enabled: false
|
||||
ollama:
|
||||
gpu:
|
||||
enabled: false
|
||||
models:
|
||||
pull:
|
||||
- qwen3-vl:8b
|
||||
run:
|
||||
- qwen3-vl:8b
|
||||
|
||||
pipelines:
|
||||
enabled: true
|
||||
|
||||
tika:
|
||||
enabled: true
|
||||
|
||||
websocket:
|
||||
enabled: true
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
class: traefik
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: letsencrypt
|
||||
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
|
||||
host: "ai.hexor.cy"
|
||||
tls:
|
||||
- hosts:
|
||||
- '*.hexor.cy'
|
||||
secretName: ollama-tls
|
||||
@@ -1,8 +1,8 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: postgres-creds
|
||||
name: postgres-and-oauth-creds
|
||||
spec:
|
||||
target:
|
||||
name: postgres-creds
|
||||
|
||||
@@ -8,12 +8,12 @@ nodeSelector:
|
||||
kubernetes.io/hostname: nas.homenet
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1000m
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 256Mi
|
||||
memory: "512Mi"
|
||||
cpu: "200m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1500m"
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
|
||||
@@ -1,8 +1,22 @@
|
||||
image:
|
||||
tag: 2.15.3
|
||||
tag: 2.19.3
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "3000m"
|
||||
initContainers:
|
||||
install-tesseract-langs:
|
||||
image: ghcr.io/paperless-ngx/paperless-ngx:2.15.1
|
||||
image: ghcr.io/paperless-ngx/paperless-ngx:2.18.2
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "750m"
|
||||
command: ["/bin/sh", "-c"]
|
||||
args:
|
||||
- apt-get update && apt-get install -y --reinstall tesseract-ocr-rus tesseract-ocr-jpn tesseract-ocr-chi-sim tesseract-ocr-eng tesseract-ocr-ell && cp -v -r /usr/share/tesseract-ocr/5/tessdata/* /custom-tessdata/
|
||||
|
||||
@@ -8,12 +8,12 @@ nodeSelector:
|
||||
kubernetes.io/hostname: nas.homenet
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "750m"
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
|
||||
21
k8s/apps/pasarguard/app.yaml
Normal file
21
k8s/apps/pasarguard/app.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: pasarguard
|
||||
namespace: argocd
|
||||
spec:
|
||||
project: apps
|
||||
destination:
|
||||
namespace: pasarguard
|
||||
server: https://kubernetes.default.svc
|
||||
source:
|
||||
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
|
||||
targetRevision: HEAD
|
||||
path: k8s/apps/pasarguard
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
|
||||
14
k8s/apps/pasarguard/certificate.yaml
Normal file
14
k8s/apps/pasarguard/certificate.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: pasarguard-tls
|
||||
labels:
|
||||
app: pasarguard
|
||||
spec:
|
||||
secretName: pasarguard-tls
|
||||
issuerRef:
|
||||
name: letsencrypt
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- ps.hexor.cy
|
||||
212
k8s/apps/pasarguard/configmap-scripts-ingress.yaml
Normal file
212
k8s/apps/pasarguard/configmap-scripts-ingress.yaml
Normal file
@@ -0,0 +1,212 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: pasarguard-scripts-ingress
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
data:
|
||||
init-uuid-ingress.sh: |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
echo "Started"
|
||||
# NODE_NAME is already set via environment variable
|
||||
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
|
||||
|
||||
# Get DNS name from node label xray-public-address
|
||||
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-public-address}')
|
||||
|
||||
if [ -z "${DNS_NAME}" ]; then
|
||||
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-public-address'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Node: ${NODE_NAME}"
|
||||
echo "DNS Name from label: ${DNS_NAME}"
|
||||
|
||||
# Use DNS name for ConfigMap name to ensure uniqueness
|
||||
CONFIGMAP_NAME="node-uuid-ingress-${DNS_NAME//./-}"
|
||||
|
||||
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
|
||||
|
||||
# Check if ConfigMap exists and get UUID
|
||||
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
|
||||
echo "ConfigMap exists, reading UUID..."
|
||||
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
|
||||
|
||||
if [ -z "${API_KEY}" ]; then
|
||||
echo "UUID not found in ConfigMap, generating new one..."
|
||||
API_KEY=$(cat /proc/sys/kernel/random/uuid)
|
||||
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
|
||||
else
|
||||
echo "Using existing UUID from ConfigMap"
|
||||
fi
|
||||
else
|
||||
echo "ConfigMap does not exist, creating new one..."
|
||||
API_KEY=$(cat /proc/sys/kernel/random/uuid)
|
||||
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
|
||||
--from-literal=API_KEY="${API_KEY}" \
|
||||
--from-literal=NODE_NAME="${NODE_NAME}"
|
||||
fi
|
||||
|
||||
# Save UUID and node info to shared volume for the main container
|
||||
echo -n "${API_KEY}" > /shared/api-key
|
||||
echo -n "${NODE_NAME}" > /shared/node-name
|
||||
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
|
||||
echo "UUID initialized: ${API_KEY}"
|
||||
echo "Node name: ${NODE_NAME}"
|
||||
echo "ConfigMap: ${CONFIGMAP_NAME}"
|
||||
|
||||
# Create Certificate for this node using DNS name from label
|
||||
CERT_NAME="pasarguard-node-ingress-${DNS_NAME//./-}"
|
||||
|
||||
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
|
||||
|
||||
# Check if Certificate already exists
|
||||
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
|
||||
echo "Certificate does not exist, creating..."
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: ${CERT_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
spec:
|
||||
secretName: ${CERT_NAME}-tls
|
||||
issuerRef:
|
||||
name: letsencrypt
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- ${DNS_NAME}
|
||||
EOF
|
||||
else
|
||||
echo "Certificate already exists"
|
||||
fi
|
||||
|
||||
# Wait for certificate to be ready
|
||||
|
||||
echo "Waiting for certificate to be ready..."
|
||||
for i in {1..600}; do
|
||||
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
|
||||
echo "Certificate secret is ready!"
|
||||
break
|
||||
fi
|
||||
echo "Waiting for certificate... ($i/600)"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
|
||||
echo "WARNING: Certificate secret not ready after 600 seconds"
|
||||
else
|
||||
# Extract certificate and key from secret to shared volume
|
||||
echo "Extracting certificate and key..."
|
||||
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
|
||||
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
|
||||
echo "Certificate and key extracted successfully."
|
||||
cat /shared/tls.crt
|
||||
fi
|
||||
|
||||
# Create ClusterIP Service for this node (pod selector based)
|
||||
NODE_SHORT_NAME="${NODE_NAME%%.*}"
|
||||
SERVICE_NAME="${NODE_SHORT_NAME}-ingress"
|
||||
|
||||
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME})"
|
||||
|
||||
# Create Service with pod selector including node name
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ${SERVICE_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
node: ${NODE_NAME}
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: pasarguard-node-ingress
|
||||
node-name: ${NODE_SHORT_NAME}
|
||||
ports:
|
||||
- name: proxy
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: 443
|
||||
- name: api
|
||||
port: 62050
|
||||
protocol: TCP
|
||||
targetPort: 62050
|
||||
EOF
|
||||
|
||||
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local"
|
||||
|
||||
# Create IngressRouteTCP for this DNS name with TLS passthrough
|
||||
INGRESS_NAME="pasarguard-tcp-${DNS_NAME//./-}"
|
||||
|
||||
echo "Creating IngressRouteTCP: ${INGRESS_NAME} for ${DNS_NAME}"
|
||||
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: traefik.io/v1alpha1
|
||||
kind: IngressRouteTCP
|
||||
metadata:
|
||||
name: ${INGRESS_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
node: ${NODE_NAME}
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: HostSNI(\`${DNS_NAME}\`)
|
||||
services:
|
||||
- name: ${SERVICE_NAME}
|
||||
port: 443
|
||||
tls:
|
||||
passthrough: true
|
||||
EOF
|
||||
|
||||
echo "IngressRouteTCP created: ${INGRESS_NAME}"
|
||||
echo "Traffic to ${DNS_NAME}:443 will be routed to ${SERVICE_NAME}:443"
|
||||
|
||||
# Create second IngressRouteTCP for API port 62051
|
||||
INGRESS_API_NAME="pasarguard-api-${DNS_NAME//./-}"
|
||||
|
||||
echo "Creating IngressRouteTCP for API: ${INGRESS_API_NAME} for ${DNS_NAME}:62051"
|
||||
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: traefik.io/v1alpha1
|
||||
kind: IngressRouteTCP
|
||||
metadata:
|
||||
name: ${INGRESS_API_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
node: ${NODE_NAME}
|
||||
spec:
|
||||
entryPoints:
|
||||
- pasarguard-api
|
||||
routes:
|
||||
- match: HostSNI(\`${DNS_NAME}\`)
|
||||
services:
|
||||
- name: ${SERVICE_NAME}
|
||||
port: 62050
|
||||
tls:
|
||||
passthrough: true
|
||||
EOF
|
||||
|
||||
echo "IngressRouteTCP API created: ${INGRESS_API_NAME}"
|
||||
echo "Traffic to ${DNS_NAME}:62051 will be routed to ${SERVICE_NAME}:62050"
|
||||
|
||||
pasarguard-start.sh: |
|
||||
#!/bin/sh
|
||||
# Read API_KEY from shared volume created by init container
|
||||
if [ -f /shared/api-key ]; then
|
||||
export API_KEY=$(cat /shared/api-key)
|
||||
echo "Loaded API_KEY from shared volume"
|
||||
else
|
||||
echo "WARNING: API_KEY file not found, using default"
|
||||
fi
|
||||
|
||||
cd /app
|
||||
exec ./main
|
||||
264
k8s/apps/pasarguard/configmap-scripts.yaml
Normal file
264
k8s/apps/pasarguard/configmap-scripts.yaml
Normal file
@@ -0,0 +1,264 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: pasarguard-scripts
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
data:
|
||||
init-uuid.sh: |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
echo "Started"
|
||||
# NODE_NAME is already set via environment variable
|
||||
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
|
||||
|
||||
# Get DNS name from node label xray-node-address
|
||||
DNS_NAME=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.metadata.labels.xray-node-address}')
|
||||
|
||||
if [ -z "${DNS_NAME}" ]; then
|
||||
echo "ERROR: Node ${NODE_NAME} does not have label 'xray-node-address'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Node: ${NODE_NAME}"
|
||||
echo "DNS Name from label: ${DNS_NAME}"
|
||||
|
||||
# Use DNS name for ConfigMap name to ensure uniqueness
|
||||
CONFIGMAP_NAME="node-uuid-${DNS_NAME//./-}"
|
||||
|
||||
echo "Checking ConfigMap: ${CONFIGMAP_NAME}"
|
||||
|
||||
# Check if ConfigMap exists and get UUID
|
||||
if kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" &>/dev/null; then
|
||||
echo "ConfigMap exists, reading UUID..."
|
||||
API_KEY=$(kubectl get configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" -o jsonpath='{.data.API_KEY}')
|
||||
|
||||
if [ -z "${API_KEY}" ]; then
|
||||
echo "UUID not found in ConfigMap, generating new one..."
|
||||
API_KEY=$(cat /proc/sys/kernel/random/uuid)
|
||||
kubectl patch configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" --type merge -p "{\"data\":{\"API_KEY\":\"${API_KEY}\"}}"
|
||||
else
|
||||
echo "Using existing UUID from ConfigMap"
|
||||
fi
|
||||
else
|
||||
echo "ConfigMap does not exist, creating new one..."
|
||||
API_KEY=$(cat /proc/sys/kernel/random/uuid)
|
||||
kubectl create configmap "${CONFIGMAP_NAME}" -n "${NAMESPACE}" \
|
||||
--from-literal=API_KEY="${API_KEY}" \
|
||||
--from-literal=NODE_NAME="${NODE_NAME}"
|
||||
fi
|
||||
|
||||
# Save UUID and node info to shared volume for the main container
|
||||
echo -n "${API_KEY}" > /shared/api-key
|
||||
echo -n "${NODE_NAME}" > /shared/node-name
|
||||
echo -n "${CONFIGMAP_NAME}" > /shared/configmap-name
|
||||
echo "UUID initialized: ${API_KEY}"
|
||||
echo "Node name: ${NODE_NAME}"
|
||||
echo "ConfigMap: ${CONFIGMAP_NAME}"
|
||||
|
||||
# Create Certificate for this node using DNS name from label
|
||||
CERT_NAME="pasarguard-node-${DNS_NAME//./-}"
|
||||
|
||||
echo "Creating Certificate: ${CERT_NAME} for ${DNS_NAME}"
|
||||
|
||||
# Check if Certificate already exists
|
||||
if ! kubectl get certificate "${CERT_NAME}" -n "${NAMESPACE}" &>/dev/null; then
|
||||
echo "Certificate does not exist, creating..."
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: ${CERT_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
spec:
|
||||
secretName: ${CERT_NAME}-tls
|
||||
issuerRef:
|
||||
name: letsencrypt
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- ${DNS_NAME}
|
||||
EOF
|
||||
else
|
||||
echo "Certificate already exists"
|
||||
fi
|
||||
|
||||
# Wait for certificate to be ready
|
||||
|
||||
echo "Waiting for certificate to be ready..."
|
||||
for i in {1..600}; do
|
||||
if kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
|
||||
echo "Certificate secret is ready!"
|
||||
break
|
||||
fi
|
||||
echo "Waiting for certificate... ($i/600)"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
if ! kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" &>/dev/null; then
|
||||
echo "WARNING: Certificate secret not ready after 600 seconds"
|
||||
else
|
||||
# Extract certificate and key from secret to shared volume
|
||||
echo "Extracting certificate and key..."
|
||||
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.crt}' | base64 -d > /shared/tls.crt
|
||||
kubectl get secret "${CERT_NAME}-tls" -n "${NAMESPACE}" -o jsonpath='{.data.tls\.key}' | base64 -d > /shared/tls.key
|
||||
echo "Certificate and key extracted successfully."
|
||||
cat /shared/tls.crt
|
||||
fi
|
||||
|
||||
# Create individual Service and Endpoints for this node
|
||||
# Take only first part of node name before first dot
|
||||
NODE_SHORT_NAME="${NODE_NAME%%.*}"
|
||||
SERVICE_NAME="${NODE_SHORT_NAME}"
|
||||
|
||||
# Get node internal IP (take only first IP if multiple)
|
||||
NODE_IP=$(kubectl get node "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
|
||||
|
||||
echo "Creating Service: ${SERVICE_NAME} for node ${NODE_NAME} (short: ${NODE_SHORT_NAME}) with IP ${NODE_IP}"
|
||||
|
||||
# Create Service without selector
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ${SERVICE_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
node: ${NODE_NAME}
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
- name: api
|
||||
port: 62050
|
||||
protocol: TCP
|
||||
targetPort: 62050
|
||||
- name: metrics
|
||||
port: 9550
|
||||
protocol: TCP
|
||||
targetPort: 9550
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
metadata:
|
||||
name: ${SERVICE_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
node: ${NODE_NAME}
|
||||
subsets:
|
||||
- addresses:
|
||||
- ip: ${NODE_IP}
|
||||
nodeName: ${NODE_NAME}
|
||||
ports:
|
||||
- name: api
|
||||
port: 62050
|
||||
protocol: TCP
|
||||
- name: metrics
|
||||
port: 9550
|
||||
protocol: TCP
|
||||
EOF
|
||||
|
||||
echo "Service created: ${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local -> ${NODE_IP}:62050"
|
||||
|
||||
exporter-start.sh: |
|
||||
#!/bin/sh
|
||||
# Install required tools
|
||||
apk add --no-cache wget curl iproute2-ss bash
|
||||
|
||||
# Download v2ray-exporter
|
||||
echo "Downloading v2ray-exporter..."
|
||||
ARCH=$(uname -m)
|
||||
case $ARCH in
|
||||
x86_64)
|
||||
BINARY_ARCH="amd64"
|
||||
;;
|
||||
aarch64|arm64)
|
||||
BINARY_ARCH="arm64"
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported architecture: $ARCH"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "Detected architecture: $ARCH, using binary: v2ray-exporter_linux_$BINARY_ARCH"
|
||||
wget -L -O /tmp/v2ray-exporter "https://github.com/wi1dcard/v2ray-exporter/releases/download/v0.6.0/v2ray-exporter_linux_$BINARY_ARCH"
|
||||
mv /tmp/v2ray-exporter /usr/local/bin/v2ray-exporter
|
||||
chmod +x /usr/local/bin/v2ray-exporter
|
||||
|
||||
# Wait for initial API port file
|
||||
echo "Waiting for initial xray API port file..."
|
||||
while [ ! -f /shared/xray-api-port ]; do
|
||||
echo "Waiting for API port file..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
# Main loop - restart exporter if it crashes or port changes
|
||||
while true; do
|
||||
if [ -f /shared/xray-api-port ]; then
|
||||
API_PORT=$(cat /shared/xray-api-port)
|
||||
if [ -n "$API_PORT" ]; then
|
||||
echo "Starting v2ray-exporter with endpoint 127.0.0.1:$API_PORT"
|
||||
/usr/local/bin/v2ray-exporter --v2ray-endpoint "127.0.0.1:$API_PORT" --listen ":9550" &
|
||||
EXPORTER_PID=$!
|
||||
|
||||
# Wait for exporter to exit or port file to change
|
||||
while kill -0 $EXPORTER_PID 2>/dev/null; do
|
||||
if [ -f /shared/xray-api-port ]; then
|
||||
NEW_PORT=$(cat /shared/xray-api-port)
|
||||
if [ "$NEW_PORT" != "$API_PORT" ]; then
|
||||
echo "API port changed from $API_PORT to $NEW_PORT, restarting exporter"
|
||||
kill $EXPORTER_PID 2>/dev/null
|
||||
wait $EXPORTER_PID 2>/dev/null
|
||||
break
|
||||
fi
|
||||
fi
|
||||
sleep 5
|
||||
done
|
||||
|
||||
echo "Exporter stopped, restarting..."
|
||||
wait $EXPORTER_PID 2>/dev/null
|
||||
fi
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
|
||||
pasarguard-start.sh: |
|
||||
#!/bin/sh
|
||||
# Read API_KEY from shared volume created by init container
|
||||
if [ -f /shared/api-key ]; then
|
||||
export API_KEY=$(cat /shared/api-key)
|
||||
echo "Loaded API_KEY from shared volume"
|
||||
else
|
||||
echo "WARNING: API_KEY file not found, using default"
|
||||
fi
|
||||
|
||||
cd /app
|
||||
|
||||
# Start main process in background
|
||||
./main &
|
||||
MAIN_PID=$!
|
||||
|
||||
# Start continuous port monitoring in background
|
||||
{
|
||||
sleep 10 # Wait for xray to start initially
|
||||
LAST_PORT=""
|
||||
|
||||
while true; do
|
||||
API_PORT=$(netstat -tlpn | grep xray | grep 127.0.0.1 | awk '{print $4}' | cut -d: -f2 | head -1)
|
||||
if [ -n "$API_PORT" ] && [ "$API_PORT" != "$LAST_PORT" ]; then
|
||||
echo "Found xray API port: $API_PORT"
|
||||
echo -n "$API_PORT" > /shared/xray-api-port
|
||||
LAST_PORT="$API_PORT"
|
||||
fi
|
||||
sleep 5 # Check every 5 seconds
|
||||
done
|
||||
} &
|
||||
PORT_MONITOR_PID=$!
|
||||
|
||||
# Wait for main process to finish
|
||||
wait $MAIN_PID
|
||||
|
||||
# Clean up port monitor
|
||||
kill $PORT_MONITOR_PID 2>/dev/null
|
||||
211
k8s/apps/pasarguard/daemonset-ingress.yaml
Normal file
211
k8s/apps/pasarguard/daemonset-ingress.yaml
Normal file
@@ -0,0 +1,211 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: pasarguard-node-ingress
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: pasarguard-node-ingress-configmap
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "list", "create", "update", "patch"]
|
||||
- apiGroups: ["cert-manager.io"]
|
||||
resources: ["certificates"]
|
||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services", "endpoints"]
|
||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
||||
- apiGroups: ["traefik.io", "traefik.containo.us"]
|
||||
resources: ["ingressroutetcps"]
|
||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "patch", "update"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: pasarguard-node-ingress-configmap
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: pasarguard-node-ingress-configmap
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: pasarguard-node-ingress
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: pasarguard-node-ingress-reader
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: pasarguard-node-ingress-reader
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: pasarguard-node-ingress-reader
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: pasarguard-node-ingress
|
||||
namespace: pasarguard
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: pasarguard-node-ingress
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: pasarguard-node-ingress
|
||||
revisionHistoryLimit: 3
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: pasarguard-node-ingress
|
||||
spec:
|
||||
serviceAccountName: pasarguard-node-ingress
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: xray-public-address
|
||||
operator: Exists
|
||||
initContainers:
|
||||
- name: label-pod
|
||||
image: bitnami/kubectl:latest
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
command:
|
||||
- /bin/bash
|
||||
- -c
|
||||
- |
|
||||
# Add node label to pod
|
||||
NODE_SHORT=$(echo ${NODE_NAME} | cut -d. -f1)
|
||||
kubectl label pod ${POD_NAME} -n ${POD_NAMESPACE} node-name=${NODE_SHORT} --overwrite
|
||||
- name: init-uuid
|
||||
image: bitnami/kubectl:latest
|
||||
env:
|
||||
- name: GODEBUG
|
||||
value: "x509sha1=1"
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
command:
|
||||
- /bin/bash
|
||||
- /scripts/init-uuid-ingress.sh
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /shared
|
||||
- name: scripts
|
||||
mountPath: /scripts
|
||||
containers:
|
||||
- name: pasarguard-node
|
||||
image: 'pasarguard/node:v0.1.3'
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- /bin/sh
|
||||
- /scripts/pasarguard-start.sh
|
||||
ports:
|
||||
- name: api
|
||||
containerPort: 62050
|
||||
protocol: TCP
|
||||
- name: proxy
|
||||
containerPort: 443
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: NODE_HOST
|
||||
value: "0.0.0.0"
|
||||
- name: SERVICE_PORT
|
||||
value: "62050"
|
||||
- name: SERVICE_PROTOCOL
|
||||
value: "grpc"
|
||||
- name: DEBUG
|
||||
value: "true"
|
||||
- name: SSL_CERT_FILE
|
||||
value: "/shared/tls.crt"
|
||||
- name: SSL_KEY_FILE
|
||||
value: "/shared/tls.key"
|
||||
- name: XRAY_EXECUTABLE_PATH
|
||||
value: "/usr/local/bin/xray"
|
||||
- name: XRAY_ASSETS_PATH
|
||||
value: "/usr/local/share/xray"
|
||||
- name: API_KEY
|
||||
value: "change-this-to-a-secure-uuid"
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 62050
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 62050
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "750m"
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /shared
|
||||
readOnly: false
|
||||
- name: scripts
|
||||
mountPath: /scripts
|
||||
volumes:
|
||||
- name: shared-data
|
||||
emptyDir: {}
|
||||
- name: scripts
|
||||
configMap:
|
||||
name: pasarguard-scripts-ingress
|
||||
defaultMode: 0755
|
||||
221
k8s/apps/pasarguard/daemonset.yaml
Normal file
221
k8s/apps/pasarguard/daemonset.yaml
Normal file
@@ -0,0 +1,221 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: pasarguard-node
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: pasarguard-node-configmap
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "list", "create", "update", "patch"]
|
||||
- apiGroups: ["cert-manager.io"]
|
||||
resources: ["certificates"]
|
||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services", "endpoints"]
|
||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: pasarguard-node-configmap
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: pasarguard-node-configmap
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: pasarguard-node
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: pasarguard-node-reader
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: pasarguard-node-reader
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: pasarguard-node-reader
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: pasarguard-node
|
||||
namespace: pasarguard
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: pasarguard-node
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: pasarguard-node
|
||||
revisionHistoryLimit: 3
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
spec:
|
||||
serviceAccountName: pasarguard-node
|
||||
hostNetwork: true
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: xray-node-address
|
||||
operator: Exists
|
||||
initContainers:
|
||||
- name: init-uuid
|
||||
image: bitnami/kubectl:latest
|
||||
env:
|
||||
- name: GODEBUG
|
||||
value: "x509sha1=1"
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
command:
|
||||
- /bin/bash
|
||||
- /scripts/init-uuid.sh
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /shared
|
||||
- name: scripts
|
||||
mountPath: /scripts
|
||||
containers:
|
||||
- name: pasarguard-node
|
||||
image: 'pasarguard/node:v0.1.3'
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- /bin/sh
|
||||
- /scripts/pasarguard-start.sh
|
||||
ports:
|
||||
- name: api
|
||||
containerPort: 62050
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: NODE_HOST
|
||||
value: "0.0.0.0"
|
||||
- name: SERVICE_PORT
|
||||
value: "62050"
|
||||
- name: SERVICE_PROTOCOL
|
||||
value: "grpc"
|
||||
- name: DEBUG
|
||||
value: "true"
|
||||
- name: SSL_CERT_FILE
|
||||
value: "/shared/tls.crt"
|
||||
- name: SSL_KEY_FILE
|
||||
value: "/shared/tls.key"
|
||||
- name: XRAY_EXECUTABLE_PATH
|
||||
value: "/usr/local/bin/xray"
|
||||
- name: XRAY_ASSETS_PATH
|
||||
value: "/usr/local/share/xray"
|
||||
- name: API_KEY
|
||||
value: "change-this-to-a-secure-uuid"
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 62050
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 62050
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "750m"
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /shared
|
||||
readOnly: false
|
||||
- name: scripts
|
||||
mountPath: /scripts
|
||||
|
||||
- name: xray-exporter
|
||||
image: alpine:3.18
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
- /scripts/exporter-start.sh
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 9550
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /scrape
|
||||
port: metrics
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 10
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /scrape
|
||||
port: metrics
|
||||
initialDelaySeconds: 45
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "150m"
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /shared
|
||||
readOnly: true
|
||||
- name: scripts
|
||||
mountPath: /scripts
|
||||
volumes:
|
||||
- name: shared-data
|
||||
emptyDir: {}
|
||||
- name: scripts
|
||||
configMap:
|
||||
name: pasarguard-scripts
|
||||
defaultMode: 0755
|
||||
84
k8s/apps/pasarguard/deployment.yaml
Normal file
84
k8s/apps/pasarguard/deployment.yaml
Normal file
@@ -0,0 +1,84 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: pasarguard
|
||||
labels:
|
||||
app: pasarguard
|
||||
annotations:
|
||||
reloader.stakater.com/auto: "true"
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: pasarguard
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: pasarguard
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
initContainers:
|
||||
- name: download-template
|
||||
image: busybox:latest
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
wget -O /templates/subscription/index.html https://github.com/PasarGuard/subscription-template/releases/latest/download/index.html
|
||||
volumeMounts:
|
||||
- name: subscription-template
|
||||
mountPath: /templates/subscription
|
||||
containers:
|
||||
- name: pasarguard-web
|
||||
image: 'pasarguard/panel:latest'
|
||||
imagePullPolicy: Always
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: pasarguard-secrets
|
||||
env:
|
||||
- name: UVICORN_HOST
|
||||
value: "0.0.0.0"
|
||||
- name: UVICORN_PORT
|
||||
value: "8000"
|
||||
- name: DOCS
|
||||
value: "true"
|
||||
- name: UVICORN_SSL_CERTFILE
|
||||
value: "/app/tls/tls.crt"
|
||||
- name: UVICORN_SSL_KEYFILE
|
||||
value: "/app/tls/tls.key"
|
||||
- name: CUSTOM_TEMPLATES_DIRECTORY
|
||||
value: "/code/app/templates/"
|
||||
- name: SUBSCRIPTION_PAGE_TEMPLATE
|
||||
value: "subscription/index.html"
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8000
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: tls
|
||||
mountPath: /app/tls
|
||||
readOnly: true
|
||||
- name: subscription-template
|
||||
mountPath: /code/app/templates/subscription
|
||||
volumes:
|
||||
- name: tls
|
||||
secret:
|
||||
secretName: pasarguard-tls
|
||||
- name: subscription-template
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: pasarguard
|
||||
spec:
|
||||
selector:
|
||||
app: pasarguard
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 8000
|
||||
44
k8s/apps/pasarguard/external-secrets.yaml
Normal file
44
k8s/apps/pasarguard/external-secrets.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: pasarguard-secrets
|
||||
spec:
|
||||
target:
|
||||
name: pasarguard-secrets
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
SUDO_PASSWORD: |-
|
||||
{{ .admin_password }}
|
||||
SUDO_USERNAME: |-
|
||||
{{ .admin_username }}
|
||||
SQLALCHEMY_DATABASE_URL : |-
|
||||
postgresql+asyncpg://pasarguard:{{ .pg_pass }}@psql.psql.svc:5432/pasarguard
|
||||
|
||||
data:
|
||||
- secretKey: pg_pass
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 2a9deb39-ef22-433e-a1be-df1555625e22
|
||||
property: fields[9].value
|
||||
- secretKey: admin_password
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 35ec5880-2576-401b-a89a-3c9d56b9c1de
|
||||
property: login.password
|
||||
- secretKey: admin_username
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 35ec5880-2576-401b-a89a-3c9d56b9c1de
|
||||
property: login.username
|
||||
14
k8s/apps/pasarguard/kustomization.yaml
Normal file
14
k8s/apps/pasarguard/kustomization.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- ./app.yaml
|
||||
- ./external-secrets.yaml
|
||||
- ./deployment.yaml
|
||||
- ./daemonset.yaml
|
||||
- ./certificate.yaml
|
||||
- ./configmap-scripts.yaml
|
||||
- ./servicemonitor.yaml
|
||||
- ./configmap-scripts-ingress.yaml
|
||||
# - ./daemonset-ingress.yaml
|
||||
# - ./traefik-pasarguard-entrypoint.yaml
|
||||
21
k8s/apps/pasarguard/servicemonitor.yaml
Normal file
21
k8s/apps/pasarguard/servicemonitor.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: pasarguard-node-metrics
|
||||
labels:
|
||||
app: pasarguard-node
|
||||
release: prometheus
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: pasarguard-node
|
||||
endpoints:
|
||||
- port: metrics
|
||||
path: /scrape
|
||||
interval: 30s
|
||||
scrapeTimeout: 10s
|
||||
honorLabels: true
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- pasarguard
|
||||
66
k8s/apps/pasarguard/traefik-pasarguard-entrypoint.yaml
Normal file
66
k8s/apps/pasarguard/traefik-pasarguard-entrypoint.yaml
Normal file
@@ -0,0 +1,66 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: kube-system
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: traefik
|
||||
args:
|
||||
- --entryPoints.metrics.address=:9100/tcp
|
||||
- --entryPoints.traefik.address=:8080/tcp
|
||||
- --entryPoints.web.address=:8000/tcp
|
||||
- --entryPoints.websecure.address=:8443/tcp
|
||||
- --entryPoints.pasarguard-api.address=:62051/tcp
|
||||
- --api.dashboard=true
|
||||
- --ping=true
|
||||
- --metrics.prometheus=true
|
||||
- --metrics.prometheus.entrypoint=metrics
|
||||
- --providers.kubernetescrd
|
||||
- --providers.kubernetescrd.allowEmptyServices=true
|
||||
- --providers.kubernetesingress
|
||||
- --providers.kubernetesingress.allowEmptyServices=true
|
||||
- --providers.kubernetesingress.ingressendpoint.publishedservice=kube-system/traefik
|
||||
- --entryPoints.websecure.http.tls=true
|
||||
- --log.level=INFO
|
||||
- --entryPoints.web.transport.respondingTimeouts.readTimeout=0s
|
||||
- --entryPoints.websecure.transport.respondingTimeouts.readTimeout=0s
|
||||
ports:
|
||||
- containerPort: 9100
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
- containerPort: 8080
|
||||
name: traefik
|
||||
protocol: TCP
|
||||
- containerPort: 8000
|
||||
name: web
|
||||
protocol: TCP
|
||||
- containerPort: 8443
|
||||
name: websecure
|
||||
protocol: TCP
|
||||
- containerPort: 62051
|
||||
name: pasarguard-api
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: kube-system
|
||||
spec:
|
||||
ports:
|
||||
- name: web
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: web
|
||||
- name: websecure
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: websecure
|
||||
- name: pasarguard-api
|
||||
port: 62051
|
||||
protocol: TCP
|
||||
targetPort: pasarguard-api
|
||||
21
k8s/apps/remnawave/app.yaml
Normal file
21
k8s/apps/remnawave/app.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: remnawave
|
||||
namespace: argocd
|
||||
spec:
|
||||
project: apps
|
||||
destination:
|
||||
namespace: remnawave
|
||||
server: https://kubernetes.default.svc
|
||||
source:
|
||||
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
|
||||
targetRevision: HEAD
|
||||
path: k8s/apps/remnawave
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
|
||||
71
k8s/apps/remnawave/deployment.yaml
Normal file
71
k8s/apps/remnawave/deployment.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: remnawave
|
||||
labels:
|
||||
app: remnawave
|
||||
annotations:
|
||||
reloader.stakater.com/auto: "true"
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: remnawave
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: remnawave
|
||||
spec:
|
||||
containers:
|
||||
- name: remnawave
|
||||
image: 'remnawave/backend:2'
|
||||
imagePullPolicy: Always
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: remnawave-secrets
|
||||
env:
|
||||
- name: REDIS_URL
|
||||
value: "redis://remnawave-redis:6379"
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 3000
|
||||
protocol: TCP
|
||||
- name: metrics
|
||||
containerPort: 3001
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 3001
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 3001
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: remnawave
|
||||
spec:
|
||||
selector:
|
||||
app: remnawave
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 3000
|
||||
targetPort: 3000
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 3001
|
||||
targetPort: 3001
|
||||
70
k8s/apps/remnawave/external-secrets.yaml
Normal file
70
k8s/apps/remnawave/external-secrets.yaml
Normal file
@@ -0,0 +1,70 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: remnawave-secrets
|
||||
spec:
|
||||
target:
|
||||
name: remnawave-secrets
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
METRICS_USER: admin
|
||||
FRONT_END_DOMAIN: rw.hexor.cy
|
||||
SUB_PUBLIC_DOMAIN: sub.hexor.cy
|
||||
REDIS_HOST: remnawave-redis
|
||||
REDIS_PORT: "6379"
|
||||
|
||||
DATABASE_URL: |-
|
||||
postgresql://remnawave:{{ .pg_pass }}@psql.psql.svc:5432/remnawave
|
||||
JWT_AUTH_SECRET: |-
|
||||
{{ .jwt_auth_secret }}
|
||||
JWT_API_TOKENS_SECRET: |-
|
||||
{{ .jwt_api_tokens_secret }}
|
||||
METRICS_PASS: |-
|
||||
{{ .metrics_pass }}
|
||||
WEBHOOK_SECRET_HEADER: |-
|
||||
{{ .webhook_secret }}
|
||||
|
||||
data:
|
||||
- secretKey: pg_pass
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 2a9deb39-ef22-433e-a1be-df1555625e22
|
||||
property: fields[10].value
|
||||
- secretKey: jwt_auth_secret
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 0d090436-5e82-453a-914c-19cec2abded1
|
||||
property: fields[0].value
|
||||
- secretKey: jwt_api_tokens_secret
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 0d090436-5e82-453a-914c-19cec2abded1
|
||||
property: fields[1].value
|
||||
- secretKey: metrics_pass
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 0d090436-5e82-453a-914c-19cec2abded1
|
||||
property: fields[2].value
|
||||
- secretKey: webhook_secret
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 0d090436-5e82-453a-914c-19cec2abded1
|
||||
property: fields[3].value
|
||||
12
k8s/apps/remnawave/kustomization.yaml
Normal file
12
k8s/apps/remnawave/kustomization.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- ./external-secrets.yaml
|
||||
- ./deployment.yaml
|
||||
- ./redis-deployment.yaml
|
||||
- ./subscription-page-configmap.yaml
|
||||
- ./subscription-page-deployment.yaml
|
||||
- ./servicemonitor.yaml
|
||||
- ./user-ui-ingress.yaml
|
||||
- ./panel-ingress.yaml
|
||||
37
k8s/apps/remnawave/panel-ingress.yaml
Normal file
37
k8s/apps/remnawave/panel-ingress.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: panel-ui
|
||||
annotations:
|
||||
ingressClassName: traefik
|
||||
cert-manager.io/cluster-issuer: letsencrypt
|
||||
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
|
||||
acme.cert-manager.io/http01-edit-in-place: "true"
|
||||
spec:
|
||||
rules:
|
||||
- host: rw.hexor.cy
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: remnawave
|
||||
port:
|
||||
number: 3000
|
||||
- host: rw.hexor.ru
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: remnawave
|
||||
port:
|
||||
number: 3000
|
||||
tls:
|
||||
- secretName: remnawave-panel-tls
|
||||
hosts:
|
||||
- rw.hexor.cy
|
||||
- rw.hexor.ru
|
||||
71
k8s/apps/remnawave/redis-deployment.yaml
Normal file
71
k8s/apps/remnawave/redis-deployment.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: remnawave-redis
|
||||
labels:
|
||||
app: remnawave-redis
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: remnawave-redis
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: remnawave-redis
|
||||
spec:
|
||||
containers:
|
||||
- name: redis
|
||||
image: 'valkey/valkey:8.1-alpine'
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- name: redis
|
||||
containerPort: 6379
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- valkey-cli
|
||||
- ping
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- valkey-cli
|
||||
- ping
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
volumeMounts:
|
||||
- name: redis-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "200m"
|
||||
volumes:
|
||||
- name: redis-data
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: remnawave-redis
|
||||
spec:
|
||||
selector:
|
||||
app: remnawave-redis
|
||||
ports:
|
||||
- name: redis
|
||||
protocol: TCP
|
||||
port: 6379
|
||||
targetPort: 6379
|
||||
21
k8s/apps/remnawave/servicemonitor.yaml
Normal file
21
k8s/apps/remnawave/servicemonitor.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: remnawave-metrics
|
||||
labels:
|
||||
app: remnawave
|
||||
release: prometheus
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: remnawave
|
||||
endpoints:
|
||||
- port: metrics
|
||||
path: /metrics
|
||||
interval: 30s
|
||||
scrapeTimeout: 10s
|
||||
honorLabels: true
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- remnawave
|
||||
27
k8s/apps/remnawave/subscription-page-configmap.yaml
Normal file
27
k8s/apps/remnawave/subscription-page-configmap.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: remnawave-subscription-page-config
|
||||
labels:
|
||||
app: remnawave-subscription-page
|
||||
data:
|
||||
APP_PORT: "3010"
|
||||
REMNAWAVE_PANEL_URL: "https://rw.hexor.cy"
|
||||
META_TITLE: "RemnaWave Subscription"
|
||||
META_DESCRIPTION: "Your VPN subscription portal"
|
||||
META_KEYWORDS: "vpn,subscription,remnawave"
|
||||
META_AUTHOR: "RemnaWave"
|
||||
ENABLE_ANALYTICS: "false"
|
||||
ANALYTICS_MEASUREMENT_ID: ""
|
||||
CUSTOM_SUB_PREFIX: ""
|
||||
THEME: "dark"
|
||||
CUSTOM_LOGO_URL: ""
|
||||
SHOW_SUBSCRIPTION_INFO: "true"
|
||||
SHOW_CONNECTION_INFO: "true"
|
||||
SHOW_QR_CODE: "true"
|
||||
QR_CODE_SIZE: "256"
|
||||
REFRESH_INTERVAL: "30000"
|
||||
SUBSCRIPTION_TEXT_COLOR: "#ffffff"
|
||||
BACKGROUND_COLOR: "#1a1a1a"
|
||||
ACCENT_COLOR: "#007bff"
|
||||
52
k8s/apps/remnawave/subscription-page-deployment.yaml
Normal file
52
k8s/apps/remnawave/subscription-page-deployment.yaml
Normal file
@@ -0,0 +1,52 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: remnawave-subscription-page
|
||||
labels:
|
||||
app: remnawave-subscription-page
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: remnawave-subscription-page
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: remnawave-subscription-page
|
||||
spec:
|
||||
containers:
|
||||
- name: subscription-page
|
||||
image: 'remnawave/subscription-page:latest'
|
||||
imagePullPolicy: Always
|
||||
envFrom:
|
||||
- configMapRef:
|
||||
name: remnawave-subscription-page-config
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 3010
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "200m"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: remnawave-subscription-page
|
||||
labels:
|
||||
app: remnawave-subscription-page
|
||||
spec:
|
||||
selector:
|
||||
app: remnawave-subscription-page
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 3010
|
||||
targetPort: 3010
|
||||
37
k8s/apps/remnawave/user-ui-ingress.yaml
Normal file
37
k8s/apps/remnawave/user-ui-ingress.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: user-ui
|
||||
annotations:
|
||||
ingressClassName: traefik
|
||||
cert-manager.io/cluster-issuer: letsencrypt
|
||||
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
|
||||
acme.cert-manager.io/http01-edit-in-place: "true"
|
||||
spec:
|
||||
rules:
|
||||
- host: sub.hexor.cy
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: remnawave-subscription-page
|
||||
port:
|
||||
number: 3010
|
||||
- host: sub.hexor.ru
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: remnawave-subscription-page
|
||||
port:
|
||||
number: 3010
|
||||
tls:
|
||||
- secretName: remnawave-user-ui-tls
|
||||
hosts:
|
||||
- sub.hexor.cy
|
||||
- sub.hexor.ru
|
||||
134
k8s/apps/rustdesk/deployment.yaml
Normal file
134
k8s/apps/rustdesk/deployment.yaml
Normal file
@@ -0,0 +1,134 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: rustdesk-hbbs
|
||||
labels:
|
||||
app: rustdesk-hbbs
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rustdesk-hbbs
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rustdesk-hbbs
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
containers:
|
||||
- name: hbbs
|
||||
image: rustdesk/rustdesk-server:latest
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "750m"
|
||||
command: ["hbbs"]
|
||||
args:
|
||||
- "--relay-servers"
|
||||
- "rd.hexor.cy:21117"
|
||||
- "--port"
|
||||
- "21116"
|
||||
ports:
|
||||
- name: registry
|
||||
containerPort: 21116
|
||||
protocol: TCP
|
||||
- name: nat
|
||||
containerPort: 21115
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: keys
|
||||
mountPath: /data
|
||||
readOnly: true
|
||||
- name: data
|
||||
mountPath: /data-persistent
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "info"
|
||||
- name: DB_URL
|
||||
value: "/data-persistent/db_v2.sqlite3"
|
||||
workingDir: /data
|
||||
volumes:
|
||||
- name: keys
|
||||
secret:
|
||||
secretName: rustdesk-keys
|
||||
items:
|
||||
- key: id_ed25519
|
||||
path: id_ed25519
|
||||
mode: 0600
|
||||
- key: id_ed25519.pub
|
||||
path: id_ed25519.pub
|
||||
mode: 0644
|
||||
- name: data
|
||||
hostPath:
|
||||
path: /k8s/rustdesk/hbbs
|
||||
type: DirectoryOrCreate
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: rustdesk-hbbr
|
||||
labels:
|
||||
app: rustdesk-hbbr
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rustdesk-hbbr
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rustdesk-hbbr
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
containers:
|
||||
- name: hbbr
|
||||
image: rustdesk/rustdesk-server:latest
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "750m"
|
||||
command: ["hbbr"]
|
||||
args:
|
||||
- "--port"
|
||||
- "21117"
|
||||
ports:
|
||||
- name: relay
|
||||
containerPort: 21117
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: keys
|
||||
mountPath: /data
|
||||
readOnly: true
|
||||
- name: data
|
||||
mountPath: /data-persistent
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "info"
|
||||
workingDir: /data
|
||||
volumes:
|
||||
- name: keys
|
||||
secret:
|
||||
secretName: rustdesk-keys
|
||||
items:
|
||||
- key: id_ed25519
|
||||
path: id_ed25519
|
||||
mode: 0600
|
||||
- key: id_ed25519.pub
|
||||
path: id_ed25519.pub
|
||||
mode: 0644
|
||||
- name: data
|
||||
hostPath:
|
||||
path: /k8s/rustdesk/hbbr
|
||||
type: DirectoryOrCreate
|
||||
@@ -1,66 +1,34 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: postgres-creds
|
||||
name: rustdesk-keys
|
||||
spec:
|
||||
target:
|
||||
name: postgres-creds
|
||||
name: rustdesk-keys
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
psql_user: paperless
|
||||
psql_pass: |-
|
||||
{{ .psql_pass }}
|
||||
oauth_config: |-
|
||||
{
|
||||
"openid_connect": {
|
||||
"APPS": [
|
||||
{
|
||||
"provider_id": "authentik",
|
||||
"name": "Authentik",
|
||||
"client_id": "{{ .oauth_id }}",
|
||||
"secret": "{{ .oauth_secret }}",
|
||||
"settings": {
|
||||
"server_url": "{{ .server_url }}"
|
||||
}
|
||||
}
|
||||
],
|
||||
"OAUTH_PKCE_ENABLED": "True"
|
||||
}
|
||||
}
|
||||
id_ed25519: |-
|
||||
{{ .private_key }}
|
||||
id_ed25519.pub: |-
|
||||
{{ .public_key }}
|
||||
data:
|
||||
- secretKey: psql_pass
|
||||
- secretKey: private_key
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 2a9deb39-ef22-433e-a1be-df1555625e22
|
||||
property: fields[5].value
|
||||
- secretKey: oauth_id
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 07d4efd9-597c-4a4c-a78d-13bfc43e6055
|
||||
key: f5591dfd-a0ab-4101-a2d7-e06380d3dcc9
|
||||
property: fields[0].value
|
||||
- secretKey: oauth_secret
|
||||
- secretKey: public_key
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 07d4efd9-597c-4a4c-a78d-13bfc43e6055
|
||||
key: f5591dfd-a0ab-4101-a2d7-e06380d3dcc9
|
||||
property: fields[1].value
|
||||
- secretKey: server_url
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 07d4efd9-597c-4a4c-a78d-13bfc43e6055
|
||||
property: fields[2].value
|
||||
|
||||
|
||||
66
k8s/apps/rustdesk/external-secrets.yaml.backup
Normal file
66
k8s/apps/rustdesk/external-secrets.yaml.backup
Normal file
@@ -0,0 +1,66 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: postgres-creds
|
||||
spec:
|
||||
target:
|
||||
name: postgres-creds
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
psql_user: paperless
|
||||
psql_pass: |-
|
||||
{{ .psql_pass }}
|
||||
oauth_config: |-
|
||||
{
|
||||
"openid_connect": {
|
||||
"APPS": [
|
||||
{
|
||||
"provider_id": "authentik",
|
||||
"name": "Authentik",
|
||||
"client_id": "{{ .oauth_id }}",
|
||||
"secret": "{{ .oauth_secret }}",
|
||||
"settings": {
|
||||
"server_url": "{{ .server_url }}"
|
||||
}
|
||||
}
|
||||
],
|
||||
"OAUTH_PKCE_ENABLED": "True"
|
||||
}
|
||||
}
|
||||
data:
|
||||
- secretKey: psql_pass
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 2a9deb39-ef22-433e-a1be-df1555625e22
|
||||
property: fields[5].value
|
||||
- secretKey: oauth_id
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 07d4efd9-597c-4a4c-a78d-13bfc43e6055
|
||||
property: fields[0].value
|
||||
- secretKey: oauth_secret
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 07d4efd9-597c-4a4c-a78d-13bfc43e6055
|
||||
property: fields[1].value
|
||||
- secretKey: server_url
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 07d4efd9-597c-4a4c-a78d-13bfc43e6055
|
||||
property: fields[2].value
|
||||
|
||||
@@ -3,14 +3,8 @@ kind: Kustomization
|
||||
|
||||
resources:
|
||||
- app.yaml
|
||||
#- external-secrets.yaml
|
||||
|
||||
helmCharts:
|
||||
- name: rustdesk-server-oss
|
||||
repo: https://schich.tel/helm-charts
|
||||
version: 0.2.2
|
||||
releaseName: rustdesk
|
||||
namespace: rustdesk
|
||||
valuesFile: values.yaml
|
||||
includeCRDs: true
|
||||
- deployment.yaml
|
||||
- service.yaml
|
||||
- external-secrets.yaml
|
||||
- network-policy.yaml
|
||||
|
||||
|
||||
73
k8s/apps/rustdesk/network-policy.yaml
Normal file
73
k8s/apps/rustdesk/network-policy.yaml
Normal file
@@ -0,0 +1,73 @@
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: rustdesk-network-policy
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: rustdesk-hbbs
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
# Allow all incoming connections to RustDesk ports
|
||||
- from: []
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 21115
|
||||
- protocol: TCP
|
||||
port: 21116
|
||||
- protocol: UDP
|
||||
port: 21116
|
||||
egress:
|
||||
# Allow DNS
|
||||
- to: []
|
||||
ports:
|
||||
- protocol: UDP
|
||||
port: 53
|
||||
- protocol: TCP
|
||||
port: 53
|
||||
# Allow communication between HBBS and HBBR
|
||||
- to:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
app: rustdesk-hbbr
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 21117
|
||||
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: rustdesk-hbbr-network-policy
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: rustdesk-hbbr
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
# Allow all incoming connections to relay port
|
||||
- from: []
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 21117
|
||||
# Allow connections from HBBS
|
||||
- from:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
app: rustdesk-hbbs
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 21117
|
||||
egress:
|
||||
# Allow DNS
|
||||
- to: []
|
||||
ports:
|
||||
- protocol: UDP
|
||||
port: 53
|
||||
- protocol: TCP
|
||||
port: 53
|
||||
57
k8s/apps/rustdesk/service.yaml
Normal file
57
k8s/apps/rustdesk/service.yaml
Normal file
@@ -0,0 +1,57 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: rustdesk-hbbs
|
||||
labels:
|
||||
app: rustdesk-hbbs
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
externalTrafficPolicy: Local
|
||||
selector:
|
||||
app: rustdesk-hbbs
|
||||
ports:
|
||||
- name: registry-tcp
|
||||
port: 21116
|
||||
targetPort: 21116
|
||||
protocol: TCP
|
||||
- name: nat
|
||||
port: 21115
|
||||
targetPort: 21115
|
||||
protocol: TCP
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: rustdesk-hbbs-udp
|
||||
labels:
|
||||
app: rustdesk-hbbs
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
externalTrafficPolicy: Local
|
||||
selector:
|
||||
app: rustdesk-hbbs
|
||||
ports:
|
||||
- name: registry-udp
|
||||
port: 21116
|
||||
targetPort: 21116
|
||||
protocol: UDP
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: rustdesk-hbbr
|
||||
labels:
|
||||
app: rustdesk-hbbr
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
externalTrafficPolicy: Local
|
||||
selector:
|
||||
app: rustdesk-hbbr
|
||||
ports:
|
||||
- name: relay
|
||||
port: 21117
|
||||
targetPort: 21117
|
||||
protocol: TCP
|
||||
@@ -1,87 +0,0 @@
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: docker.io/rustdesk/rustdesk-server
|
||||
pullPolicy: IfNotPresent
|
||||
tag: 1
|
||||
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
className: "traefik"
|
||||
annotations:
|
||||
ingressClassName: traefik
|
||||
cert-manager.io/cluster-issuer: letsencrypt
|
||||
traefik.ingress.kubernetes.io/router.middlewares: kube-system-https-redirect@kubernetescrd
|
||||
acme.cert-manager.io/http01-edit-in-place: "true"
|
||||
hosts:
|
||||
- rd.hexor.cy
|
||||
tls:
|
||||
- secretName: rustdesk-tls
|
||||
hosts:
|
||||
- rd.hexor.cy
|
||||
|
||||
service:
|
||||
type: LoadBalancer
|
||||
externalTrafficPolicy: Cluster
|
||||
loadBalancerIP: null
|
||||
enableWebClientSupport: false
|
||||
hbbr:
|
||||
replayPort:
|
||||
port: 21117
|
||||
targetPort: 21117
|
||||
clientPort:
|
||||
port: 21119
|
||||
targetPort: 21119
|
||||
hbbs:
|
||||
natPort:
|
||||
port: 21115
|
||||
targetPort: 21115
|
||||
registryPort:
|
||||
port: 21116
|
||||
targetPort: 21116
|
||||
heartbeatPort:
|
||||
port: 21116
|
||||
targetPort: 21116
|
||||
webPort:
|
||||
port: 21118
|
||||
targetPort: 21118
|
||||
|
||||
resources:
|
||||
hbbrResource: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
hbbsResource: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
# Additional volumes on the output Deployment definition.
|
||||
volume: {}
|
||||
|
||||
# - name: foo
|
||||
# secret:
|
||||
# secretName: mysecret
|
||||
# optional: false
|
||||
|
||||
# - name: foo
|
||||
# mountPath: "/etc/foo"
|
||||
# readOnly: true
|
||||
|
||||
@@ -1,3 +1,10 @@
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "750m"
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
|
||||
|
||||
@@ -1,5 +1,12 @@
|
||||
env:
|
||||
TZ: Asia/Nicosia
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "750m"
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
|
||||
|
||||
@@ -1,5 +1,12 @@
|
||||
env:
|
||||
TZ: Asia/Nicosia
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "200m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1500m"
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
|
||||
|
||||
@@ -28,12 +28,12 @@ ingress:
|
||||
tlsSecret: pdf-hexor-cy-tls
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 256Mi
|
||||
memory: "512Mi"
|
||||
cpu: "200m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1500m"
|
||||
|
||||
probes:
|
||||
liveness:
|
||||
|
||||
@@ -4,6 +4,8 @@ kind: Kustomization
|
||||
|
||||
resources:
|
||||
- app.yaml
|
||||
- nginx-router.yaml
|
||||
- traefik-simple.yaml
|
||||
|
||||
helmCharts:
|
||||
- name: syncthing
|
||||
@@ -21,3 +23,11 @@ helmCharts:
|
||||
namespace: syncthing
|
||||
valuesFile: syncthing-khv.yaml
|
||||
includeCRDs: true
|
||||
|
||||
- name: syncthing
|
||||
repo: https://k8s-home-lab.github.io/helm-charts
|
||||
version: 4.0.0
|
||||
releaseName: syncthing-nas
|
||||
namespace: syncthing
|
||||
valuesFile: syncthing-nas.yaml
|
||||
includeCRDs: true
|
||||
276
k8s/apps/syncthing/nginx-router.yaml
Normal file
276
k8s/apps/syncthing/nginx-router.yaml
Normal file
@@ -0,0 +1,276 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: nginx-config
|
||||
namespace: syncthing
|
||||
data:
|
||||
default.conf: |
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
|
||||
# Landing page
|
||||
location = / {
|
||||
root /usr/share/nginx/html;
|
||||
try_files /index.html =404;
|
||||
}
|
||||
|
||||
# NAS instance
|
||||
location /nas {
|
||||
rewrite ^/nas$ /nas/ permanent;
|
||||
}
|
||||
|
||||
# NAS API endpoints
|
||||
location ~ ^/nas/(rest|meta)/ {
|
||||
rewrite ^/nas/(.*) /$1 break;
|
||||
proxy_pass http://syncthing-nas:8384;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Handle websockets
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_read_timeout 86400;
|
||||
}
|
||||
|
||||
location /nas/ {
|
||||
proxy_pass http://syncthing-nas:8384/;
|
||||
|
||||
# Important: tell syncthing that we're using /nas as base path
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Handle websockets
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_read_timeout 86400;
|
||||
|
||||
# Sub filter to fix asset paths
|
||||
sub_filter 'href="/' 'href="/nas/';
|
||||
sub_filter 'src="/' 'src="/nas/';
|
||||
sub_filter 'url(/' 'url(/nas/';
|
||||
sub_filter '"/meta' '"/nas/meta';
|
||||
sub_filter '"/rest' '"/nas/rest';
|
||||
sub_filter '"/vendor' '"/nas/vendor';
|
||||
sub_filter '"/theme-assets' '"/nas/theme-assets';
|
||||
sub_filter '"/syncthing' '"/nas/syncthing';
|
||||
sub_filter_once off;
|
||||
sub_filter_types text/html text/css application/javascript;
|
||||
}
|
||||
|
||||
|
||||
# Master instance
|
||||
location /master {
|
||||
rewrite ^/master$ /master/ permanent;
|
||||
}
|
||||
|
||||
# Master API endpoints
|
||||
location ~ ^/master/(rest|meta)/ {
|
||||
rewrite ^/master/(.*) /$1 break;
|
||||
proxy_pass http://syncthing-master:8384;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Handle websockets
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_read_timeout 86400;
|
||||
}
|
||||
|
||||
location /master/ {
|
||||
proxy_pass http://syncthing-master:8384/;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
|
||||
sub_filter 'href="/' 'href="/master/';
|
||||
sub_filter 'src="/' 'src="/master/';
|
||||
sub_filter 'url(/' 'url(/master/';
|
||||
sub_filter '"/meta' '"/master/meta';
|
||||
sub_filter '"/rest' '"/master/rest';
|
||||
sub_filter '"/vendor' '"/master/vendor';
|
||||
sub_filter '"/theme-assets' '"/master/theme-assets';
|
||||
sub_filter '"/syncthing' '"/master/syncthing';
|
||||
sub_filter_once off;
|
||||
sub_filter_types text/html text/css application/javascript;
|
||||
}
|
||||
|
||||
|
||||
# Iris instance
|
||||
location /iris {
|
||||
rewrite ^/iris$ /iris/ permanent;
|
||||
}
|
||||
|
||||
# Iris API endpoints
|
||||
location ~ ^/iris/(rest|meta)/ {
|
||||
rewrite ^/iris/(.*) /$1 break;
|
||||
proxy_pass http://syncthing-khv:8384;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Handle websockets
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_read_timeout 86400;
|
||||
}
|
||||
|
||||
location /iris/ {
|
||||
proxy_pass http://syncthing-khv:8384/;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
|
||||
sub_filter 'href="/' 'href="/iris/';
|
||||
sub_filter 'src="/' 'src="/iris/';
|
||||
sub_filter 'url(/' 'url(/iris/';
|
||||
sub_filter '"/meta' '"/iris/meta';
|
||||
sub_filter '"/rest' '"/iris/rest';
|
||||
sub_filter '"/vendor' '"/iris/vendor';
|
||||
sub_filter '"/theme-assets' '"/iris/theme-assets';
|
||||
sub_filter '"/syncthing' '"/iris/syncthing';
|
||||
sub_filter_once off;
|
||||
sub_filter_types text/html text/css application/javascript;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
index.html: |
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Syncthing Instances</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: Arial, sans-serif;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
height: 100vh;
|
||||
margin: 0;
|
||||
background-color: #f0f0f0;
|
||||
}
|
||||
.container {
|
||||
text-align: center;
|
||||
background: white;
|
||||
padding: 40px;
|
||||
border-radius: 10px;
|
||||
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
|
||||
}
|
||||
h1 {
|
||||
color: #333;
|
||||
margin-bottom: 30px;
|
||||
}
|
||||
.links {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 15px;
|
||||
}
|
||||
a {
|
||||
display: inline-block;
|
||||
padding: 15px 30px;
|
||||
background-color: #0078e7;
|
||||
color: white;
|
||||
text-decoration: none;
|
||||
border-radius: 5px;
|
||||
transition: background-color 0.3s;
|
||||
}
|
||||
a:hover {
|
||||
background-color: #0056b3;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<h1>Syncthing Instances</h1>
|
||||
<div class="links">
|
||||
<a href="/nas/">NAS Instance</a>
|
||||
<a href="/master/">Master Instance</a>
|
||||
<a href="/iris/">Iris Instance</a>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: syncthing-router
|
||||
namespace: syncthing
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: syncthing-router
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: syncthing-router
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:alpine
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /etc/nginx/conf.d
|
||||
- name: html
|
||||
mountPath: /usr/share/nginx/html
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: nginx-config
|
||||
items:
|
||||
- key: default.conf
|
||||
path: default.conf
|
||||
- name: html
|
||||
configMap:
|
||||
name: nginx-config
|
||||
items:
|
||||
- key: index.html
|
||||
path: index.html
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: syncthing-router
|
||||
namespace: syncthing
|
||||
spec:
|
||||
selector:
|
||||
app: syncthing-router
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 80
|
||||
@@ -4,13 +4,8 @@ persistence:
|
||||
config:
|
||||
enabled: true
|
||||
type: hostPath
|
||||
hostPath: "/k8s/syncthing"
|
||||
hostPath: "/k8s/Syncthing"
|
||||
mountPath: "/var/syncthing"
|
||||
storage:
|
||||
enabled: true
|
||||
type: hostPath
|
||||
hostPath: "/home/share"
|
||||
mountPath: "/storage"
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: iris.khv
|
||||
service:
|
||||
@@ -20,23 +15,24 @@ service:
|
||||
port: 8384
|
||||
listen:
|
||||
enabled: true
|
||||
type: LoadBalancer
|
||||
type: NodePort
|
||||
externalTrafficPolicy: Local
|
||||
ports:
|
||||
listen:
|
||||
enabled: true
|
||||
port: 30023
|
||||
port: 22000
|
||||
protocol: TCP
|
||||
targetPort: 22000
|
||||
selector:
|
||||
app.kubernetes.io/name: syncthing
|
||||
discovery:
|
||||
enabled: true
|
||||
type: NodePort
|
||||
externalTrafficPolicy: Cluster
|
||||
externalTrafficPolicy: Local
|
||||
ports:
|
||||
discovery:
|
||||
enabled: true
|
||||
port: 21027
|
||||
protocol: UDP
|
||||
targetPort: 21027
|
||||
|
||||
port: 21027
|
||||
protocol: UDP
|
||||
targetPort: 21027
|
||||
@@ -5,13 +5,13 @@ persistence:
|
||||
config:
|
||||
enabled: true
|
||||
type: hostPath
|
||||
hostPath: "/k8s/syncthing"
|
||||
hostPath: "/k8s/syncthing-config"
|
||||
mountPath: "/var/syncthing"
|
||||
storage:
|
||||
enabled: true
|
||||
type: hostPath
|
||||
hostPath: "/k8s/"
|
||||
mountPath: "/storage"
|
||||
hostPath: "/k8s/Syncthing-repos"
|
||||
mountPath: "/Syncthing-repos"
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
service:
|
||||
@@ -21,23 +21,24 @@ service:
|
||||
port: 8384
|
||||
listen:
|
||||
enabled: true
|
||||
type: LoadBalancer
|
||||
type: NodePort
|
||||
externalTrafficPolicy: Local
|
||||
ports:
|
||||
listen:
|
||||
enabled: true
|
||||
port: 30023
|
||||
port: 22000
|
||||
protocol: TCP
|
||||
targetPort: 22000
|
||||
selector:
|
||||
app.kubernetes.io/name: syncthing
|
||||
discovery:
|
||||
enabled: true
|
||||
type: NodePort
|
||||
externalTrafficPolicy: Cluster
|
||||
externalTrafficPolicy: Local
|
||||
ports:
|
||||
discovery:
|
||||
enabled: true
|
||||
port: 21027
|
||||
protocol: UDP
|
||||
targetPort: 21027
|
||||
|
||||
port: 21027
|
||||
protocol: UDP
|
||||
targetPort: 21027
|
||||
44
k8s/apps/syncthing/syncthing-nas.yaml
Normal file
44
k8s/apps/syncthing/syncthing-nas.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
image:
|
||||
tag: latest
|
||||
persistence:
|
||||
config:
|
||||
enabled: true
|
||||
type: hostPath
|
||||
hostPath: "/mnt/storage/Storage/syncthing-config"
|
||||
mountPath: "/var/syncthing"
|
||||
storage:
|
||||
enabled: true
|
||||
type: hostPath
|
||||
hostPath: "/mnt/storage/Storage/Syncthing-repos"
|
||||
mountPath: "/Syncthing-repos"
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: nas.homenet
|
||||
service:
|
||||
main:
|
||||
ports:
|
||||
http:
|
||||
port: 8384
|
||||
listen:
|
||||
enabled: true
|
||||
type: NodePort
|
||||
externalTrafficPolicy: Local
|
||||
ports:
|
||||
listen:
|
||||
enabled: true
|
||||
port: 22000
|
||||
protocol: TCP
|
||||
targetPort: 22000
|
||||
discovery:
|
||||
enabled: true
|
||||
type: NodePort
|
||||
externalTrafficPolicy: Local
|
||||
ports:
|
||||
discovery:
|
||||
enabled: true
|
||||
port: 21027
|
||||
protocol: UDP
|
||||
targetPort: 21027
|
||||
port: 21027
|
||||
protocol: UDP
|
||||
targetPort: 21027
|
||||
|
||||
36
k8s/apps/syncthing/traefik-simple.yaml
Normal file
36
k8s/apps/syncthing/traefik-simple.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
---
|
||||
apiVersion: traefik.io/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: syncthing-ingressroute
|
||||
namespace: syncthing
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`ss.hexor.cy`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: syncthing-router
|
||||
port: 80
|
||||
middlewares:
|
||||
- name: authentik-forward-auth
|
||||
namespace: syncthing
|
||||
tls:
|
||||
secretName: syncthing-tls
|
||||
---
|
||||
apiVersion: traefik.io/v1alpha1
|
||||
kind: Middleware
|
||||
metadata:
|
||||
name: authentik-forward-auth
|
||||
namespace: syncthing
|
||||
spec:
|
||||
forwardAuth:
|
||||
address: http://authentik-server.authentik.svc.cluster.local/outpost.goauthentik.io/auth/traefik
|
||||
trustForwardHeader: true
|
||||
authResponseHeaders:
|
||||
- X-authentik-username
|
||||
- X-authentik-groups
|
||||
- X-authentik-email
|
||||
- X-authentik-name
|
||||
- X-authentik-uid
|
||||
21
k8s/apps/tg-bots/app.yaml
Normal file
21
k8s/apps/tg-bots/app.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: tg-bots
|
||||
namespace: argocd
|
||||
spec:
|
||||
project: apps
|
||||
destination:
|
||||
namespace: tg-bots
|
||||
server: https://kubernetes.default.svc
|
||||
source:
|
||||
repoURL: ssh://git@gt.hexor.cy:30022/ab/homelab.git
|
||||
targetRevision: HEAD
|
||||
path: k8s/apps/tg-bots
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
|
||||
42
k8s/apps/tg-bots/desubot.yaml
Normal file
42
k8s/apps/tg-bots/desubot.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: desubot
|
||||
labels:
|
||||
app: desubot
|
||||
annotations:
|
||||
reloader.stakater.com/auto: "true"
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: desubot
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: desubot
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: home.homenet
|
||||
containers:
|
||||
- name: desubot
|
||||
image: 'ultradesu/desubot:latest'
|
||||
imagePullPolicy: Always
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: desubot
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "info"
|
||||
volumeMounts:
|
||||
- mountPath: /storage
|
||||
name: storage
|
||||
volumes:
|
||||
- name: storage
|
||||
nfs:
|
||||
server: nas.homenet
|
||||
path: /mnt/storage/Storage/k8s/desubot/
|
||||
readOnly: false
|
||||
49
k8s/apps/tg-bots/external-secrets.yaml
Normal file
49
k8s/apps/tg-bots/external-secrets.yaml
Normal file
@@ -0,0 +1,49 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: get-id-bot
|
||||
spec:
|
||||
target:
|
||||
name: get-id-bot
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
TELOXIDE_TOKEN: |-
|
||||
{{ .token }}
|
||||
|
||||
data:
|
||||
- secretKey: token
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 97bd0af9-54ab-429a-b060-09626525f4cd
|
||||
property: fields[0].value
|
||||
|
||||
---
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: desubot
|
||||
spec:
|
||||
target:
|
||||
name: desubot
|
||||
deletionPolicy: Delete
|
||||
template:
|
||||
type: Opaque
|
||||
data:
|
||||
TELEGRAM_BOT_TOKEN: |-
|
||||
{{ .token }}
|
||||
|
||||
data:
|
||||
- secretKey: token
|
||||
sourceRef:
|
||||
storeRef:
|
||||
name: vaultwarden-login
|
||||
kind: ClusterSecretStore
|
||||
remoteRef:
|
||||
key: 97bd0af9-54ab-429a-b060-09626525f4cd
|
||||
property: fields[1].value
|
||||
36
k8s/apps/tg-bots/get-id-bot.yaml
Normal file
36
k8s/apps/tg-bots/get-id-bot.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: get-id-bot
|
||||
labels:
|
||||
app: get-id-bot
|
||||
annotations:
|
||||
reloader.stakater.com/auto: "true"
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: get-id-bot
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: get-id-bot
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
containers:
|
||||
- name: get-id-bot
|
||||
image: 'ghcr.io/house-of-vanity/get_id_bot:main'
|
||||
imagePullPolicy: Always
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: get-id-bot
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "info,teloxide::error_handlers=off"
|
||||
|
||||
|
||||
|
||||
10
k8s/apps/tg-bots/kustomization.yaml
Normal file
10
k8s/apps/tg-bots/kustomization.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- app.yaml
|
||||
- get-id-bot.yaml
|
||||
- external-secrets.yaml
|
||||
- desubot.yaml
|
||||
- restart-job.yaml
|
||||
56
k8s/apps/tg-bots/restart-job.yaml
Normal file
56
k8s/apps/tg-bots/restart-job.yaml
Normal file
@@ -0,0 +1,56 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: tg-bots-restart-sa
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: tg-bots-restart-role
|
||||
rules:
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["deployments"]
|
||||
verbs: ["get", "patch"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: tg-bots-restart-rb
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: tg-bots-restart-sa
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: tg-bots-restart-role
|
||||
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: tg-bots-daily-restart
|
||||
spec:
|
||||
schedule: "0 4 * * *" # every day at 04:00
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: tg-bots-restart-sa
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: kubectl
|
||||
image: bitnami/kubectl:latest
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
kubectl -n "$POD_NAMESPACE" rollout restart deployment/desubot
|
||||
kubectl -n "$POD_NAMESPACE" rollout restart deployment/get-id-bot
|
||||
@@ -31,6 +31,13 @@ spec:
|
||||
- name: vaultwarden
|
||||
image: 'vaultwarden/server:latest'
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "750m"
|
||||
env:
|
||||
- name: DOMAIN
|
||||
value: https://vw.hexor.cy
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: admin-token
|
||||
|
||||
25
k8s/apps/vpn/config.yaml
Normal file
25
k8s/apps/vpn/config.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: outfleet-rs-config
|
||||
data:
|
||||
config.toml: |-
|
||||
[database]
|
||||
url = "postgres://outfleet_rs:FMj#bA0XW14Pd2@psql.psql.svc:5432/outfleet_rs"
|
||||
|
||||
[web]
|
||||
host = "0.0.0.0"
|
||||
port = 8080
|
||||
base_url = "https://vpn.hexor.cy"
|
||||
|
||||
[telegram]
|
||||
enabled = false
|
||||
admin_chat_ids = []
|
||||
allowed_users = []
|
||||
|
||||
[xray]
|
||||
config_path = "./templates"
|
||||
|
||||
[log]
|
||||
level = "debug"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: outfleet-secrets
|
||||
@@ -51,7 +51,7 @@ spec:
|
||||
property: fields[1].value
|
||||
|
||||
---
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: outline-config
|
||||
|
||||
@@ -6,4 +6,7 @@ resources:
|
||||
- ./external-secrets.yaml
|
||||
- ./outfleet.yaml
|
||||
- ./shadowsocks.yaml
|
||||
- ./outfleet-rs.yaml
|
||||
- ./config.yaml
|
||||
- ./xray.yaml
|
||||
|
||||
|
||||
66
k8s/apps/vpn/outfleet-rs.yaml
Normal file
66
k8s/apps/vpn/outfleet-rs.yaml
Normal file
@@ -0,0 +1,66 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: outfleet-rs
|
||||
labels:
|
||||
app: outfleet-rs
|
||||
annotations:
|
||||
reloader.stakater.com/auto: "true"
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: outfleet-rs
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: outfleet-rs
|
||||
spec:
|
||||
hostname: outfleet-rs
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: master.tail2fe2d.ts.net
|
||||
containers:
|
||||
- name: outfleet-rs
|
||||
image: 'ultradesu/outfleet:rs-0.1.3'
|
||||
imagePullPolicy: Always
|
||||
command: ["/bin/sh"]
|
||||
args:
|
||||
- "-c"
|
||||
- |
|
||||
set -x
|
||||
/app/xray-admin --host 0.0.0.0 --port 8080
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "info"
|
||||
volumeMounts:
|
||||
- name: outfleet-config
|
||||
mountPath: /app/config.toml # <-- target path inside container
|
||||
subPath: config.toml # <-- use the specific key as a file
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: outfleet-config
|
||||
configMap:
|
||||
name: outfleet-rs-config
|
||||
items:
|
||||
- key: config.toml
|
||||
path: config.toml
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: outfleet-rs
|
||||
spec:
|
||||
selector:
|
||||
app: outfleet-rs
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
@@ -30,8 +30,11 @@ spec:
|
||||
args:
|
||||
- "-c"
|
||||
- |
|
||||
python ./manage.py makemigrations vpn
|
||||
set -x
|
||||
#python ./manage.py makemigrations
|
||||
#python ./manage.py makemigrations vpn
|
||||
python ./manage.py migrate
|
||||
python ./manage.py migrate vpn
|
||||
python ./manage.py create_admin
|
||||
python ./manage.py runserver 0.0.0.0:8000
|
||||
envFrom:
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user