57 Commits

Author SHA1 Message Date
2f1fcd681e Update README.MD 2025-05-12 02:46:25 +03:00
A B
26acbf75ac Fix cross-flow keys 2025-05-11 23:44:18 +00:00
A B
4b2b56bcd2 Improved server performance and logging 2025-04-29 22:44:29 +00:00
A B
2cfc2c6c3a Improved server performance and logging 2025-04-29 22:44:17 +00:00
A B
c865818bfe Improved server performance 2025-04-29 21:22:28 +00:00
A B
32adb309ee Improved server performance 2025-04-29 21:17:40 +00:00
A B
3f852e50f4 Improved server performance 2025-04-29 21:12:27 +00:00
c01eb48451 Added macos build 2025-03-14 02:27:40 +02:00
c3575b013f Added basic auth support 2025-03-14 02:14:15 +02:00
b4c2fae778 Fix readme 2024-11-11 12:48:00 +02:00
61552a3d70 Fix help a bit. 2024-11-11 12:46:55 +02:00
AB
3ca8e27ded Bump version in Cargo.toml 2024-09-25 15:40:40 +03:00
AB
2b7b3b9a22 Fix help text. Added nix-shell to build locally 2024-09-25 15:24:21 +03:00
AB
bc473ed532 test 2024-07-14 04:07:11 +03:00
68ffe9b62d Update main.rs 2024-07-09 10:03:33 +03:00
AB
86f30b90e8 Bump version. linting. improved logging. 2024-07-09 02:48:50 +03:00
AB
8991a0579b Improve logging and error handling 2024-07-09 02:28:44 +03:00
cd1ba739ab fix ci 2024-07-08 23:29:45 +03:00
1ab0167336 fix ci 2024-07-08 23:27:58 +03:00
a39e4835e5 fix ci 2024-07-08 23:18:18 +03:00
30bf476037 fix ci 2024-07-08 23:15:10 +03:00
b2c32ec2b9 fix ci 2024-07-08 23:08:52 +03:00
f7226e918e fix ci 2024-07-08 22:46:20 +03:00
770633d0c6 fix ci 2024-07-08 22:36:56 +03:00
1c2c234930 fix 2024-07-08 22:29:24 +03:00
655c32a915 Update Dockerfile 2024-07-08 22:03:23 +03:00
21fe8036d3 Update main.yml 2024-07-08 21:47:43 +03:00
eac6f22756 Test docker build 2024-07-08 21:33:30 +03:00
444251ad00 Update main.yml 2024-07-08 21:24:20 +03:00
418de46955 Update main.yml 2024-07-08 21:22:15 +03:00
e500f9ff10 Update main.yml 2024-07-08 21:20:04 +03:00
c01714d8ce Update main.yml 2024-07-08 21:03:34 +03:00
72419aaabd Update main.yml 2024-07-08 20:27:12 +03:00
faacf4c034 Update main.yml 2024-07-08 20:13:47 +03:00
ca7ca3dc38 Update main.yml 2024-07-08 20:05:14 +03:00
9b8efacabc Update main.yml 2024-07-08 19:56:45 +03:00
5b88e4ab50 Update main.yml 2024-07-08 19:45:51 +03:00
98f3957076 Update main.yml 2024-07-08 19:43:47 +03:00
9485cf435a Update main.yml 2024-07-08 19:25:31 +03:00
a9748bf7f8 Update main.yml 2024-07-08 19:24:11 +03:00
ab8e3454e7 Update main.yml 2024-07-08 19:20:24 +03:00
a6f2eaec19 Update main.yml 2024-07-08 19:14:56 +03:00
2f416821f0 Update main.yml 2024-07-08 19:02:20 +03:00
643ea91f27 Update main.yml 2024-07-08 19:01:16 +03:00
41cc5a7bb1 Update main.yml 2024-07-08 18:52:22 +03:00
40a142e9c5 Update main.yml 2024-07-08 18:45:13 +03:00
fcc008e949 Update main.yml 2024-07-08 18:41:10 +03:00
b5560943ee Update main.yml 2024-07-08 18:24:05 +03:00
f51b17ff24 Update main.yml 2024-07-08 17:57:42 +03:00
d47e191b18 Update main.yml 2024-07-08 17:51:05 +03:00
653ad49319 Update main.yml 2024-07-08 16:03:07 +03:00
ce9141e2a0 Update main.yml 2024-07-08 16:00:05 +03:00
7e9d21e49c Update main.yml 2024-07-08 15:46:54 +03:00
efd8659836 Update main.yml 2024-07-08 15:38:17 +03:00
f32014306b Update main.yml 2024-07-08 14:56:54 +03:00
bd3054d2f1 Update main.yml 2024-07-08 14:27:46 +03:00
8b66d7395e Update main.yml 2024-07-08 14:13:30 +03:00
11 changed files with 853 additions and 188 deletions

View File

@ -1,84 +1,182 @@
name: Rust CI name: Rust static build and publish
on: on:
push: push:
tags: tags:
- 'v*.*.*' # Запуск при создании новой тэгированной версии - 'v*.*.*'
pull_request:
branches: env:
- main CARGO_TERM_COLOR: always
BINARY_NAME: khm
jobs: jobs:
build: build:
name: Build static binary
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ubuntu-latest, windows-latest, macos-latest] os: [ubuntu-latest, windows-latest, macos-latest]
include: include:
- os: ubuntu-latest - os: ubuntu-latest
target: x86_64-unknown-linux-gnu build_target: x86_64-unknown-linux-musl
platform_name: linux-amd64
- os: windows-latest - os: windows-latest
target: x86_64-pc-windows-msvc build_target: x86_64-pc-windows-msvc
platform_name: windows-amd64
- os: macos-latest - os: macos-latest
target: x86_64-apple-darwin build_target: aarch64-apple-darwin
platform_name: macos-arm64
permissions:
contents: write
steps: steps:
- name: Checkout repository - uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Cache Cargo registry
uses: actions/cache@v4
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-registry-
- name: Cache Cargo index
uses: actions/cache@v4
with:
path: ~/.cargo/git
key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-index-
- name: Cache Cargo build
uses: actions/cache@v4
with:
path: target
key: ${{ runner.os }}-cargo-build-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-build-
- name: Set up Rust - uses: dtolnay/rust-toolchain@stable
uses: actions-rs/toolchain@v1
with: - name: Install rust targets
toolchain: stable run: rustup target add ${{ matrix.build_target }}
profile: minimal
override: true - name: Build Linux MUSL
if: matrix.os == 'ubuntu-latest'
- name: Install target uses: gmiam/rust-musl-action@master
run: rustup target add ${{ matrix.target }} with:
args: cargo build --target ${{ matrix.build_target }} --release
- name: Build project
run: cargo build --release --target ${{ matrix.target }} - name: Build MacOS
if: matrix.os == 'macos-latest'
- name: Run tests run: cargo build --target ${{ matrix.build_target }} --release
run: cargo test --target ${{ matrix.target }}
- name: Build Windows
- name: Upload release assets if: matrix.os == 'windows-latest'
uses: actions/upload-artifact@v4 run: cargo build --target ${{ matrix.build_target }} --release
with:
name: khm-${{ matrix.os }} - name: Upload artifact
path: target/${{ matrix.target }}/release/ uses: actions/upload-artifact@v4
with:
name: ${{ env.BINARY_NAME }}_${{ matrix.platform_name }}
path: target/${{ matrix.build_target }}/release/${{ env.BINARY_NAME }}*
release: release:
runs-on: ubuntu-latest name: Create Release Page
needs: build needs: build
runs-on: ubuntu-latest
outputs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
permissions:
contents: write
steps: steps:
- name: Checkout repository - uses: actions/checkout@v4
uses: actions/checkout@v3 - name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: false
prerelease: false
- name: Download Linux build artifact upload:
uses: actions/download-artifact@v4 name: Upload Release Assets
with: needs: release
name: khm-ubuntu-latest runs-on: ${{ matrix.os }}
path: target/x86_64-unknown-linux-gnu/release/ strategy:
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
include:
- os: ubuntu-latest
platform_name: linux-amd64
- os: windows-latest
platform_name: windows-amd64
- os: macos-latest
platform_name: macos-arm64
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
name: Download ${{ matrix.platform_name }} artifact
with:
name: ${{ env.BINARY_NAME }}_${{ matrix.platform_name }}
path: ${{ env.BINARY_NAME }}_${{ matrix.platform_name }}
- name: Upload Release Asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.release.outputs.upload_url }}
asset_path: ${{ env.BINARY_NAME }}_${{ matrix.platform_name }}/${{ env.BINARY_NAME }}${{ matrix.platform_name == 'windows-amd64' && '.exe' || '' }}
asset_name: ${{ env.BINARY_NAME }}_${{ matrix.platform_name }}${{ matrix.platform_name == 'windows-amd64' && '.exe' || '' }}
asset_content_type: application/octet-stream
- name: Download Windows build artifact build_docker:
uses: actions/download-artifact@v4 name: Build and Publish Docker Image
with: needs: build
name: khm-windows-latest runs-on: ubuntu-latest
path: target/x86_64-pc-windows-msvc/release/ steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
name: Download Linux artifact
with:
name: ${{ env.BINARY_NAME }}_linux-amd64
path: .
- name: ls
run: |
ls -lah
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Download macOS build artifact - name: Set up QEMU
uses: actions/download-artifact@v4 uses: docker/setup-qemu-action@v3
with:
name: khm-macos-latest - name: Login to Docker Hub
path: target/x86_64-apple-darwin/release/ uses: docker/login-action@v3
with:
username: ultradesu
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set exec flag
run: |
chmod +x ${{ env.BINARY_NAME }}
- name: Set outputs
id: get_tag
run: |
echo "tag=$(echo ${GITHUB_REF} | cut -d'/' -f3)" >> $GITHUB_OUTPUT
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
platforms: linux/amd64,linux/arm64
push: true
tags: ultradesu/${{ env.BINARY_NAME }}:latest,ultradesu/${{ env.BINARY_NAME }}:${{ steps.get_tag.outputs.tag }}
- name: Create Release
id: create_release
uses: ncipollo/release-action@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
artifacts: "target/x86_64-apple-darwin/release/khm,target/x86_64-pc-windows-msvc/release/khm.exe,target/x86_64-unknown-linux-gnu/release/khm"
#bodyFile: "body.md"

2
.gitignore vendored
View File

@ -1 +1,3 @@
/target /target
*.swp
*.swo

45
Cargo.lock generated
View File

@ -1,6 +1,6 @@
# This file is automatically @generated by Cargo. # This file is automatically @generated by Cargo.
# It is not intended for manual editing. # It is not intended for manual editing.
version = 3 version = 4
[[package]] [[package]]
name = "actix-codec" name = "actix-codec"
@ -836,6 +836,17 @@ dependencies = [
"digest", "digest",
] ]
[[package]]
name = "hostname"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
dependencies = [
"libc",
"match_cfg",
"winapi",
]
[[package]] [[package]]
name = "http" name = "http"
version = "0.2.12" version = "0.2.12"
@ -1053,12 +1064,14 @@ dependencies = [
[[package]] [[package]]
name = "khm" name = "khm"
version = "0.1.0" version = "0.4.1"
dependencies = [ dependencies = [
"actix-web", "actix-web",
"base64 0.21.7",
"chrono", "chrono",
"clap", "clap",
"env_logger", "env_logger",
"hostname",
"log", "log",
"regex", "regex",
"reqwest", "reqwest",
@ -1119,6 +1132,12 @@ version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "match_cfg"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
[[package]] [[package]]
name = "md-5" name = "md-5"
version = "0.10.6" version = "0.10.6"
@ -2222,6 +2241,28 @@ dependencies = [
"web-sys", "web-sys",
] ]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]] [[package]]
name = "windows-core" name = "windows-core"
version = "0.52.0" version = "0.52.0"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "khm" name = "khm"
version = "0.1.0" version = "0.4.2"
edition = "2021" edition = "2021"
authors = ["AB <ab@hexor.cy>"] authors = ["AB <ab@hexor.cy>"]
@ -11,8 +11,10 @@ serde_json = "1.0"
env_logger = "0.11.3" env_logger = "0.11.3"
log = "0.4" log = "0.4"
regex = "1.10.5" regex = "1.10.5"
base64 = "0.21"
tokio = { version = "1", features = ["full"] } tokio = { version = "1", features = ["full"] }
tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] } tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
clap = { version = "4", features = ["derive"] } clap = { version = "4", features = ["derive"] }
chrono = "0.4.38" chrono = "0.4.38"
reqwest = { version = "0.12", features = ["json"] } reqwest = { version = "0.12", features = ["json"] }
hostname = "0.3"

5
Dockerfile Normal file
View File

@ -0,0 +1,5 @@
# syntax=docker/dockerfile:1
# Minimal runtime image for khm: the prebuilt Linux binary (presumably the
# statically linked x86_64-unknown-linux-musl artifact produced by CI — confirm,
# since a glibc-linked binary would not run on Alpine) is copied in from the
# build context; no compiler toolchain is included in the image.
FROM alpine:latest
COPY khm /usr/local/bin/khm
ENTRYPOINT ["/usr/local/bin/khm"]

View File

@ -24,21 +24,24 @@ khm --server --ip 127.0.0.1 --port 8080 --db-host 127.0.0.1 --db-name khm --db-u
To run the application in client mode, use the following command: To run the application in client mode, use the following command:
```bash ```bash
khm --host http://khm.example.com:8080 --known-hosts ~/.ssh/known_hosts --in-place khm --host http://khm.example.com:8080/<FLOW_NAME>/ --known-hosts ~/.ssh/known_hosts --in-place
``` ```
### Arguments ### Arguments
- `--server`: Run in server mode (default: false). Options:
- `--ip`: IP address to bind the server or client to (default: 127.0.0.1). - `--server` Run in server mode
- `--port`: Port to bind the server or client to (default: 8080). - `--in-place` Server mode: Sync the known_hosts file with keys from the server
- `--db-host`: Hostname or IP address of the PostgreSQL database (default: 127.0.0.1). - `--flows <FLOWS>...` Server mode: Comma-separated list of flows to manage [default: default]
- `--db-name`: Name of the PostgreSQL database (default: khm). - `-i, --ip <IP>` Server mode: IP address to bind the server to [default: 127.0.0.1]
- `--db-user`: Username for the PostgreSQL database (required in server mode). - `-p, --port <PORT>` Server mode: Port to bind the server to [default: 8080]
- `--db-password`: Password for the PostgreSQL database (required in server mode). - `--db-host <DB_HOST>` Server mode: Hostname or IP address of the PostgreSQL database [default: 127.0.0.1]
- `--host`: Host address of the server to connect to in client mode (required in client mode). - `--db-name <DB_NAME>` Server mode: Name of the PostgreSQL database [default: khm]
- `--known-hosts`: Path to the `known_hosts` file (default: ~/.ssh/known_hosts). - `--db-user <DB_USER>` Server mode: Username for the PostgreSQL database
- `--in-place`: Update the `known_hosts` file with keys from the server after sending keys (default: false). - `--db-password <DB_PASSWORD>` Server mode: Password for the PostgreSQL database
- `--flows`: Comma-separated list of flows to manage (default: default). - `--basic-auth <BASIC_AUTH>` Client mode: Basic Auth credentials [default: ""]
- `--host <HOST>` Client mode: Full host address of the server to connect to. Like `https://khm.example.com/<FLOW_NAME>`
- `--known-hosts <KNOWN_HOSTS>` Client mode: Path to the known_hosts file [default: ~/.ssh/known_hosts]
## Installation ## Installation
@ -59,4 +62,4 @@ Contributions are welcome! Please open an issue or submit a pull request for any
## License ## License
This project is licensed under the WTFPL License. This project is licensed under the WTFPL License.

11
shell.nix Normal file
View File

@ -0,0 +1,11 @@
# Development shell for building khm locally via `nix-shell`.
{ pkgs ? import <nixpkgs> {} }:
pkgs.mkShell {
  # Native build dependencies — presumably required by the TLS stack of the
  # `reqwest` dependency (openssl-sys needs libssl headers and pkg-config to
  # locate them); TODO confirm against Cargo features.
  buildInputs = [
    pkgs.openssl
    pkgs.pkg-config
  ];
  shellHook = ''
    # Clear any externally set OPENSSL_DIR so pkg-config (not a hard-coded
    # path) resolves the OpenSSL provided by this shell.
    unset OPENSSL_DIR
  '';
}

View File

@ -1,3 +1,6 @@
use base64::{engine::general_purpose, Engine as _};
use log::{error, info};
use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION};
use reqwest::Client; use reqwest::Client;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::fs::File; use std::fs::File;
@ -17,15 +20,21 @@ fn read_known_hosts(file_path: &str) -> io::Result<Vec<SshKey>> {
let mut keys = Vec::new(); let mut keys = Vec::new();
for line in reader.lines() { for line in reader.lines() {
if let Ok(line) = line { match line {
let parts: Vec<&str> = line.split_whitespace().collect(); Ok(line) => {
if parts.len() >= 2 { let parts: Vec<&str> = line.split_whitespace().collect();
let server = parts[0].to_string(); if parts.len() >= 2 {
let public_key = parts[1..].join(" "); let server = parts[0].to_string();
keys.push(SshKey { server, public_key }); let public_key = parts[1..].join(" ");
keys.push(SshKey { server, public_key });
}
}
Err(e) => {
error!("Error reading line from known_hosts file: {}", e);
} }
} }
} }
info!("Read {} keys from known_hosts file", keys.len());
Ok(keys) Ok(keys)
} }
@ -36,19 +45,68 @@ fn write_known_hosts(file_path: &str, keys: &[SshKey]) -> io::Result<()> {
for key in keys { for key in keys {
writeln!(file, "{} {}", key.server, key.public_key)?; writeln!(file, "{} {}", key.server, key.public_key)?;
} }
info!("Wrote {} keys to known_hosts file", keys.len());
Ok(()) Ok(())
} }
async fn send_keys_to_server(host: &str, keys: Vec<SshKey>) -> Result<(), reqwest::Error> { // Get local hostname for request headers
fn get_hostname() -> String {
match hostname::get() {
Ok(name) => name.to_string_lossy().to_string(),
Err(_) => "unknown-host".to_string(),
}
}
async fn send_keys_to_server(
host: &str,
keys: Vec<SshKey>,
auth_string: &str,
) -> Result<(), reqwest::Error> {
let client = Client::new(); let client = Client::new();
let url = format!("{}/keys", host); let url = format!("{}/keys", host);
let response = client.post(&url).json(&keys).send().await?; info!("URL: {} ", url);
let mut headers = HeaderMap::new();
// Add hostname header
let hostname = get_hostname();
headers.insert(
"X-Client-Hostname",
HeaderValue::from_str(&hostname).unwrap_or_else(|_| {
error!("Failed to create hostname header value");
HeaderValue::from_static("unknown-host")
}),
);
info!("Adding hostname header: {}", hostname);
if !auth_string.is_empty() {
let parts: Vec<&str> = auth_string.splitn(2, ':').collect();
if parts.len() == 2 {
let username = parts[0];
let password = parts[1];
let auth_header_value = format!("{}:{}", username, password);
let encoded_auth = general_purpose::STANDARD.encode(auth_header_value);
let auth_header = format!("Basic {}", encoded_auth);
headers.insert(AUTHORIZATION, HeaderValue::from_str(&auth_header).unwrap());
} else {
error!("Invalid auth string format. Expected 'username:password'");
}
}
let response = client
.post(&url)
.headers(headers)
.json(&keys)
.send()
.await?;
if response.status().is_success() { if response.status().is_success() {
println!("Keys successfully sent to server."); info!("Keys successfully sent to server.");
} else { } else {
eprintln!( error!(
"Failed to send keys to server. Status: {}", "Failed to send keys to server. Status: {}",
response.status() response.status()
); );
@ -57,39 +115,72 @@ async fn send_keys_to_server(host: &str, keys: Vec<SshKey>) -> Result<(), reqwes
Ok(()) Ok(())
} }
async fn get_keys_from_server(host: &str) -> Result<Vec<SshKey>, reqwest::Error> { async fn get_keys_from_server(
host: &str,
auth_string: &str,
) -> Result<Vec<SshKey>, reqwest::Error> {
let client = Client::new(); let client = Client::new();
let url = format!("{}/keys", host); let url = format!("{}/keys", host);
let response = client.get(&url).send().await?;
if response.status().is_success() { let mut headers = HeaderMap::new();
let keys: Vec<SshKey> = response.json().await?;
Ok(keys) // Add hostname header
} else { let hostname = get_hostname();
eprintln!( headers.insert(
"Failed to get keys from server. Status: {}", "X-Client-Hostname",
response.status() HeaderValue::from_str(&hostname).unwrap_or_else(|_| {
); error!("Failed to create hostname header value");
Ok(vec![]) HeaderValue::from_static("unknown-host")
}),
);
info!("Adding hostname header: {}", hostname);
if !auth_string.is_empty() {
let parts: Vec<&str> = auth_string.splitn(2, ':').collect();
if parts.len() == 2 {
let username = parts[0];
let password = parts[1];
let auth_header_value = format!("{}:{}", username, password);
let encoded_auth = general_purpose::STANDARD.encode(auth_header_value);
let auth_header = format!("Basic {}", encoded_auth);
headers.insert(AUTHORIZATION, HeaderValue::from_str(&auth_header).unwrap());
} else {
error!("Invalid auth string format. Expected 'username:password'");
}
} }
let response = client.get(&url).headers(headers).send().await?;
let response = response.error_for_status()?;
let keys: Vec<SshKey> = response.json().await?;
info!("Received {} keys from server", keys.len());
Ok(keys)
} }
pub async fn run_client(args: crate::Args) -> std::io::Result<()> { pub async fn run_client(args: crate::Args) -> std::io::Result<()> {
info!("Client mode: Reading known_hosts file");
let keys = read_known_hosts(&args.known_hosts).expect("Failed to read known hosts file"); let keys = read_known_hosts(&args.known_hosts).expect("Failed to read known hosts file");
let host = args.host.expect("host is required in client mode"); let host = args.host.expect("host is required in client mode");
send_keys_to_server(&host, keys) info!("Client mode: Sending keys to server at {}", host);
send_keys_to_server(&host, keys, &args.basic_auth)
.await .await
.expect("Failed to send keys to server"); .expect("Failed to send keys to server");
if args.in_place { if args.in_place {
let server_keys = get_keys_from_server(&host) info!("Client mode: In-place update is enabled. Fetching keys from server.");
let server_keys = get_keys_from_server(&host, &args.basic_auth)
.await .await
.expect("Failed to get keys from server"); .expect("Failed to get keys from server");
info!("Client mode: Writing updated known_hosts file");
write_known_hosts(&args.known_hosts, &server_keys) write_known_hosts(&args.known_hosts, &server_keys)
.expect("Failed to write known hosts file"); .expect("Failed to write known hosts file");
} }
info!("Client mode: Finished operations");
Ok(()) Ok(())
} }

295
src/db.rs Normal file
View File

@ -0,0 +1,295 @@
use crate::server::SshKey;
use log::info;
use std::collections::HashMap;
use std::collections::HashSet;
use tokio_postgres::Client;
/// Statistics produced by [`batch_insert_keys`] describing how a submitted
/// batch of SSH keys was reconciled against the `public.keys` table.
pub struct KeyInsertStats {
    /// Total number of keys received in the batch.
    pub total: usize,
    /// Number of keys that were newly inserted into the database.
    pub inserted: usize,
    /// Number of keys that already existed unchanged in the database.
    pub unchanged: usize,
    /// Every key from the batch paired with its `key_id` in the database
    /// (both pre-existing and freshly inserted rows).
    pub key_id_map: Vec<(SshKey, i32)>,
}
/// Creates the `keys` and `flows` tables plus a lookup index on first run.
///
/// Existence of both tables is probed via `information_schema`; when either is
/// missing, all DDL is executed. The statements themselves also use
/// `IF NOT EXISTS`, so the function is idempotent either way.
///
/// # Errors
/// Returns any `tokio_postgres::Error` raised by the probe query or the DDL.
pub async fn initialize_db_schema(client: &Client) -> Result<(), tokio_postgres::Error> {
    info!("Checking and initializing database schema if needed");
    // Check if tables exist by querying information_schema.
    // `.get(0)` on the row set + `unwrap_or(false)` treats "no row returned"
    // the same as "tables missing", which is the safe default here.
    let tables_exist = client
        .query(
            "SELECT EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_schema = 'public'
                AND table_name = 'keys'
            ) AND EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_schema = 'public'
                AND table_name = 'flows'
            )",
            &[],
        )
        .await?
        .get(0)
        .map(|row| row.get::<_, bool>(0))
        .unwrap_or(false);
    if !tables_exist {
        info!("Database schema doesn't exist. Creating tables...");
        // Create the keys table: one row per unique (host, key) pair.
        client
            .execute(
                "CREATE TABLE IF NOT EXISTS public.keys (
                    key_id SERIAL PRIMARY KEY,
                    host VARCHAR(255) NOT NULL,
                    key TEXT NOT NULL,
                    updated TIMESTAMP WITH TIME ZONE NOT NULL,
                    CONSTRAINT unique_host_key UNIQUE (host, key)
                )",
                &[],
            )
            .await?;
        // Create the flows table: many-to-many link between a flow name and
        // key rows; cascade delete keeps links consistent when a key is removed.
        client
            .execute(
                "CREATE TABLE IF NOT EXISTS public.flows (
                    flow_id SERIAL PRIMARY KEY,
                    name VARCHAR(255) NOT NULL,
                    key_id INTEGER NOT NULL,
                    CONSTRAINT fk_key
                        FOREIGN KEY(key_id)
                        REFERENCES public.keys(key_id)
                        ON DELETE CASCADE,
                    CONSTRAINT unique_flow_key UNIQUE (name, key_id)
                )",
                &[],
            )
            .await?;
        // Create an index for faster lookups by flow name.
        client
            .execute(
                "CREATE INDEX IF NOT EXISTS idx_flows_name ON public.flows(name)",
                &[],
            )
            .await?;
        info!("Database schema created successfully");
    } else {
        info!("Database schema already exists");
    }
    Ok(())
}
/// Inserts a batch of SSH keys into `public.keys`, skipping rows that already
/// exist, and returns [`KeyInsertStats`] mapping every key to its `key_id`.
///
/// Works in two round-trips: one SELECT to find which (host, key) pairs are
/// already stored, then a single multi-VALUES INSERT for the remainder.
/// NOTE(review): both queries are built with one `$n` placeholder pair per
/// key, so very large batches could exceed Postgres's parameter limit
/// (65535) — confirm expected batch sizes with callers.
///
/// # Errors
/// Returns any `tokio_postgres::Error` from the SELECT or INSERT.
pub async fn batch_insert_keys(
    client: &Client,
    keys: &[SshKey],
) -> Result<KeyInsertStats, tokio_postgres::Error> {
    // Empty batch: nothing to query or insert.
    if keys.is_empty() {
        return Ok(KeyInsertStats {
            total: 0,
            inserted: 0,
            unchanged: 0,
            key_id_map: Vec::new(),
        });
    }
    // Prepare arrays for batch insertion. Borrowed &str views are collected
    // first so that `params` below can reference stable slots.
    let mut host_values: Vec<&str> = Vec::with_capacity(keys.len());
    let mut key_values: Vec<&str> = Vec::with_capacity(keys.len());
    for key in keys {
        host_values.push(&key.server);
        key_values.push(&key.public_key);
    }
    // First, check which keys already exist in the database.
    // Placeholders are numbered pairwise: key i uses $(2i+1) for host and
    // $(2i+2) for key, so the pushes into `params` below MUST stay in the
    // same host-then-key order.
    let mut existing_keys = HashMap::new();
    let mut key_query = String::from("SELECT host, key, key_id FROM public.keys WHERE ");
    for i in 0..keys.len() {
        if i > 0 {
            key_query.push_str(" OR ");
        }
        key_query.push_str(&format!("(host = ${} AND key = ${})", i * 2 + 1, i * 2 + 2));
    }
    let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
        Vec::with_capacity(keys.len() * 2);
    for i in 0..keys.len() {
        params.push(&host_values[i]);
        params.push(&key_values[i]);
    }
    let rows = client.query(&key_query, &params[..]).await?;
    for row in rows {
        let host: String = row.get(0);
        let key: String = row.get(1);
        let key_id: i32 = row.get(2);
        existing_keys.insert((host, key), key_id);
    }
    // Determine which keys need to be inserted and which already exist.
    let mut keys_to_insert = Vec::new();
    let mut unchanged_keys = Vec::new();
    for key in keys {
        let key_tuple = (key.server.clone(), key.public_key.clone());
        if existing_keys.contains_key(&key_tuple) {
            unchanged_keys.push((key.clone(), *existing_keys.get(&key_tuple).unwrap()));
        } else {
            keys_to_insert.push(key.clone());
        }
    }
    let mut inserted_keys = Vec::new();
    // If there are keys to insert, perform the insertion in one statement.
    if !keys_to_insert.is_empty() {
        let mut insert_sql = String::from("INSERT INTO public.keys (host, key, updated) VALUES ");
        let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = Vec::new();
        let mut param_count = 1;
        for (i, key) in keys_to_insert.iter().enumerate() {
            if i > 0 {
                insert_sql.push_str(", ");
            }
            insert_sql.push_str(&format!("(${}, ${}, NOW())", param_count, param_count + 1));
            insert_params.push(&key.server);
            insert_params.push(&key.public_key);
            param_count += 2;
        }
        insert_sql.push_str(" RETURNING key_id, host, key");
        let inserted_rows = client.query(&insert_sql, &insert_params[..]).await?;
        // Match each RETURNING row back to its source key to record the new id.
        // (Linear scan per row; fine for typical batch sizes.)
        for row in inserted_rows {
            let host: String = row.get(1);
            let key_text: String = row.get(2);
            let key_id: i32 = row.get(0);
            if let Some(orig_key) = keys_to_insert
                .iter()
                .find(|k| k.server == host && k.public_key == key_text)
            {
                inserted_keys.push((orig_key.clone(), key_id));
            }
        }
    }
    // Save the number of elements before combining.
    let inserted_count = inserted_keys.len();
    let unchanged_count = unchanged_keys.len();
    // Combine results and generate statistics.
    let mut key_id_map = Vec::with_capacity(unchanged_count + inserted_count);
    key_id_map.extend(unchanged_keys);
    key_id_map.extend(inserted_keys);
    let stats = KeyInsertStats {
        total: keys.len(),
        inserted: inserted_count,
        unchanged: unchanged_count,
        key_id_map,
    };
    info!(
        "Keys stats: received={}, new={}, unchanged={}",
        stats.total, stats.inserted, stats.unchanged
    );
    Ok(stats)
}
/// Associates the given `key_ids` with flow `flow_name` in `public.flows`,
/// inserting only associations that do not already exist, and returns the
/// number of newly created rows.
///
/// A SELECT first narrows the batch to genuinely new associations; the INSERT
/// additionally carries `ON CONFLICT ... DO NOTHING` as a belt-and-braces
/// guard against races between the two statements.
///
/// # Errors
/// Returns any `tokio_postgres::Error` from the SELECT or INSERT.
pub async fn batch_insert_flow_keys(
    client: &Client,
    flow_name: &str,
    key_ids: &[i32],
) -> Result<usize, tokio_postgres::Error> {
    if key_ids.is_empty() {
        info!("No keys to associate with flow '{}'", flow_name);
        return Ok(0);
    }
    // First, check which associations already exist.
    // $1 is the flow name, so key id i maps to placeholder $(i+2) — the
    // parameter pushes below must keep that exact order.
    let mut existing_query =
        String::from("SELECT key_id FROM public.flows WHERE name = $1 AND key_id IN (");
    for i in 0..key_ids.len() {
        if i > 0 {
            existing_query.push_str(", ");
        }
        existing_query.push_str(&format!("${}", i + 2));
    }
    existing_query.push_str(")");
    let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
        Vec::with_capacity(key_ids.len() + 1);
    params.push(&flow_name);
    for key_id in key_ids {
        params.push(key_id);
    }
    let rows = client.query(&existing_query, &params[..]).await?;
    let mut existing_associations = HashSet::new();
    for row in rows {
        let key_id: i32 = row.get(0);
        existing_associations.insert(key_id);
    }
    // Filter only keys that are not yet associated with the flow.
    let new_key_ids: Vec<&i32> = key_ids
        .iter()
        .filter(|&id| !existing_associations.contains(id))
        .collect();
    if new_key_ids.is_empty() {
        info!(
            "All {} keys are already associated with flow '{}'",
            key_ids.len(),
            flow_name
        );
        return Ok(0);
    }
    // Build SQL query with multiple values only for new associations.
    // Same numbering scheme: $1 = flow name, $(i+2) = i-th new key id.
    let mut sql = String::from("INSERT INTO public.flows (name, key_id) VALUES ");
    for i in 0..new_key_ids.len() {
        if i > 0 {
            sql.push_str(", ");
        }
        sql.push_str(&format!("($1, ${})", i + 2));
    }
    sql.push_str(" ON CONFLICT (name, key_id) DO NOTHING");
    // Prepare parameters for the query.
    let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
        Vec::with_capacity(new_key_ids.len() + 1);
    insert_params.push(&flow_name);
    for key_id in &new_key_ids {
        insert_params.push(*key_id);
    }
    // Execute query; `execute` returns the number of rows actually inserted
    // (conflict-skipped rows are not counted).
    let affected = client.execute(&sql, &insert_params[..]).await?;
    let affected_usize = affected as usize;
    info!(
        "Added {} new key-flow associations for flow '{}' (skipped {} existing)",
        affected_usize,
        flow_name,
        existing_associations.len()
    );
    Ok(affected_usize)
}

View File

@ -1,8 +1,10 @@
mod client; mod client;
mod db;
mod server; mod server;
use clap::Parser; use clap::Parser;
use env_logger; use env_logger;
use log::{error, info};
/// This application manages SSH keys and flows, either as a server or client. /// This application manages SSH keys and flows, either as a server or client.
/// In server mode, it stores keys and flows in a PostgreSQL database. /// In server mode, it stores keys and flows in a PostgreSQL database.
@ -11,19 +13,34 @@ use env_logger;
#[command( #[command(
author = env!("CARGO_PKG_AUTHORS"), author = env!("CARGO_PKG_AUTHORS"),
version = env!("CARGO_PKG_VERSION"), version = env!("CARGO_PKG_VERSION"),
about = "SSH Key Manager", about = "SSH Host Key Manager",
long_about = None, long_about = None,
after_help = "Examples:\n\ after_help = "Examples:\n\
\n\ \n\
Running in server mode:\n\ Running in server mode:\n\
khm --server --ip 0.0.0.0 --port 1337 --db-host psql.psql.svc --db-name khm --db-user admin --db-password <SECRET> --flows work,home\n\ khm --server --ip 0.0.0.0 --port 1337 --db-host psql.psql.svc --db-name khm --db-user admin --db-password <SECRET> --flows work,home\n\
\n\ \n\
Running in client mode to send diff and sync ~/.ssh/known_hosts with remote flow in place:\n\ Running in client mode to send diff and sync ~/.ssh/known_hosts with remote flow `work` in place:\n\
khm --host http://kh.example.com:8080 --known_hosts ~/.ssh/known_hosts --in-place\n\ khm --host https://khm.example.com/work --known-hosts ~/.ssh/known_hosts --in-place\n\
\n\ \n\
" "
)] )]
struct Args { struct Args {
/// Run in server mode (default: false)
#[arg(long, help = "Run in server mode")]
server: bool,
/// Update the known_hosts file with keys from the server after sending keys (default: false)
#[arg(
long,
help = "Server mode: Sync the known_hosts file with keys from the server"
)]
in_place: bool,
/// Comma-separated list of flows to manage (default: default)
#[arg(long, default_value = "default", value_parser, num_args = 1.., value_delimiter = ',', help = "Server mode: Comma-separated list of flows to manage")]
flows: Vec<String>,
/// IP address to bind the server or client to (default: 127.0.0.1) /// IP address to bind the server or client to (default: 127.0.0.1)
#[arg( #[arg(
short, short,
@ -78,14 +95,10 @@ struct Args {
#[arg( #[arg(
long, long,
required_if_eq("server", "false"), required_if_eq("server", "false"),
help = "Client mode: Host address of the server to connect to" help = "Client mode: Full host address of the server to connect to. Like https://khm.example.com/<FLOW_NAME>"
)] )]
host: Option<String>, host: Option<String>,
/// Run in server mode (default: false)
#[arg(long, help = "Run in server mode")]
server: bool,
/// Path to the known_hosts file (default: ~/.ssh/known_hosts) /// Path to the known_hosts file (default: ~/.ssh/known_hosts)
#[arg( #[arg(
long, long,
@ -94,27 +107,30 @@ struct Args {
)] )]
known_hosts: String, known_hosts: String,
/// Update the known_hosts file with keys from the server after sending keys (default: false) /// Basic auth string for client mode. Format: user:pass
#[arg( #[arg(long, default_value = "", help = "Client mode: Basic Auth credentials")]
long, basic_auth: String,
help = "Server mode: Sync the known_hosts file with keys from the server"
)]
in_place: bool,
/// Comma-separated list of flows to manage (default: default)
#[arg(long, default_value = "default", value_parser, num_args = 1.., value_delimiter = ',', help = "Comma-separated list of flows to manage")]
flows: Vec<String>,
} }
#[actix_web::main] #[actix_web::main]
async fn main() -> std::io::Result<()> { async fn main() -> std::io::Result<()> {
env_logger::init(); env_logger::init();
info!("Starting SSH Key Manager");
let args = Args::parse(); let args = Args::parse();
if args.server { if args.server {
server::run_server(args).await info!("Running in server mode");
if let Err(e) = server::run_server(args).await {
error!("Failed to run server: {}", e);
}
} else { } else {
client::run_client(args).await info!("Running in client mode");
if let Err(e) = client::run_client(args).await {
error!("Failed to run client: {}", e);
}
} }
info!("Application has exited");
Ok(())
} }

View File

@ -1,11 +1,13 @@
use actix_web::{web, App, HttpResponse, HttpServer, Responder}; use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer, Responder};
use log; use log::{error, info};
use regex::Regex; use regex::Regex;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use tokio_postgres::{Client, NoTls}; use tokio_postgres::{Client, NoTls};
use crate::db;
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SshKey { pub struct SshKey {
pub server: String, pub server: String,
@ -33,48 +35,6 @@ pub fn is_valid_ssh_key(key: &str) -> bool {
|| ed25519_re.is_match(key) || ed25519_re.is_match(key)
} }
/// Upsert a single SSH key row in `public.keys`.
///
/// If a row with the same `(host, key)` pair already exists, its `updated`
/// timestamp is refreshed; otherwise a new row is inserted. In both cases the
/// row's `key_id` is returned so the caller can link it to a flow.
pub async fn insert_key_if_not_exists(
    client: &Client,
    key: &SshKey,
) -> Result<i32, tokio_postgres::Error> {
    // Look for an existing row matching this host/public-key pair.
    let existing = client
        .query_opt(
            "SELECT key_id FROM public.keys WHERE host = $1 AND key = $2",
            &[&key.server, &key.public_key],
        )
        .await?;

    match existing {
        Some(row) => {
            // Key already known: just bump its `updated` timestamp.
            let key_id: i32 = row.get(0);
            client
                .execute(
                    "UPDATE public.keys SET updated = NOW() WHERE key_id = $1",
                    &[&key_id],
                )
                .await?;
            Ok(key_id)
        }
        None => {
            // First time we see this key: insert it and return the generated id.
            let inserted = client.query_one(
                "INSERT INTO public.keys (host, key, updated) VALUES ($1, $2, NOW()) RETURNING key_id",
                &[&key.server, &key.public_key]
            ).await?;
            Ok(inserted.get(0))
        }
    }
}
/// Associate an existing key (`key_id`) with the named flow.
///
/// The insert is idempotent: `ON CONFLICT DO NOTHING` makes re-linking an
/// already-associated key a silent no-op, so callers may retry freely.
pub async fn insert_flow_key(
    client: &Client,
    flow_name: &str,
    key_id: i32,
) -> Result<(), tokio_postgres::Error> {
    client
        .execute(
            "INSERT INTO public.flows (name, key_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
            &[&flow_name, &key_id],
        )
        .await
        // The affected-row count is irrelevant here; only success/failure matters.
        .map(|_| ())
}
pub async fn get_keys_from_db(client: &Client) -> Result<Vec<Flow>, tokio_postgres::Error> { pub async fn get_keys_from_db(client: &Client) -> Result<Vec<Flow>, tokio_postgres::Error> {
let rows = client.query( let rows = client.query(
"SELECT k.host, k.key, f.name FROM public.keys k INNER JOIN public.flows f ON k.key_id = f.key_id", "SELECT k.host, k.key, f.name FROM public.keys k INNER JOIN public.flows f ON k.key_id = f.key_id",
@ -106,24 +66,57 @@ pub async fn get_keys_from_db(client: &Client) -> Result<Vec<Flow>, tokio_postgr
} }
} }
info!("Retrieved {} flows from database", flows_map.len());
Ok(flows_map.into_values().collect()) Ok(flows_map.into_values().collect())
} }
// Extract the client's self-reported hostname from the `X-Client-Hostname`
// request header, used purely for log attribution. Falls back to a fixed
// placeholder when the header is absent or not valid UTF-8.
fn get_client_hostname(req: &HttpRequest) -> String {
    req.headers()
        .get("X-Client-Hostname")
        .and_then(|value| value.to_str().ok())
        .map(str::to_string)
        .unwrap_or_else(|| "unknown-client".to_string())
}
pub async fn get_keys( pub async fn get_keys(
flows: web::Data<Flows>, flows: web::Data<Flows>,
flow_id: web::Path<String>, flow_id: web::Path<String>,
allowed_flows: web::Data<Vec<String>>, allowed_flows: web::Data<Vec<String>>,
req: HttpRequest,
) -> impl Responder { ) -> impl Responder {
let client_hostname = get_client_hostname(&req);
let flow_id_str = flow_id.into_inner(); let flow_id_str = flow_id.into_inner();
info!(
"Received keys request from client '{}' for flow '{}'",
client_hostname, flow_id_str
);
if !allowed_flows.contains(&flow_id_str) { if !allowed_flows.contains(&flow_id_str) {
error!(
"Flow ID not allowed for client '{}': {}",
client_hostname, flow_id_str
);
return HttpResponse::Forbidden().body("Flow ID not allowed"); return HttpResponse::Forbidden().body("Flow ID not allowed");
} }
let flows = flows.lock().unwrap(); let flows = flows.lock().unwrap();
if let Some(flow) = flows.iter().find(|flow| flow.name == flow_id_str) { if let Some(flow) = flows.iter().find(|flow| flow.name == flow_id_str) {
let servers: Vec<&SshKey> = flow.servers.iter().collect(); let servers: Vec<&SshKey> = flow.servers.iter().collect();
info!(
"Returning {} keys for flow '{}' to client '{}'",
servers.len(),
flow_id_str,
client_hostname
);
HttpResponse::Ok().json(servers) HttpResponse::Ok().json(servers)
} else { } else {
error!(
"Flow ID not found for client '{}': {}",
client_hostname, flow_id_str
);
HttpResponse::NotFound().body("Flow ID not found") HttpResponse::NotFound().body("Flow ID not found")
} }
} }
@ -134,48 +127,132 @@ pub async fn add_keys(
new_keys: web::Json<Vec<SshKey>>, new_keys: web::Json<Vec<SshKey>>,
db_client: web::Data<Arc<Client>>, db_client: web::Data<Arc<Client>>,
allowed_flows: web::Data<Vec<String>>, allowed_flows: web::Data<Vec<String>>,
req: HttpRequest,
) -> impl Responder { ) -> impl Responder {
let client_hostname = get_client_hostname(&req);
let flow_id_str = flow_id.into_inner(); let flow_id_str = flow_id.into_inner();
info!(
"Received {} keys from client '{}' for flow '{}'",
new_keys.len(),
client_hostname,
flow_id_str
);
if !allowed_flows.contains(&flow_id_str) { if !allowed_flows.contains(&flow_id_str) {
error!(
"Flow ID not allowed for client '{}': {}",
client_hostname, flow_id_str
);
return HttpResponse::Forbidden().body("Flow ID not allowed"); return HttpResponse::Forbidden().body("Flow ID not allowed");
} }
// Check SSH key format
let mut valid_keys = Vec::new();
for new_key in new_keys.iter() { for new_key in new_keys.iter() {
if !is_valid_ssh_key(&new_key.public_key) { if !is_valid_ssh_key(&new_key.public_key) {
error!(
"Invalid SSH key format from client '{}' for server: {}",
client_hostname, new_key.server
);
return HttpResponse::BadRequest().body(format!( return HttpResponse::BadRequest().body(format!(
"Invalid SSH key format for server: {}", "Invalid SSH key format for server: {}",
new_key.server new_key.server
)); ));
} }
valid_keys.push(new_key.clone());
match insert_key_if_not_exists(&db_client, new_key).await {
Ok(key_id) => {
if let Err(e) = insert_flow_key(&db_client, &flow_id_str, key_id).await {
log::error!("Failed to insert flow key into database: {}", e);
return HttpResponse::InternalServerError()
.body("Failed to insert flow key into database");
}
}
Err(e) => {
log::error!("Failed to insert key into database: {}", e);
return HttpResponse::InternalServerError()
.body("Failed to insert key into database");
}
}
} }
// Refresh the flows data from the database info!(
let updated_flows = get_keys_from_db(&db_client) "Processing batch of {} keys from client '{}' for flow: {}",
.await valid_keys.len(),
.unwrap_or_else(|_| Vec::new()); client_hostname,
flow_id_str
);
// Batch insert keys with statistics
let key_stats = match crate::db::batch_insert_keys(&db_client, &valid_keys).await {
Ok(stats) => stats,
Err(e) => {
error!(
"Failed to batch insert keys from client '{}' into database: {}",
client_hostname, e
);
return HttpResponse::InternalServerError()
.body("Failed to batch insert keys into database");
}
};
// Always try to associate all keys with the flow, regardless of whether they're new or existing
if !key_stats.key_id_map.is_empty() {
// Extract all key IDs from statistics, both new and existing
let key_ids: Vec<i32> = key_stats.key_id_map.iter().map(|(_, id)| *id).collect();
// Batch insert key-flow associations
if let Err(e) = crate::db::batch_insert_flow_keys(&db_client, &flow_id_str, &key_ids).await
{
error!(
"Failed to batch insert flow keys from client '{}' into database: {}",
client_hostname, e
);
return HttpResponse::InternalServerError()
.body("Failed to batch insert flow keys into database");
}
info!(
"Added flow associations for {} keys from client '{}' in flow '{}'",
key_ids.len(),
client_hostname,
flow_id_str
);
} else {
info!(
"No keys to associate from client '{}' with flow '{}'",
client_hostname, flow_id_str
);
}
// Get updated data
let updated_flows = match get_keys_from_db(&db_client).await {
Ok(flows) => flows,
Err(e) => {
error!(
"Failed to get updated flows from database after client '{}' request: {}",
client_hostname, e
);
return HttpResponse::InternalServerError()
.body("Failed to refresh flows from database");
}
};
let mut flows_guard = flows.lock().unwrap(); let mut flows_guard = flows.lock().unwrap();
*flows_guard = updated_flows; *flows_guard = updated_flows;
let updated_flow = flows_guard.iter().find(|flow| flow.name == flow_id_str); let updated_flow = flows_guard.iter().find(|flow| flow.name == flow_id_str);
if let Some(flow) = updated_flow { if let Some(flow) = updated_flow {
let servers: Vec<&SshKey> = flow.servers.iter().collect(); let servers: Vec<&SshKey> = flow.servers.iter().collect();
HttpResponse::Ok().json(servers) info!(
"Keys summary for client '{}', flow '{}': total received={}, new={}, unchanged={}, total in flow={}",
client_hostname,
flow_id_str,
key_stats.total,
key_stats.inserted,
key_stats.unchanged,
servers.len()
);
// Add statistics to HTTP response headers
let mut response = HttpResponse::Ok();
response.append_header(("X-Keys-Total", key_stats.total.to_string()));
response.append_header(("X-Keys-New", key_stats.inserted.to_string()));
response.append_header(("X-Keys-Unchanged", key_stats.unchanged.to_string()));
response.json(servers)
} else { } else {
error!(
"Flow ID not found after update from client '{}': {}",
client_hostname, flow_id_str
);
HttpResponse::NotFound().body("Flow ID not found") HttpResponse::NotFound().body("Flow ID not found")
} }
} }
@ -191,19 +268,42 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
args.db_host, db_user, db_password, args.db_name args.db_host, db_user, db_password, args.db_name
); );
let (db_client, connection) = tokio_postgres::connect(&db_conn_str, NoTls).await.unwrap(); info!("Connecting to database at {}", args.db_host);
let (db_client, connection) = match tokio_postgres::connect(&db_conn_str, NoTls).await {
Ok((client, conn)) => (client, conn),
Err(e) => {
error!("Failed to connect to the database: {}", e);
return Err(std::io::Error::new(
std::io::ErrorKind::ConnectionRefused,
format!("Database connection error: {}", e),
));
}
};
let db_client = Arc::new(db_client); let db_client = Arc::new(db_client);
// Spawn a new thread to run the database connection // Spawn a new thread to run the database connection
tokio::spawn(async move { tokio::spawn(async move {
if let Err(e) = connection.await { if let Err(e) = connection.await {
eprintln!("Connection error: {}", e); error!("Connection error: {}", e);
} }
}); });
let mut initial_flows = get_keys_from_db(&db_client) // Initialize database schema if needed
.await if let Err(e) = db::initialize_db_schema(&db_client).await {
.unwrap_or_else(|_| Vec::new()); error!("Failed to initialize database schema: {}", e);
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("Database schema initialization error: {}", e),
));
}
let mut initial_flows = match get_keys_from_db(&db_client).await {
Ok(flows) => flows,
Err(e) => {
error!("Failed to get initial flows from database: {}", e);
Vec::new()
}
};
// Ensure all allowed flows are initialized // Ensure all allowed flows are initialized
for allowed_flow in &args.flows { for allowed_flow in &args.flows {
@ -218,6 +318,7 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
let flows: Flows = Arc::new(Mutex::new(initial_flows)); let flows: Flows = Arc::new(Mutex::new(initial_flows));
let allowed_flows = web::Data::new(args.flows); let allowed_flows = web::Data::new(args.flows);
info!("Starting HTTP server on {}:{}", args.ip, args.port);
HttpServer::new(move || { HttpServer::new(move || {
App::new() App::new()
.app_data(web::Data::new(flows.clone())) .app_data(web::Data::new(flows.clone()))