mirror of https://github.com/house-of-vanity/khm.git
synced 2025-07-08 07:44:08 +00:00

Compare commits

2 Commits: 3bedadf394, a0f83db19a
.github/workflows/main.yml (vendored), 13 changes

@@ -57,6 +57,10 @@ jobs:
 
       - uses: dtolnay/rust-toolchain@stable
 
+      - uses: mbrobbel/rustfmt-check@master
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+
       - name: Install rust targets
         run: rustup target add ${{ matrix.build_target }}
 

@@ -90,16 +94,13 @@ jobs:
       contents: write
     steps:
       - uses: actions/checkout@v4
-      - name: Create Release
+      - uses: ncipollo/release-action@v1
        id: create_release
-        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
-          tag_name: ${{ github.ref }}
-          release_name: Release ${{ github.ref }}
-          draft: false
-          prerelease: false
+          allowUpdates: true
+          #artifacts: "release.tar.gz,foo/*.txt"
 
   upload:
     name: Upload Release Assets
.gitignore (vendored), 2 changes

@@ -1,3 +1 @@
 /target
-*.swp
-*.swo
Cargo.lock (generated), 42 changes

@@ -836,17 +836,6 @@ dependencies = [
  "digest",
 ]
 
-[[package]]
-name = "hostname"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
-dependencies = [
- "libc",
- "match_cfg",
- "winapi",
-]
-
 [[package]]
 name = "http"
 version = "0.2.12"

@@ -1064,14 +1053,13 @@ dependencies = [
 
 [[package]]
 name = "khm"
-version = "0.4.1"
+version = "0.2.2"
 dependencies = [
  "actix-web",
  "base64 0.21.7",
  "chrono",
  "clap",
  "env_logger",
- "hostname",
  "log",
  "regex",
  "reqwest",

@@ -1132,12 +1120,6 @@ version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
-[[package]]
-name = "match_cfg"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
-
 [[package]]
 name = "md-5"
 version = "0.10.6"

@@ -2241,28 +2223,6 @@ dependencies = [
  "web-sys",
 ]
 
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
 [[package]]
 name = "windows-core"
 version = "0.52.0"
Cargo.toml

@@ -1,8 +1,8 @@
 [package]
 name = "khm"
-version = "0.4.2"
+version = "0.3.0"
 edition = "2021"
-authors = ["AB <ab@hexor.cy>"]
+authors = ["AB <ab@hexor.cy>", "ChatGPT-4o"]
 
 [dependencies]
 actix-web = "4"

@@ -17,4 +17,3 @@ tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
 clap = { version = "4", features = ["derive"] }
 chrono = "0.4.38"
 reqwest = { version = "0.12", features = ["json"] }
-hostname = "0.3"
src/client.rs

@@ -50,14 +50,6 @@ fn write_known_hosts(file_path: &str, keys: &[SshKey]) -> io::Result<()> {
     Ok(())
 }
 
-// Get local hostname for request headers
-fn get_hostname() -> String {
-    match hostname::get() {
-        Ok(name) => name.to_string_lossy().to_string(),
-        Err(_) => "unknown-host".to_string(),
-    }
-}
-
 async fn send_keys_to_server(
     host: &str,
     keys: Vec<SshKey>,

@@ -69,17 +61,6 @@ async fn send_keys_to_server(
 
     let mut headers = HeaderMap::new();
 
-    // Add hostname header
-    let hostname = get_hostname();
-    headers.insert(
-        "X-Client-Hostname",
-        HeaderValue::from_str(&hostname).unwrap_or_else(|_| {
-            error!("Failed to create hostname header value");
-            HeaderValue::from_static("unknown-host")
-        }),
-    );
-    info!("Adding hostname header: {}", hostname);
-
     if !auth_string.is_empty() {
         let parts: Vec<&str> = auth_string.splitn(2, ':').collect();
         if parts.len() == 2 {

@@ -124,17 +105,6 @@ async fn get_keys_from_server(
 
     let mut headers = HeaderMap::new();
 
-    // Add hostname header
-    let hostname = get_hostname();
-    headers.insert(
-        "X-Client-Hostname",
-        HeaderValue::from_str(&hostname).unwrap_or_else(|_| {
-            error!("Failed to create hostname header value");
-            HeaderValue::from_static("unknown-host")
-        }),
-    );
-    info!("Adding hostname header: {}", hostname);
-
     if !auth_string.is_empty() {
         let parts: Vec<&str> = auth_string.splitn(2, ':').collect();
         if parts.len() == 2 {
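
The client hunks above and the src/server.rs hunks further down remove both halves of the X-Client-Hostname round trip. A minimal sketch of the removed behaviour, using the same reqwest and actix-web header types that appear in this diff (the two helper names are illustrative, not functions from the repository):

use actix_web::HttpRequest;
use reqwest::header::{HeaderMap, HeaderValue};

// Client side (see the removed code in send_keys_to_server / get_keys_from_server):
// attach the local hostname, falling back to a static value if it cannot be encoded.
fn hostname_headers(hostname: &str) -> HeaderMap {
    let mut headers = HeaderMap::new();
    headers.insert(
        "X-Client-Hostname",
        HeaderValue::from_str(hostname)
            .unwrap_or_else(|_| HeaderValue::from_static("unknown-host")),
    );
    headers
}

// Server side (see the removed get_client_hostname in src/server.rs):
// read the header back, defaulting to "unknown-client" when it is missing.
fn client_hostname(req: &HttpRequest) -> String {
    req.headers()
        .get("X-Client-Hostname")
        .and_then(|v| v.to_str().ok())
        .map(|s| s.to_string())
        .unwrap_or_else(|| "unknown-client".to_string())
}
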
src/db.rs, 295 changes

@@ -1,295 +0,0 @@
-use crate::server::SshKey;
-use log::info;
-use std::collections::HashMap;
-use std::collections::HashSet;
-use tokio_postgres::Client;
-
-// Structure for storing key processing statistics
-pub struct KeyInsertStats {
-    pub total: usize,                   // Total number of received keys
-    pub inserted: usize,                // Number of new keys
-    pub unchanged: usize,               // Number of unchanged keys
-    pub key_id_map: Vec<(SshKey, i32)>, // Mapping of keys to their IDs in the database
-}
-
-pub async fn initialize_db_schema(client: &Client) -> Result<(), tokio_postgres::Error> {
-    info!("Checking and initializing database schema if needed");
-
-    // Check if tables exist by querying information_schema
-    let tables_exist = client
-        .query(
-            "SELECT EXISTS (
-                SELECT FROM information_schema.tables
-                WHERE table_schema = 'public'
-                AND table_name = 'keys'
-            ) AND EXISTS (
-                SELECT FROM information_schema.tables
-                WHERE table_schema = 'public'
-                AND table_name = 'flows'
-            )",
-            &[],
-        )
-        .await?
-        .get(0)
-        .map(|row| row.get::<_, bool>(0))
-        .unwrap_or(false);
-
-    if !tables_exist {
-        info!("Database schema doesn't exist. Creating tables...");
-
-        // Create the keys table
-        client
-            .execute(
-                "CREATE TABLE IF NOT EXISTS public.keys (
-                    key_id SERIAL PRIMARY KEY,
-                    host VARCHAR(255) NOT NULL,
-                    key TEXT NOT NULL,
-                    updated TIMESTAMP WITH TIME ZONE NOT NULL,
-                    CONSTRAINT unique_host_key UNIQUE (host, key)
-                )",
-                &[],
-            )
-            .await?;
-
-        // Create the flows table
-        client
-            .execute(
-                "CREATE TABLE IF NOT EXISTS public.flows (
-                    flow_id SERIAL PRIMARY KEY,
-                    name VARCHAR(255) NOT NULL,
-                    key_id INTEGER NOT NULL,
-                    CONSTRAINT fk_key
-                        FOREIGN KEY(key_id)
-                        REFERENCES public.keys(key_id)
-                        ON DELETE CASCADE,
-                    CONSTRAINT unique_flow_key UNIQUE (name, key_id)
-                )",
-                &[],
-            )
-            .await?;
-
-        // Create an index for faster lookups
-        client
-            .execute(
-                "CREATE INDEX IF NOT EXISTS idx_flows_name ON public.flows(name)",
-                &[],
-            )
-            .await?;
-
-        info!("Database schema created successfully");
-    } else {
-        info!("Database schema already exists");
-    }
-
-    Ok(())
-}
-
-pub async fn batch_insert_keys(
-    client: &Client,
-    keys: &[SshKey],
-) -> Result<KeyInsertStats, tokio_postgres::Error> {
-    if keys.is_empty() {
-        return Ok(KeyInsertStats {
-            total: 0,
-            inserted: 0,
-            unchanged: 0,
-            key_id_map: Vec::new(),
-        });
-    }
-
-    // Prepare arrays for batch insertion
-    let mut host_values: Vec<&str> = Vec::with_capacity(keys.len());
-    let mut key_values: Vec<&str> = Vec::with_capacity(keys.len());
-
-    for key in keys {
-        host_values.push(&key.server);
-        key_values.push(&key.public_key);
-    }
-
-    // First, check which keys already exist in the database
-    let mut existing_keys = HashMap::new();
-    let mut key_query = String::from("SELECT host, key, key_id FROM public.keys WHERE ");
-
-    for i in 0..keys.len() {
-        if i > 0 {
-            key_query.push_str(" OR ");
-        }
-        key_query.push_str(&format!("(host = ${} AND key = ${})", i * 2 + 1, i * 2 + 2));
-    }
-
-    let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
-        Vec::with_capacity(keys.len() * 2);
-    for i in 0..keys.len() {
-        params.push(&host_values[i]);
-        params.push(&key_values[i]);
-    }
-
-    let rows = client.query(&key_query, &params[..]).await?;
-
-    for row in rows {
-        let host: String = row.get(0);
-        let key: String = row.get(1);
-        let key_id: i32 = row.get(2);
-        existing_keys.insert((host, key), key_id);
-    }
-
-    // Determine which keys need to be inserted and which already exist
-    let mut keys_to_insert = Vec::new();
-    let mut unchanged_keys = Vec::new();
-
-    for key in keys {
-        let key_tuple = (key.server.clone(), key.public_key.clone());
-        if existing_keys.contains_key(&key_tuple) {
-            unchanged_keys.push((key.clone(), *existing_keys.get(&key_tuple).unwrap()));
-        } else {
-            keys_to_insert.push(key.clone());
-        }
-    }
-
-    let mut inserted_keys = Vec::new();
-
-    // If there are keys to insert, perform the insertion
-    if !keys_to_insert.is_empty() {
-        let mut insert_sql = String::from("INSERT INTO public.keys (host, key, updated) VALUES ");
-
-        let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = Vec::new();
-        let mut param_count = 1;
-
-        for (i, key) in keys_to_insert.iter().enumerate() {
-            if i > 0 {
-                insert_sql.push_str(", ");
-            }
-            insert_sql.push_str(&format!("(${}, ${}, NOW())", param_count, param_count + 1));
-            insert_params.push(&key.server);
-            insert_params.push(&key.public_key);
-            param_count += 2;
-        }
-
-        insert_sql.push_str(" RETURNING key_id, host, key");
-
-        let inserted_rows = client.query(&insert_sql, &insert_params[..]).await?;
-
-        for row in inserted_rows {
-            let host: String = row.get(1);
-            let key_text: String = row.get(2);
-            let key_id: i32 = row.get(0);
-
-            if let Some(orig_key) = keys_to_insert
-                .iter()
-                .find(|k| k.server == host && k.public_key == key_text)
-            {
-                inserted_keys.push((orig_key.clone(), key_id));
-            }
-        }
-    }
-
-    // Save the number of elements before combining
-    let inserted_count = inserted_keys.len();
-    let unchanged_count = unchanged_keys.len();
-
-    // Combine results and generate statistics
-    let mut key_id_map = Vec::with_capacity(unchanged_count + inserted_count);
-    key_id_map.extend(unchanged_keys);
-    key_id_map.extend(inserted_keys);
-
-    let stats = KeyInsertStats {
-        total: keys.len(),
-        inserted: inserted_count,
-        unchanged: unchanged_count,
-        key_id_map,
-    };
-
-    info!(
-        "Keys stats: received={}, new={}, unchanged={}",
-        stats.total, stats.inserted, stats.unchanged
-    );
-
-    Ok(stats)
-}
-
-pub async fn batch_insert_flow_keys(
-    client: &Client,
-    flow_name: &str,
-    key_ids: &[i32],
-) -> Result<usize, tokio_postgres::Error> {
-    if key_ids.is_empty() {
-        info!("No keys to associate with flow '{}'", flow_name);
-        return Ok(0);
-    }
-
-    // First, check which associations already exist
-    let mut existing_query =
-        String::from("SELECT key_id FROM public.flows WHERE name = $1 AND key_id IN (");
-
-    for i in 0..key_ids.len() {
-        if i > 0 {
-            existing_query.push_str(", ");
-        }
-        existing_query.push_str(&format!("${}", i + 2));
-    }
-    existing_query.push_str(")");
-
-    let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
-        Vec::with_capacity(key_ids.len() + 1);
-    params.push(&flow_name);
-    for key_id in key_ids {
-        params.push(key_id);
-    }
-
-    let rows = client.query(&existing_query, &params[..]).await?;
-
-    let mut existing_associations = HashSet::new();
-    for row in rows {
-        let key_id: i32 = row.get(0);
-        existing_associations.insert(key_id);
-    }
-
-    // Filter only keys that are not yet associated with the flow
-    let new_key_ids: Vec<&i32> = key_ids
-        .iter()
-        .filter(|&id| !existing_associations.contains(id))
-        .collect();
-
-    if new_key_ids.is_empty() {
-        info!(
-            "All {} keys are already associated with flow '{}'",
-            key_ids.len(),
-            flow_name
-        );
-        return Ok(0);
-    }
-
-    // Build SQL query with multiple values only for new associations
-    let mut sql = String::from("INSERT INTO public.flows (name, key_id) VALUES ");
-
-    for i in 0..new_key_ids.len() {
-        if i > 0 {
-            sql.push_str(", ");
-        }
-        sql.push_str(&format!("($1, ${})", i + 2));
-    }
-
-    sql.push_str(" ON CONFLICT (name, key_id) DO NOTHING");
-
-    // Prepare parameters for the query
-    let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
-        Vec::with_capacity(new_key_ids.len() + 1);
-    insert_params.push(&flow_name);
-    for key_id in &new_key_ids {
-        insert_params.push(*key_id);
-    }
-
-    // Execute query
-    let affected = client.execute(&sql, &insert_params[..]).await?;
-
-    let affected_usize = affected as usize;
-
-    info!(
-        "Added {} new key-flow associations for flow '{}' (skipped {} existing)",
-        affected_usize,
-        flow_name,
-        existing_associations.len()
-    );
-
-    Ok(affected_usize)
-}
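
Taken together, the two batch helpers removed above make up the write path that the old add_keys handler used. A minimal usage sketch, assuming a connected tokio_postgres::Client and the SshKey type from src/server.rs (the wrapper function itself is illustrative, not part of the crate):

use crate::server::SshKey;
use tokio_postgres::Client;

// Store one batch of reported keys and attach every key in it to a single flow,
// in the same order the old add_keys handler called these helpers.
async fn store_keys_for_flow(
    client: &Client,
    flow_name: &str,
    keys: &[SshKey],
) -> Result<(), tokio_postgres::Error> {
    // Inserts only keys missing from public.keys; the stats report new vs unchanged.
    let stats = batch_insert_keys(client, keys).await?;

    // Associate all received keys, new and unchanged alike, with the flow.
    let key_ids: Vec<i32> = stats.key_id_map.iter().map(|(_, id)| *id).collect();
    batch_insert_flow_keys(client, flow_name, &key_ids).await?;

    Ok(())
}
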
src/main.rs

@@ -1,5 +1,4 @@
 mod client;
-mod db;
 mod server;
 
 use clap::Parser;
src/server.rs, 214 changes

@@ -1,4 +1,4 @@
-use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer, Responder};
+use actix_web::{web, App, HttpResponse, HttpServer, Responder};
 use log::{error, info};
 use regex::Regex;
 use serde::{Deserialize, Serialize};

@@ -6,8 +6,6 @@ use std::collections::HashMap;
 use std::sync::{Arc, Mutex};
 use tokio_postgres::{Client, NoTls};
 
-use crate::db;
-
 #[derive(Serialize, Deserialize, Clone, Debug)]
 pub struct SshKey {
     pub server: String,

@@ -35,6 +33,51 @@ pub fn is_valid_ssh_key(key: &str) -> bool {
         || ed25519_re.is_match(key)
 }
 
+pub async fn insert_key_if_not_exists(
+    client: &Client,
+    key: &SshKey,
+) -> Result<i32, tokio_postgres::Error> {
+    let row = client
+        .query_opt(
+            "SELECT key_id FROM public.keys WHERE host = $1 AND key = $2",
+            &[&key.server, &key.public_key],
+        )
+        .await?;
+
+    if let Some(row) = row {
+        client
+            .execute(
+                "UPDATE public.keys SET updated = NOW() WHERE key_id = $1",
+                &[&row.get::<_, i32>(0)],
+            )
+            .await?;
+        info!("Updated existing key for server: {}", key.server);
+        Ok(row.get(0))
+    } else {
+        let row = client.query_one(
+            "INSERT INTO public.keys (host, key, updated) VALUES ($1, $2, NOW()) RETURNING key_id",
+            &[&key.server, &key.public_key]
+        ).await?;
+        info!("Inserted new key for server: {}", key.server);
+        Ok(row.get(0))
+    }
+}
+
+pub async fn insert_flow_key(
+    client: &Client,
+    flow_name: &str,
+    key_id: i32,
+) -> Result<(), tokio_postgres::Error> {
+    client
+        .execute(
+            "INSERT INTO public.flows (name, key_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
+            &[&flow_name, &key_id],
+        )
+        .await?;
+    info!("Inserted key_id {} into flow: {}", key_id, flow_name);
+    Ok(())
+}
+
 pub async fn get_keys_from_db(client: &Client) -> Result<Vec<Flow>, tokio_postgres::Error> {
     let rows = client.query(
         "SELECT k.host, k.key, f.name FROM public.keys k INNER JOIN public.flows f ON k.key_id = f.key_id",

@@ -70,53 +113,24 @@ pub async fn get_keys_from_db(client: &Client) -> Result<Vec<Flow>, tokio_postgres::Error> {
     Ok(flows_map.into_values().collect())
 }
 
-// Extract client hostname from request headers
-fn get_client_hostname(req: &HttpRequest) -> String {
-    if let Some(hostname) = req.headers().get("X-Client-Hostname") {
-        if let Ok(hostname_str) = hostname.to_str() {
-            return hostname_str.to_string();
-        }
-    }
-    "unknown-client".to_string()
-}
-
 pub async fn get_keys(
     flows: web::Data<Flows>,
     flow_id: web::Path<String>,
     allowed_flows: web::Data<Vec<String>>,
-    req: HttpRequest,
 ) -> impl Responder {
-    let client_hostname = get_client_hostname(&req);
     let flow_id_str = flow_id.into_inner();
-
-    info!(
-        "Received keys request from client '{}' for flow '{}'",
-        client_hostname, flow_id_str
-    );
-
     if !allowed_flows.contains(&flow_id_str) {
-        error!(
-            "Flow ID not allowed for client '{}': {}",
-            client_hostname, flow_id_str
-        );
+        error!("Flow ID not allowed: {}", flow_id_str);
         return HttpResponse::Forbidden().body("Flow ID not allowed");
     }
 
     let flows = flows.lock().unwrap();
     if let Some(flow) = flows.iter().find(|flow| flow.name == flow_id_str) {
         let servers: Vec<&SshKey> = flow.servers.iter().collect();
-        info!(
-            "Returning {} keys for flow '{}' to client '{}'",
-            servers.len(),
-            flow_id_str,
-            client_hostname
-        );
+        info!("Returning {} keys for flow: {}", servers.len(), flow_id_str);
         HttpResponse::Ok().json(servers)
     } else {
-        error!(
-            "Flow ID not found for client '{}': {}",
-            client_hostname, flow_id_str
-        );
+        error!("Flow ID not found: {}", flow_id_str);
         HttpResponse::NotFound().body("Flow ID not found")
     }
 }

@@ -127,99 +141,42 @@ pub async fn add_keys(
     new_keys: web::Json<Vec<SshKey>>,
     db_client: web::Data<Arc<Client>>,
     allowed_flows: web::Data<Vec<String>>,
-    req: HttpRequest,
 ) -> impl Responder {
-    let client_hostname = get_client_hostname(&req);
     let flow_id_str = flow_id.into_inner();
-
-    info!(
-        "Received {} keys from client '{}' for flow '{}'",
-        new_keys.len(),
-        client_hostname,
-        flow_id_str
-    );
-
     if !allowed_flows.contains(&flow_id_str) {
-        error!(
-            "Flow ID not allowed for client '{}': {}",
-            client_hostname, flow_id_str
-        );
+        error!("Flow ID not allowed: {}", flow_id_str);
         return HttpResponse::Forbidden().body("Flow ID not allowed");
     }
 
-    // Check SSH key format
-    let mut valid_keys = Vec::new();
     for new_key in new_keys.iter() {
         if !is_valid_ssh_key(&new_key.public_key) {
-            error!(
-                "Invalid SSH key format from client '{}' for server: {}",
-                client_hostname, new_key.server
-            );
+            error!("Invalid SSH key format for server: {}", new_key.server);
             return HttpResponse::BadRequest().body(format!(
                 "Invalid SSH key format for server: {}",
                 new_key.server
            ));
        }
-        valid_keys.push(new_key.clone());
+
+        match insert_key_if_not_exists(&db_client, new_key).await {
+            Ok(key_id) => {
+                if let Err(e) = insert_flow_key(&db_client, &flow_id_str, key_id).await {
+                    error!("Failed to insert flow key into database: {}", e);
+                    return HttpResponse::InternalServerError()
+                        .body("Failed to insert flow key into database");
+                }
+            }
     }
 
-    info!(
-        "Processing batch of {} keys from client '{}' for flow: {}",
-        valid_keys.len(),
-        client_hostname,
-        flow_id_str
-    );
-
-    // Batch insert keys with statistics
-    let key_stats = match crate::db::batch_insert_keys(&db_client, &valid_keys).await {
-        Ok(stats) => stats,
         Err(e) => {
-            error!(
-                "Failed to batch insert keys from client '{}' into database: {}",
-                client_hostname, e
-            );
+            error!("Failed to insert key into database: {}", e);
             return HttpResponse::InternalServerError()
-                .body("Failed to batch insert keys into database");
+                .body("Failed to insert key into database");
+            }
        }
-    };
-
-    // Always try to associate all keys with the flow, regardless of whether they're new or existing
-    if !key_stats.key_id_map.is_empty() {
-        // Extract all key IDs from statistics, both new and existing
-        let key_ids: Vec<i32> = key_stats.key_id_map.iter().map(|(_, id)| *id).collect();
-
-        // Batch insert key-flow associations
-        if let Err(e) = crate::db::batch_insert_flow_keys(&db_client, &flow_id_str, &key_ids).await
-        {
-            error!(
-                "Failed to batch insert flow keys from client '{}' into database: {}",
-                client_hostname, e
-            );
-            return HttpResponse::InternalServerError()
-                .body("Failed to batch insert flow keys into database");
        }
-
-        info!(
-            "Added flow associations for {} keys from client '{}' in flow '{}'",
-            key_ids.len(),
-            client_hostname,
-            flow_id_str
-        );
-    } else {
-        info!(
-            "No keys to associate from client '{}' with flow '{}'",
-            client_hostname, flow_id_str
-        );
-    }
-
-    // Get updated data
     let updated_flows = match get_keys_from_db(&db_client).await {
         Ok(flows) => flows,
         Err(e) => {
-            error!(
-                "Failed to get updated flows from database after client '{}' request: {}",
-                client_hostname, e
-            );
+            error!("Failed to get updated flows from database: {}", e);
             return HttpResponse::InternalServerError()
                 .body("Failed to refresh flows from database");
        }

@@ -231,28 +188,10 @@ pub async fn add_keys(
     let updated_flow = flows_guard.iter().find(|flow| flow.name == flow_id_str);
     if let Some(flow) = updated_flow {
         let servers: Vec<&SshKey> = flow.servers.iter().collect();
-        info!(
-            "Keys summary for client '{}', flow '{}': total received={}, new={}, unchanged={}, total in flow={}",
-            client_hostname,
-            flow_id_str,
-            key_stats.total,
-            key_stats.inserted,
-            key_stats.unchanged,
-            servers.len()
-        );
-
-        // Add statistics to HTTP response headers
-        let mut response = HttpResponse::Ok();
-        response.append_header(("X-Keys-Total", key_stats.total.to_string()));
-        response.append_header(("X-Keys-New", key_stats.inserted.to_string()));
-        response.append_header(("X-Keys-Unchanged", key_stats.unchanged.to_string()));
-
-        response.json(servers)
+        info!("Updated flow: {} with {} keys", flow_id_str, servers.len());
+        HttpResponse::Ok().json(servers)
     } else {
-        error!(
-            "Flow ID not found after update from client '{}': {}",
-            client_hostname, flow_id_str
-        );
+        error!("Flow ID not found after update: {}", flow_id_str);
         HttpResponse::NotFound().body("Flow ID not found")
     }
 }

@@ -268,17 +207,7 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
         args.db_host, db_user, db_password, args.db_name
     );
 
-    info!("Connecting to database at {}", args.db_host);
-    let (db_client, connection) = match tokio_postgres::connect(&db_conn_str, NoTls).await {
-        Ok((client, conn)) => (client, conn),
-        Err(e) => {
-            error!("Failed to connect to the database: {}", e);
-            return Err(std::io::Error::new(
-                std::io::ErrorKind::ConnectionRefused,
-                format!("Database connection error: {}", e),
-            ));
-        }
-    };
+    let (db_client, connection) = tokio_postgres::connect(&db_conn_str, NoTls).await.unwrap();
     let db_client = Arc::new(db_client);
 
     // Spawn a new thread to run the database connection

@@ -288,15 +217,6 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
         }
     });
 
-    // Initialize database schema if needed
-    if let Err(e) = db::initialize_db_schema(&db_client).await {
-        error!("Failed to initialize database schema: {}", e);
-        return Err(std::io::Error::new(
-            std::io::ErrorKind::Other,
-            format!("Database schema initialization error: {}", e),
-        ));
-    }
-
     let mut initial_flows = match get_keys_from_db(&db_client).await {
         Ok(flows) => flows,
         Err(e) => {