11 Commits

Author               SHA1        Message                                  Date
Ultradesu            af6c4d7e61  Added auto deprecation feature           2025-07-20 17:37:46 +03:00
Ultradesu            9c5518b39e  Added auto deprecation feature           2025-07-20 17:26:44 +03:00
Ultradesu            1eccc0e0f7  Fixed client mode flow args              2025-07-19 15:51:17 +03:00
Ultradesu            45ac3fca51  Fixed web ui. Added deprecation feature  2025-07-19 12:56:25 +03:00
Ultradesu            e33910a2db  Added web ui                             2025-07-19 12:20:52 +03:00
Ultradesu            c5d8ebd89f  Added web ui                             2025-07-19 12:20:37 +03:00
Ultradesu            1534d88300  Added web ui                             2025-07-18 18:35:04 +03:00
Ultradesu            3fa43d276d  Added web ui                             2025-07-18 18:06:26 +03:00
Ultradesu            d5ce88dfff  Added web ui                             2025-07-18 17:52:58 +03:00
Alexandr Bogomyakov  484ddd9803  Add files via upload                     2025-07-17 16:19:45 +03:00
Alexandr Bogomyakov  2f1fcd681e  Update README.MD                         2025-05-12 02:46:25 +03:00

12 changed files with 3736 additions and 315 deletions

Cargo.lock (generated, 266 lines changed)

@@ -557,6 +557,12 @@ dependencies = [
"typenum",
]
[[package]]
name = "data-encoding"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
[[package]]
name = "deranged"
version = "0.3.11"
@@ -599,6 +605,18 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "enum-as-inner"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "env_filter"
version = "0.1.0"
@@ -690,6 +708,21 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "futures"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.30"
@@ -706,6 +739,23 @@ version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
[[package]]
name = "futures-executor"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
[[package]]
name = "futures-macro"
version = "0.3.30"
@@ -735,10 +785,13 @@ version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"slab",
@@ -1006,6 +1059,16 @@ dependencies = [
"cc",
]
[[package]]
name = "idna"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
dependencies = [
"unicode-bidi",
"unicode-normalization",
]
[[package]]
name = "idna"
version = "0.5.0"
@@ -1026,6 +1089,18 @@ dependencies = [
"hashbrown",
]
[[package]]
name = "ipconfig"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f"
dependencies = [
"socket2",
"widestring",
"windows-sys 0.48.0",
"winreg 0.50.0",
]
[[package]]
name = "ipnet"
version = "2.9.0"
@@ -1064,21 +1139,25 @@ dependencies = [
[[package]]
name = "khm"
version = "0.4.1"
version = "0.6.3"
dependencies = [
"actix-web",
"base64 0.21.7",
"chrono",
"clap",
"env_logger",
"futures",
"hostname",
"log",
"regex",
"reqwest",
"rust-embed",
"serde",
"serde_json",
"tokio",
"tokio-postgres",
"tokio-util",
"trust-dns-resolver",
]
[[package]]
@@ -1093,6 +1172,12 @@ version = "0.2.155"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
[[package]]
name = "linked-hash-map"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
[[package]]
name = "linux-raw-sys"
version = "0.4.14"
@@ -1132,6 +1217,15 @@ version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "lru-cache"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c"
dependencies = [
"linked-hash-map",
]
[[package]]
name = "match_cfg"
version = "0.1.0"
@@ -1556,9 +1650,15 @@ dependencies = [
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"winreg",
"winreg 0.52.0",
]
[[package]]
name = "resolv-conf"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3"
[[package]]
name = "ring"
version = "0.17.8"
@@ -1574,6 +1674,40 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "rust-embed"
version = "8.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "025908b8682a26ba8d12f6f2d66b987584a4a87bc024abc5bbc12553a8cd178a"
dependencies = [
"rust-embed-impl",
"rust-embed-utils",
"walkdir",
]
[[package]]
name = "rust-embed-impl"
version = "8.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6065f1a4392b71819ec1ea1df1120673418bf386f50de1d6f54204d836d4349c"
dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
"syn",
"walkdir",
]
[[package]]
name = "rust-embed-utils"
version = "8.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6cc0c81648b20b70c491ff8cce00c1c3b223bb8ed2b5d41f0e54c6c4c0a3594"
dependencies = [
"sha2",
"walkdir",
]
[[package]]
name = "rustc-demangle"
version = "0.1.24"
@@ -1648,6 +1782,15 @@ version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "schannel"
version = "0.1.23"
@@ -1828,9 +1971,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
version = "2.0.68"
version = "2.0.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9"
checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d"
dependencies = [
"proc-macro2",
"quote",
@@ -1876,6 +2019,26 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "time"
version = "0.3.36"
@@ -2047,9 +2210,21 @@ checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
dependencies = [
"log",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.32"
@@ -2059,6 +2234,52 @@ dependencies = [
"once_cell",
]
[[package]]
name = "trust-dns-proto"
version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374"
dependencies = [
"async-trait",
"cfg-if",
"data-encoding",
"enum-as-inner",
"futures-channel",
"futures-io",
"futures-util",
"idna 0.4.0",
"ipnet",
"once_cell",
"rand",
"smallvec",
"thiserror",
"tinyvec",
"tokio",
"tracing",
"url",
]
[[package]]
name = "trust-dns-resolver"
version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6"
dependencies = [
"cfg-if",
"futures-util",
"ipconfig",
"lru-cache",
"once_cell",
"parking_lot",
"rand",
"resolv-conf",
"smallvec",
"thiserror",
"tokio",
"tracing",
"trust-dns-proto",
]
[[package]]
name = "try-lock"
version = "0.2.5"
@@ -2111,7 +2332,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c"
dependencies = [
"form_urlencoded",
"idna",
"idna 0.5.0",
"percent-encoding",
]
@@ -2133,6 +2354,16 @@ version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "want"
version = "0.3.1"
@@ -2241,6 +2472,12 @@ dependencies = [
"web-sys",
]
[[package]]
name = "widestring"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d"
[[package]]
name = "winapi"
version = "0.3.9"
@@ -2257,6 +2494,15 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
@@ -2411,6 +2657,16 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "winreg"
version = "0.50.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
dependencies = [
"cfg-if",
"windows-sys 0.48.0",
]
[[package]]
name = "winreg"
version = "0.52.0"

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "khm"
version = "0.4.2"
version = "0.6.3"
edition = "2021"
authors = ["AB <ab@hexor.cy>"]
@@ -12,9 +12,13 @@ env_logger = "0.11.3"
log = "0.4"
regex = "1.10.5"
base64 = "0.21"
tokio = { version = "1", features = ["full"] }
tokio = { version = "1", features = ["full", "sync"] }
tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
tokio-util = { version = "0.7", features = ["codec"] }
clap = { version = "4", features = ["derive"] }
chrono = "0.4.38"
reqwest = { version = "0.12", features = ["json"] }
trust-dns-resolver = "0.23"
futures = "0.3"
hostname = "0.3"
rust-embed = "8.0"
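
The three new dependencies back the features added in this range: trust-dns-resolver powers the DNS scan endpoint, futures drives the concurrent lookups, and rust-embed bundles the static/ web UI into the binary. As a minimal sketch (not the project's actual serve_static_file handler in src/web.rs; the helper name and hard-coded 404 body are illustrative), this is roughly how rust-embed pairs with actix-web to serve an embedded asset:

use actix_web::HttpResponse;
use rust_embed::RustEmbed;

#[derive(RustEmbed)]
#[folder = "static/"] // same folder the real StaticAssets in src/web.rs embeds
struct StaticAssets;

// Look up a path inside the embedded folder and turn it into an HTTP response.
fn embedded_file_response(path: &str) -> HttpResponse {
    match StaticAssets::get(path) {
        // EmbeddedFile::data is a Cow<'static, [u8]>; hand actix an owned body.
        Some(file) => HttpResponse::Ok().body(file.data.into_owned()),
        None => HttpResponse::NotFound().body("not found"),
    }
}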

LICENSE-WTFPL (new file, 13 lines)

@@ -0,0 +1,13 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

README.MD

@@ -38,6 +38,7 @@ Options:
- `--db-name <DB_NAME>` Server mode: Name of the PostgreSQL database [default: khm]
- `--db-user <DB_USER>` Server mode: Username for the PostgreSQL database
- `--db-password <DB_PASSWORD>` Server mode: Password for the PostgreSQL database
- `--basic-auth <BASIC_AUTH>` Client mode: Basic Auth credentials [default: ""]
- `--host <HOST>` Client mode: Full host address of the server to connect to. Like `https://khm.example.com/<FLOW_NAME>`
- `--known-hosts <KNOWN_HOSTS>` Client mode: Path to the known_hosts file [default: ~/.ssh/known_hosts]
@@ -61,4 +62,4 @@ Contributions are welcome! Please open an issue or submit a pull request for any
## License
This project is licensed under the WTFPL License.
This project is licensed under the WTFPL License.

src/client.rs

@@ -11,6 +11,8 @@ use std::path::Path;
struct SshKey {
server: String,
public_key: String,
#[serde(default)]
deprecated: bool,
}
fn read_known_hosts(file_path: &str) -> io::Result<Vec<SshKey>> {
@@ -26,7 +28,11 @@ fn read_known_hosts(file_path: &str) -> io::Result<Vec<SshKey>> {
if parts.len() >= 2 {
let server = parts[0].to_string();
let public_key = parts[1..].join(" ");
keys.push(SshKey { server, public_key });
keys.push(SshKey {
server,
public_key,
deprecated: false, // Keys from known_hosts are not deprecated
});
}
}
Err(e) => {
@@ -42,10 +48,17 @@ fn write_known_hosts(file_path: &str, keys: &[SshKey]) -> io::Result<()> {
let path = Path::new(file_path);
let mut file = File::create(&path)?;
for key in keys {
// Filter out deprecated keys - they should not be written to known_hosts
let active_keys: Vec<&SshKey> = keys.iter().filter(|key| !key.deprecated).collect();
let active_count = active_keys.len();
for key in active_keys {
writeln!(file, "{} {}", key.server, key.public_key)?;
}
info!("Wrote {} keys to known_hosts file", keys.len());
info!(
"Wrote {} active keys to known_hosts file (filtered out deprecated keys)",
active_count
);
Ok(())
}
@@ -162,23 +175,55 @@ async fn get_keys_from_server(
pub async fn run_client(args: crate::Args) -> std::io::Result<()> {
info!("Client mode: Reading known_hosts file");
let keys = read_known_hosts(&args.known_hosts).expect("Failed to read known hosts file");
let keys = match read_known_hosts(&args.known_hosts) {
Ok(keys) => keys,
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
info!(
"known_hosts file not found: {}. Starting with empty key list.",
args.known_hosts
);
Vec::new()
} else {
error!("Failed to read known_hosts file: {}", e);
return Err(e);
}
}
};
let host = args.host.expect("host is required in client mode");
info!("Client mode: Sending keys to server at {}", host);
send_keys_to_server(&host, keys, &args.basic_auth)
.await
.expect("Failed to send keys to server");
let flow = args.flow.expect("flow is required in client mode");
let url = format!("{}/{}", host, flow);
info!("Client mode: Sending keys to server at {}", url);
if let Err(e) = send_keys_to_server(&url, keys, &args.basic_auth).await {
error!("Failed to send keys to server: {}", e);
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Network error: {}", e),
));
}
if args.in_place {
info!("Client mode: In-place update is enabled. Fetching keys from server.");
let server_keys = get_keys_from_server(&host, &args.basic_auth)
.await
.expect("Failed to get keys from server");
let server_keys = match get_keys_from_server(&url, &args.basic_auth).await {
Ok(keys) => keys,
Err(e) => {
error!("Failed to get keys from server: {}", e);
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Network error: {}", e),
));
}
};
info!("Client mode: Writing updated known_hosts file");
write_known_hosts(&args.known_hosts, &server_keys)
.expect("Failed to write known hosts file");
if let Err(e) = write_known_hosts(&args.known_hosts, &server_keys) {
error!("Failed to write known_hosts file: {}", e);
return Err(e);
}
}
info!("Client mode: Finished operations");

src/db.rs (932 lines changed)

@@ -1,8 +1,10 @@
use crate::server::SshKey;
use log::info;
use log::{error, info};
use std::collections::HashMap;
use std::collections::HashSet;
use tokio_postgres::Client;
use tokio_postgres::tls::NoTlsStream;
use tokio_postgres::Socket;
use tokio_postgres::{Client, Connection, NoTls};
// Structure for storing key processing statistics
pub struct KeyInsertStats {
@@ -12,284 +14,758 @@ pub struct KeyInsertStats {
pub key_id_map: Vec<(SshKey, i32)>, // Mapping of keys to their IDs in the database
}
pub async fn initialize_db_schema(client: &Client) -> Result<(), tokio_postgres::Error> {
info!("Checking and initializing database schema if needed");
// Check if tables exist by querying information_schema
let tables_exist = client
.query(
"SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name = 'keys'
) AND EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name = 'flows'
)",
&[],
)
.await?
.get(0)
.map(|row| row.get::<_, bool>(0))
.unwrap_or(false);
if !tables_exist {
info!("Database schema doesn't exist. Creating tables...");
// Create the keys table
client
.execute(
"CREATE TABLE IF NOT EXISTS public.keys (
key_id SERIAL PRIMARY KEY,
host VARCHAR(255) NOT NULL,
key TEXT NOT NULL,
updated TIMESTAMP WITH TIME ZONE NOT NULL,
CONSTRAINT unique_host_key UNIQUE (host, key)
)",
&[],
)
.await?;
// Create the flows table
client
.execute(
"CREATE TABLE IF NOT EXISTS public.flows (
flow_id SERIAL PRIMARY KEY,
name VARCHAR(255) NOT NULL,
key_id INTEGER NOT NULL,
CONSTRAINT fk_key
FOREIGN KEY(key_id)
REFERENCES public.keys(key_id)
ON DELETE CASCADE,
CONSTRAINT unique_flow_key UNIQUE (name, key_id)
)",
&[],
)
.await?;
// Create an index for faster lookups
client
.execute(
"CREATE INDEX IF NOT EXISTS idx_flows_name ON public.flows(name)",
&[],
)
.await?;
info!("Database schema created successfully");
} else {
info!("Database schema already exists");
}
Ok(())
// Simple database client that exits on connection errors
pub struct DbClient {
client: Client,
}
pub async fn batch_insert_keys(
client: &Client,
keys: &[SshKey],
) -> Result<KeyInsertStats, tokio_postgres::Error> {
if keys.is_empty() {
return Ok(KeyInsertStats {
total: 0,
inserted: 0,
unchanged: 0,
key_id_map: Vec::new(),
});
impl DbClient {
pub async fn connect(
connection_string: &str,
) -> Result<(Self, Connection<Socket, NoTlsStream>), tokio_postgres::Error> {
info!("Connecting to database...");
let (client, connection) = tokio_postgres::connect(connection_string, NoTls).await?;
info!("Successfully connected to database");
Ok((DbClient { client }, connection))
}
// Prepare arrays for batch insertion
let mut host_values: Vec<&str> = Vec::with_capacity(keys.len());
let mut key_values: Vec<&str> = Vec::with_capacity(keys.len());
for key in keys {
host_values.push(&key.server);
key_values.push(&key.public_key);
}
// First, check which keys already exist in the database
let mut existing_keys = HashMap::new();
let mut key_query = String::from("SELECT host, key, key_id FROM public.keys WHERE ");
for i in 0..keys.len() {
if i > 0 {
key_query.push_str(" OR ");
// Helper function to handle database errors - exits the application on connection errors
fn handle_db_error<T>(
result: Result<T, tokio_postgres::Error>,
operation: &str,
) -> Result<T, tokio_postgres::Error> {
match result {
Ok(value) => Ok(value),
Err(e) => {
if Self::is_connection_error(&e) {
error!("Database connection lost during {}: {}", operation, e);
error!("Exiting application due to database connection failure");
std::process::exit(1);
} else {
// For non-connection errors, just return the error
Err(e)
}
}
}
key_query.push_str(&format!("(host = ${} AND key = ${})", i * 2 + 1, i * 2 + 2));
}
let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
Vec::with_capacity(keys.len() * 2);
for i in 0..keys.len() {
params.push(&host_values[i]);
params.push(&key_values[i]);
fn is_connection_error(error: &tokio_postgres::Error) -> bool {
// Check if the error is related to connection issues
let error_str = error.to_string();
error_str.contains("connection closed")
|| error_str.contains("connection reset")
|| error_str.contains("broken pipe")
|| error_str.contains("Connection refused")
|| error_str.contains("connection terminated")
|| error.as_db_error().is_none() // Non-database errors are often connection issues
}
let rows = client.query(&key_query, &params[..]).await?;
pub async fn initialize_schema(&self) -> Result<(), tokio_postgres::Error> {
info!("Checking and initializing database schema if needed");
for row in rows {
let host: String = row.get(0);
let key: String = row.get(1);
let key_id: i32 = row.get(2);
existing_keys.insert((host, key), key_id);
}
// Check if tables exist by querying information_schema
let result = self
.client
.query(
"SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name = 'keys'
) AND EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name = 'flows'
)",
&[],
)
.await;
// Determine which keys need to be inserted and which already exist
let mut keys_to_insert = Vec::new();
let mut unchanged_keys = Vec::new();
let tables_exist = Self::handle_db_error(result, "checking table existence")?
.get(0)
.map(|row| row.get::<_, bool>(0))
.unwrap_or(false);
for key in keys {
let key_tuple = (key.server.clone(), key.public_key.clone());
if existing_keys.contains_key(&key_tuple) {
unchanged_keys.push((key.clone(), *existing_keys.get(&key_tuple).unwrap()));
if !tables_exist {
info!("Database schema doesn't exist. Creating tables...");
// Create the keys table
let result = self
.client
.execute(
"CREATE TABLE IF NOT EXISTS public.keys (
key_id SERIAL PRIMARY KEY,
host VARCHAR(255) NOT NULL,
key TEXT NOT NULL,
updated TIMESTAMP WITH TIME ZONE NOT NULL,
deprecated BOOLEAN NOT NULL DEFAULT FALSE,
CONSTRAINT unique_host_key UNIQUE (host, key)
)",
&[],
)
.await;
Self::handle_db_error(result, "creating keys table")?;
// Create the flows table
let result = self
.client
.execute(
"CREATE TABLE IF NOT EXISTS public.flows (
flow_id SERIAL PRIMARY KEY,
name VARCHAR(255) NOT NULL,
key_id INTEGER NOT NULL,
CONSTRAINT fk_key
FOREIGN KEY(key_id)
REFERENCES public.keys(key_id)
ON DELETE CASCADE,
CONSTRAINT unique_flow_key UNIQUE (name, key_id)
)",
&[],
)
.await;
Self::handle_db_error(result, "creating flows table")?;
// Create an index for faster lookups
let result = self
.client
.execute(
"CREATE INDEX IF NOT EXISTS idx_flows_name ON public.flows(name)",
&[],
)
.await;
Self::handle_db_error(result, "creating index")?;
info!("Database schema created successfully");
} else {
keys_to_insert.push(key.clone());
info!("Database schema already exists");
// Check if deprecated column exists, add it if missing (migration)
let result = self
.client
.query(
"SELECT EXISTS (
SELECT FROM information_schema.columns
WHERE table_schema = 'public'
AND table_name = 'keys'
AND column_name = 'deprecated'
)",
&[],
)
.await;
let column_exists = Self::handle_db_error(result, "checking deprecated column")?
.get(0)
.map(|row| row.get::<_, bool>(0))
.unwrap_or(false);
if !column_exists {
info!("Adding deprecated column to existing keys table...");
let result = self.client
.execute(
"ALTER TABLE public.keys ADD COLUMN deprecated BOOLEAN NOT NULL DEFAULT FALSE",
&[],
)
.await;
Self::handle_db_error(result, "adding deprecated column")?;
info!("Migration completed: deprecated column added");
}
}
Ok(())
}
let mut inserted_keys = Vec::new();
pub async fn batch_insert_keys(
&self,
keys: &[SshKey],
) -> Result<KeyInsertStats, tokio_postgres::Error> {
if keys.is_empty() {
return Ok(KeyInsertStats {
total: 0,
inserted: 0,
unchanged: 0,
key_id_map: Vec::new(),
});
}
// If there are keys to insert, perform the insertion
if !keys_to_insert.is_empty() {
let mut insert_sql = String::from("INSERT INTO public.keys (host, key, updated) VALUES ");
// Prepare arrays for batch insertion
let mut host_values: Vec<&str> = Vec::with_capacity(keys.len());
let mut key_values: Vec<&str> = Vec::with_capacity(keys.len());
let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = Vec::new();
let mut param_count = 1;
for key in keys {
host_values.push(&key.server);
key_values.push(&key.public_key);
}
for (i, key) in keys_to_insert.iter().enumerate() {
// First, check which keys already exist in the database (including deprecated status)
let mut existing_keys = HashMap::new();
let mut key_query =
String::from("SELECT host, key, key_id, deprecated FROM public.keys WHERE ");
for i in 0..keys.len() {
if i > 0 {
insert_sql.push_str(", ");
key_query.push_str(" OR ");
}
insert_sql.push_str(&format!("(${}, ${}, NOW())", param_count, param_count + 1));
insert_params.push(&key.server);
insert_params.push(&key.public_key);
param_count += 2;
key_query.push_str(&format!("(host = ${} AND key = ${})", i * 2 + 1, i * 2 + 2));
}
insert_sql.push_str(" RETURNING key_id, host, key");
let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
Vec::with_capacity(keys.len() * 2);
for i in 0..keys.len() {
params.push(&host_values[i]);
params.push(&key_values[i]);
}
let inserted_rows = client.query(&insert_sql, &insert_params[..]).await?;
let result = self.client.query(&key_query, &params[..]).await;
let rows = Self::handle_db_error(result, "checking existing keys")?;
for row in inserted_rows {
let host: String = row.get(1);
let key_text: String = row.get(2);
let key_id: i32 = row.get(0);
for row in rows {
let host: String = row.get(0);
let key: String = row.get(1);
let key_id: i32 = row.get(2);
let deprecated: bool = row.get(3);
existing_keys.insert((host, key), (key_id, deprecated));
}
if let Some(orig_key) = keys_to_insert
.iter()
.find(|k| k.server == host && k.public_key == key_text)
{
inserted_keys.push((orig_key.clone(), key_id));
// Determine which keys need to be inserted and which already exist
let mut keys_to_insert = Vec::new();
let mut unchanged_keys = Vec::new();
let mut ignored_deprecated = 0;
for key in keys {
let key_tuple = (key.server.clone(), key.public_key.clone());
if let Some((key_id, is_deprecated)) = existing_keys.get(&key_tuple) {
if *is_deprecated {
// Ignore deprecated keys - don't add them to any flow
ignored_deprecated += 1;
} else {
// Key exists and is not deprecated - add to unchanged
unchanged_keys.push((key.clone(), *key_id));
}
} else {
// Key doesn't exist - add to insert list
keys_to_insert.push(key.clone());
}
}
}
// Save the number of elements before combining
let inserted_count = inserted_keys.len();
let unchanged_count = unchanged_keys.len();
let mut inserted_keys = Vec::new();
// Combine results and generate statistics
let mut key_id_map = Vec::with_capacity(unchanged_count + inserted_count);
key_id_map.extend(unchanged_keys);
key_id_map.extend(inserted_keys);
// If there are keys to insert, perform the insertion
if !keys_to_insert.is_empty() {
let mut insert_sql =
String::from("INSERT INTO public.keys (host, key, updated) VALUES ");
let stats = KeyInsertStats {
total: keys.len(),
inserted: inserted_count,
unchanged: unchanged_count,
key_id_map,
};
let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = Vec::new();
let mut param_count = 1;
info!(
"Keys stats: received={}, new={}, unchanged={}",
stats.total, stats.inserted, stats.unchanged
);
for (i, key) in keys_to_insert.iter().enumerate() {
if i > 0 {
insert_sql.push_str(", ");
}
insert_sql.push_str(&format!("(${}, ${}, NOW())", param_count, param_count + 1));
insert_params.push(&key.server);
insert_params.push(&key.public_key);
param_count += 2;
}
Ok(stats)
}
insert_sql.push_str(" RETURNING key_id, host, key");
pub async fn batch_insert_flow_keys(
client: &Client,
flow_name: &str,
key_ids: &[i32],
) -> Result<usize, tokio_postgres::Error> {
if key_ids.is_empty() {
info!("No keys to associate with flow '{}'", flow_name);
return Ok(0);
}
let result = self.client.query(&insert_sql, &insert_params[..]).await;
let inserted_rows = Self::handle_db_error(result, "inserting keys")?;
// First, check which associations already exist
let mut existing_query =
String::from("SELECT key_id FROM public.flows WHERE name = $1 AND key_id IN (");
for row in inserted_rows {
let host: String = row.get(1);
let key_text: String = row.get(2);
let key_id: i32 = row.get(0);
for i in 0..key_ids.len() {
if i > 0 {
existing_query.push_str(", ");
if let Some(orig_key) = keys_to_insert
.iter()
.find(|k| k.server == host && k.public_key == key_text)
{
inserted_keys.push((orig_key.clone(), key_id));
}
}
}
existing_query.push_str(&format!("${}", i + 2));
}
existing_query.push_str(")");
let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
Vec::with_capacity(key_ids.len() + 1);
params.push(&flow_name);
for key_id in key_ids {
params.push(key_id);
}
// Save the number of elements before combining
let inserted_count = inserted_keys.len();
let unchanged_count = unchanged_keys.len();
let rows = client.query(&existing_query, &params[..]).await?;
// Combine results and generate statistics
let mut key_id_map = Vec::with_capacity(unchanged_count + inserted_count);
key_id_map.extend(unchanged_keys);
key_id_map.extend(inserted_keys);
let mut existing_associations = HashSet::new();
for row in rows {
let key_id: i32 = row.get(0);
existing_associations.insert(key_id);
}
let stats = KeyInsertStats {
total: keys.len(),
inserted: inserted_count,
unchanged: unchanged_count,
key_id_map,
};
// Filter only keys that are not yet associated with the flow
let new_key_ids: Vec<&i32> = key_ids
.iter()
.filter(|&id| !existing_associations.contains(id))
.collect();
if new_key_ids.is_empty() {
info!(
"All {} keys are already associated with flow '{}'",
key_ids.len(),
flow_name
"Keys stats: received={}, new={}, unchanged={}, ignored_deprecated={}",
stats.total, stats.inserted, stats.unchanged, ignored_deprecated
);
return Ok(0);
Ok(stats)
}
// Build SQL query with multiple values only for new associations
let mut sql = String::from("INSERT INTO public.flows (name, key_id) VALUES ");
for i in 0..new_key_ids.len() {
if i > 0 {
sql.push_str(", ");
pub async fn batch_insert_flow_keys(
&self,
flow_name: &str,
key_ids: &[i32],
) -> Result<usize, tokio_postgres::Error> {
if key_ids.is_empty() {
info!("No keys to associate with flow '{}'", flow_name);
return Ok(0);
}
sql.push_str(&format!("($1, ${})", i + 2));
// First, check which associations already exist
let mut existing_query =
String::from("SELECT key_id FROM public.flows WHERE name = $1 AND key_id IN (");
for i in 0..key_ids.len() {
if i > 0 {
existing_query.push_str(", ");
}
existing_query.push_str(&format!("${}", i + 2));
}
existing_query.push_str(")");
let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
Vec::with_capacity(key_ids.len() + 1);
params.push(&flow_name);
for key_id in key_ids {
params.push(key_id);
}
let result = self.client.query(&existing_query, &params[..]).await;
let rows = Self::handle_db_error(result, "checking existing flow associations")?;
let mut existing_associations = HashSet::new();
for row in rows {
let key_id: i32 = row.get(0);
existing_associations.insert(key_id);
}
// Filter only keys that are not yet associated with the flow
let new_key_ids: Vec<&i32> = key_ids
.iter()
.filter(|&id| !existing_associations.contains(id))
.collect();
if new_key_ids.is_empty() {
info!(
"All {} keys are already associated with flow '{}'",
key_ids.len(),
flow_name
);
return Ok(0);
}
// Build SQL query with multiple values only for new associations
let mut sql = String::from("INSERT INTO public.flows (name, key_id) VALUES ");
for i in 0..new_key_ids.len() {
if i > 0 {
sql.push_str(", ");
}
sql.push_str(&format!("($1, ${})", i + 2));
}
sql.push_str(" ON CONFLICT (name, key_id) DO NOTHING");
// Prepare parameters for the query
let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
Vec::with_capacity(new_key_ids.len() + 1);
insert_params.push(&flow_name);
for key_id in &new_key_ids {
insert_params.push(*key_id);
}
// Execute query
let result = self.client.execute(&sql, &insert_params[..]).await;
let affected = Self::handle_db_error(result, "inserting flow associations")?;
let affected_usize = affected as usize;
info!(
"Added {} new key-flow associations for flow '{}' (skipped {} existing)",
affected_usize,
flow_name,
existing_associations.len()
);
Ok(affected_usize)
}
sql.push_str(" ON CONFLICT (name, key_id) DO NOTHING");
pub async fn get_keys_from_db(
&self,
) -> Result<Vec<crate::server::Flow>, tokio_postgres::Error> {
let result = self.client.query(
"SELECT k.host, k.key, k.deprecated, f.name FROM public.keys k INNER JOIN public.flows f ON k.key_id = f.key_id",
&[]
).await;
let rows = Self::handle_db_error(result, "getting keys from database")?;
// Prepare parameters for the query
let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
Vec::with_capacity(new_key_ids.len() + 1);
insert_params.push(&flow_name);
for key_id in &new_key_ids {
insert_params.push(*key_id);
let mut flows_map: HashMap<String, crate::server::Flow> = HashMap::new();
for row in rows {
let host: String = row.get(0);
let key: String = row.get(1);
let deprecated: bool = row.get(2);
let flow: String = row.get(3);
let ssh_key = SshKey {
server: host,
public_key: key,
deprecated,
};
if let Some(flow_entry) = flows_map.get_mut(&flow) {
flow_entry.servers.push(ssh_key);
} else {
flows_map.insert(
flow.clone(),
crate::server::Flow {
name: flow,
servers: vec![ssh_key],
},
);
}
}
info!("Retrieved {} flows from database", flows_map.len());
Ok(flows_map.into_values().collect())
}
// Execute query
let affected = client.execute(&sql, &insert_params[..]).await?;
pub async fn deprecate_key_by_server(
&self,
server_name: &str,
flow_name: &str,
) -> Result<u64, tokio_postgres::Error> {
// Update keys to deprecated status for the given server
let result = self
.client
.execute(
"UPDATE public.keys
SET deprecated = TRUE, updated = NOW()
WHERE host = $1
AND key_id IN (
SELECT key_id FROM public.flows WHERE name = $2
)",
&[&server_name, &flow_name],
)
.await;
let affected = Self::handle_db_error(result, "deprecating key")?;
let affected_usize = affected as usize;
info!(
"Deprecated {} key(s) for server '{}' in flow '{}'",
affected, server_name, flow_name
);
info!(
"Added {} new key-flow associations for flow '{}' (skipped {} existing)",
affected_usize,
flow_name,
existing_associations.len()
);
Ok(affected)
}
Ok(affected_usize)
pub async fn bulk_deprecate_keys_by_servers(
&self,
server_names: &[String],
flow_name: &str,
) -> Result<u64, tokio_postgres::Error> {
if server_names.is_empty() {
return Ok(0);
}
// Update keys to deprecated status for multiple servers in one query
let result = self
.client
.execute(
"UPDATE public.keys
SET deprecated = TRUE, updated = NOW()
WHERE host = ANY($1)
AND key_id IN (
SELECT key_id FROM public.flows WHERE name = $2
)",
&[&server_names, &flow_name],
)
.await;
let affected = Self::handle_db_error(result, "bulk deprecating keys")?;
info!(
"Bulk deprecated {} key(s) for {} servers in flow '{}'",
affected, server_names.len(), flow_name
);
Ok(affected)
}
pub async fn bulk_restore_keys_by_servers(
&self,
server_names: &[String],
flow_name: &str,
) -> Result<u64, tokio_postgres::Error> {
if server_names.is_empty() {
return Ok(0);
}
// Update keys to active status for multiple servers in one query
let result = self
.client
.execute(
"UPDATE public.keys
SET deprecated = FALSE, updated = NOW()
WHERE host = ANY($1)
AND deprecated = TRUE
AND key_id IN (
SELECT key_id FROM public.flows WHERE name = $2
)",
&[&server_names, &flow_name],
)
.await;
let affected = Self::handle_db_error(result, "bulk restoring keys")?;
info!(
"Bulk restored {} key(s) for {} servers in flow '{}'",
affected, server_names.len(), flow_name
);
Ok(affected)
}
pub async fn restore_key_by_server(
&self,
server_name: &str,
flow_name: &str,
) -> Result<u64, tokio_postgres::Error> {
// Update keys to active status for the given server in the flow
let result = self
.client
.execute(
"UPDATE public.keys
SET deprecated = FALSE, updated = NOW()
WHERE host = $1
AND deprecated = TRUE
AND key_id IN (
SELECT key_id FROM public.flows WHERE name = $2
)",
&[&server_name, &flow_name],
)
.await;
let affected = Self::handle_db_error(result, "restoring key")?;
info!(
"Restored {} key(s) for server '{}' in flow '{}'",
affected, server_name, flow_name
);
Ok(affected)
}
pub async fn permanently_delete_key_by_server(
&self,
server_name: &str,
flow_name: &str,
) -> Result<u64, tokio_postgres::Error> {
// First, find the key_ids for the given server in the flow
let result = self
.client
.query(
"SELECT k.key_id FROM public.keys k
INNER JOIN public.flows f ON k.key_id = f.key_id
WHERE k.host = $1 AND f.name = $2",
&[&server_name, &flow_name],
)
.await;
let key_rows = Self::handle_db_error(result, "finding keys to delete")?;
if key_rows.is_empty() {
return Ok(0);
}
let key_ids: Vec<i32> = key_rows.iter().map(|row| row.get::<_, i32>(0)).collect();
// Delete flow associations first
let mut flow_delete_count = 0;
for key_id in &key_ids {
let result = self
.client
.execute(
"DELETE FROM public.flows WHERE name = $1 AND key_id = $2",
&[&flow_name, key_id],
)
.await;
let deleted = Self::handle_db_error(result, "deleting flow association")?;
flow_delete_count += deleted;
}
// Check if any of these keys are used in other flows
let mut keys_to_delete = Vec::new();
for key_id in &key_ids {
let result = self
.client
.query_one(
"SELECT COUNT(*) FROM public.flows WHERE key_id = $1",
&[key_id],
)
.await;
let count: i64 = Self::handle_db_error(result, "checking key references")?.get(0);
if count == 0 {
keys_to_delete.push(*key_id);
}
}
// Permanently delete keys that are no longer referenced by any flow
let mut total_deleted = 0;
for key_id in keys_to_delete {
let result = self
.client
.execute("DELETE FROM public.keys WHERE key_id = $1", &[&key_id])
.await;
let deleted = Self::handle_db_error(result, "deleting key")?;
total_deleted += deleted;
}
info!(
"Permanently deleted {} flow associations and {} orphaned keys for server '{}' in flow '{}'",
flow_delete_count, total_deleted, server_name, flow_name
);
Ok(std::cmp::max(flow_delete_count, total_deleted))
}
}
// Compatibility wrapper for transition
pub struct ReconnectingDbClient {
inner: Option<DbClient>,
}
impl ReconnectingDbClient {
pub fn new(_connection_string: String) -> Self {
Self { inner: None }
}
pub async fn connect(&mut self, connection_string: &str) -> Result<(), tokio_postgres::Error> {
let (client, connection) = DbClient::connect(connection_string).await?;
// Spawn connection handler that will exit on error
tokio::spawn(async move {
if let Err(e) = connection.await {
error!("Database connection error: {}", e);
error!("Exiting application due to database connection failure");
std::process::exit(1);
}
});
self.inner = Some(client);
Ok(())
}
pub async fn initialize_schema(&self) -> Result<(), tokio_postgres::Error> {
match &self.inner {
Some(client) => client.initialize_schema().await,
None => panic!("Database client not initialized"),
}
}
pub async fn batch_insert_keys_reconnecting(
&self,
keys: Vec<SshKey>,
) -> Result<KeyInsertStats, tokio_postgres::Error> {
match &self.inner {
Some(client) => client.batch_insert_keys(&keys).await,
None => panic!("Database client not initialized"),
}
}
pub async fn batch_insert_flow_keys_reconnecting(
&self,
flow_name: String,
key_ids: Vec<i32>,
) -> Result<usize, tokio_postgres::Error> {
match &self.inner {
Some(client) => client.batch_insert_flow_keys(&flow_name, &key_ids).await,
None => panic!("Database client not initialized"),
}
}
pub async fn get_keys_from_db_reconnecting(
&self,
) -> Result<Vec<crate::server::Flow>, tokio_postgres::Error> {
match &self.inner {
Some(client) => client.get_keys_from_db().await,
None => panic!("Database client not initialized"),
}
}
pub async fn deprecate_key_by_server_reconnecting(
&self,
server_name: String,
flow_name: String,
) -> Result<u64, tokio_postgres::Error> {
match &self.inner {
Some(client) => {
client
.deprecate_key_by_server(&server_name, &flow_name)
.await
}
None => panic!("Database client not initialized"),
}
}
pub async fn bulk_deprecate_keys_by_servers_reconnecting(
&self,
server_names: Vec<String>,
flow_name: String,
) -> Result<u64, tokio_postgres::Error> {
match &self.inner {
Some(client) => {
client
.bulk_deprecate_keys_by_servers(&server_names, &flow_name)
.await
}
None => panic!("Database client not initialized"),
}
}
pub async fn bulk_restore_keys_by_servers_reconnecting(
&self,
server_names: Vec<String>,
flow_name: String,
) -> Result<u64, tokio_postgres::Error> {
match &self.inner {
Some(client) => {
client
.bulk_restore_keys_by_servers(&server_names, &flow_name)
.await
}
None => panic!("Database client not initialized"),
}
}
pub async fn restore_key_by_server_reconnecting(
&self,
server_name: String,
flow_name: String,
) -> Result<u64, tokio_postgres::Error> {
match &self.inner {
Some(client) => client.restore_key_by_server(&server_name, &flow_name).await,
None => panic!("Database client not initialized"),
}
}
pub async fn permanently_delete_key_by_server_reconnecting(
&self,
server_name: String,
flow_name: String,
) -> Result<u64, tokio_postgres::Error> {
match &self.inner {
Some(client) => {
client
.permanently_delete_key_by_server(&server_name, &flow_name)
.await
}
None => panic!("Database client not initialized"),
}
}
}
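
A minimal usage sketch of the new wrapper, following the same call sequence run_server in src/server.rs uses below; the connection string here is an illustrative placeholder, not a value from the project:

use tokio_postgres::Error;

async fn connect_and_prepare() -> Result<ReconnectingDbClient, Error> {
    // Illustrative connection string; the real one is assembled from the CLI arguments.
    let conn_str = "host=localhost user=khm password=secret dbname=khm".to_string();
    let mut db = ReconnectingDbClient::new(conn_str.clone());
    db.connect(&conn_str).await?;   // spawns the connection task; the process exits if it drops later
    db.initialize_schema().await?;  // creates the tables or runs the `deprecated` column migration
    Ok(db)
}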

src/main.rs

@@ -1,6 +1,7 @@
mod client;
mod db;
mod server;
mod web;
use clap::Parser;
use env_logger;
@@ -21,7 +22,7 @@ use log::{error, info};
khm --server --ip 0.0.0.0 --port 1337 --db-host psql.psql.svc --db-name khm --db-user admin --db-password <SECRET> --flows work,home\n\
\n\
Running in client mode to send diff and sync ~/.ssh/known_hosts with remote flow `work` in place:\n\
khm --host https://khm.example.com/work --known-hosts ~/.ssh/known_hosts --in-place\n\
khm --host https://khm.example.com --flow work --known-hosts ~/.ssh/known_hosts --in-place\n\
\n\
"
)]
@@ -95,10 +96,18 @@ struct Args {
#[arg(
long,
required_if_eq("server", "false"),
help = "Client mode: Full host address of the server to connect to. Like https://khm.example.com/<FLOW_NAME>"
help = "Client mode: Full host address of the server to connect to. Like https://khm.example.com"
)]
host: Option<String>,
/// Flow name to use on the server
#[arg(
long,
required_if_eq("server", "false"),
help = "Client mode: Flow name to use on the server"
)]
flow: Option<String>,
/// Path to the known_hosts file (default: ~/.ssh/known_hosts)
#[arg(
long,
@@ -119,6 +128,25 @@ async fn main() -> std::io::Result<()> {
let args = Args::parse();
// Check if we have the minimum required arguments
if !args.server && (args.host.is_none() || args.flow.is_none()) {
// Neither server mode nor client mode properly configured
eprintln!("Error: You must specify either server mode (--server) or client mode (--host and --flow)");
eprintln!();
eprintln!("Examples:");
eprintln!(
" Server mode: {} --server --db-user admin --db-password pass --flows work,home",
env!("CARGO_PKG_NAME")
);
eprintln!(
" Client mode: {} --host https://khm.example.com --flow work",
env!("CARGO_PKG_NAME")
);
eprintln!();
eprintln!("Use --help for more information.");
std::process::exit(1);
}
if args.server {
info!("Running in server mode");
if let Err(e) = server::run_server(args).await {

src/server.rs

@@ -2,16 +2,16 @@ use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer, Responder};
use log::{error, info};
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio_postgres::{Client, NoTls};
use crate::db;
use crate::db::ReconnectingDbClient;
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SshKey {
pub server: String,
pub public_key: String,
#[serde(default)]
pub deprecated: bool,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
@@ -35,41 +35,6 @@ pub fn is_valid_ssh_key(key: &str) -> bool {
|| ed25519_re.is_match(key)
}
pub async fn get_keys_from_db(client: &Client) -> Result<Vec<Flow>, tokio_postgres::Error> {
let rows = client.query(
"SELECT k.host, k.key, f.name FROM public.keys k INNER JOIN public.flows f ON k.key_id = f.key_id",
&[]
).await?;
let mut flows_map: HashMap<String, Flow> = HashMap::new();
for row in rows {
let host: String = row.get(0);
let key: String = row.get(1);
let flow: String = row.get(2);
let ssh_key = SshKey {
server: host,
public_key: key,
};
if let Some(flow_entry) = flows_map.get_mut(&flow) {
flow_entry.servers.push(ssh_key);
} else {
flows_map.insert(
flow.clone(),
Flow {
name: flow,
servers: vec![ssh_key],
},
);
}
}
info!("Retrieved {} flows from database", flows_map.len());
Ok(flows_map.into_values().collect())
}
// Extract client hostname from request headers
fn get_client_hostname(req: &HttpRequest) -> String {
if let Some(hostname) = req.headers().get("X-Client-Hostname") {
@@ -85,6 +50,7 @@ pub async fn get_keys(
flow_id: web::Path<String>,
allowed_flows: web::Data<Vec<String>>,
req: HttpRequest,
query: web::Query<std::collections::HashMap<String, String>>,
) -> impl Responder {
let client_hostname = get_client_hostname(&req);
let flow_id_str = flow_id.into_inner();
@@ -104,10 +70,25 @@ pub async fn get_keys(
let flows = flows.lock().unwrap();
if let Some(flow) = flows.iter().find(|flow| flow.name == flow_id_str) {
let servers: Vec<&SshKey> = flow.servers.iter().collect();
// Check if we should include deprecated keys (default: false for CLI clients)
let include_deprecated = query
.get("include_deprecated")
.map(|v| v == "true")
.unwrap_or(false);
let servers: Vec<&SshKey> = if include_deprecated {
// Return all keys (for web interface)
flow.servers.iter().collect()
} else {
// Return only active keys (for CLI clients)
flow.servers.iter().filter(|key| !key.deprecated).collect()
};
info!(
"Returning {} keys for flow '{}' to client '{}'",
"Returning {} keys ({} total, deprecated filtered: {}) for flow '{}' to client '{}'",
servers.len(),
flow.servers.len(),
!include_deprecated,
flow_id_str,
client_hostname
);
@@ -125,7 +106,7 @@ pub async fn add_keys(
flows: web::Data<Flows>,
flow_id: web::Path<String>,
new_keys: web::Json<Vec<SshKey>>,
db_client: web::Data<Arc<Client>>,
db_client: web::Data<Arc<ReconnectingDbClient>>,
allowed_flows: web::Data<Vec<String>>,
req: HttpRequest,
) -> impl Responder {
@@ -171,7 +152,10 @@ pub async fn add_keys(
);
// Batch insert keys with statistics
let key_stats = match crate::db::batch_insert_keys(&db_client, &valid_keys).await {
let key_stats = match db_client
.batch_insert_keys_reconnecting(valid_keys.clone())
.await
{
Ok(stats) => stats,
Err(e) => {
error!(
@@ -189,7 +173,9 @@ pub async fn add_keys(
let key_ids: Vec<i32> = key_stats.key_id_map.iter().map(|(_, id)| *id).collect();
// Batch insert key-flow associations
if let Err(e) = crate::db::batch_insert_flow_keys(&db_client, &flow_id_str, &key_ids).await
if let Err(e) = db_client
.batch_insert_flow_keys_reconnecting(flow_id_str.clone(), key_ids.clone())
.await
{
error!(
"Failed to batch insert flow keys from client '{}' into database: {}",
@@ -213,7 +199,7 @@ pub async fn add_keys(
}
// Get updated data
let updated_flows = match get_keys_from_db(&db_client).await {
let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
Ok(flows) => flows,
Err(e) => {
error!(
@@ -268,28 +254,22 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
args.db_host, db_user, db_password, args.db_name
);
info!("Connecting to database at {}", args.db_host);
let (db_client, connection) = match tokio_postgres::connect(&db_conn_str, NoTls).await {
Ok((client, conn)) => (client, conn),
Err(e) => {
error!("Failed to connect to the database: {}", e);
return Err(std::io::Error::new(
std::io::ErrorKind::ConnectionRefused,
format!("Database connection error: {}", e),
));
}
};
let db_client = Arc::new(db_client);
info!("Creating database client for {}", args.db_host);
let mut db_client_temp = ReconnectingDbClient::new(db_conn_str.clone());
// Spawn a new thread to run the database connection
tokio::spawn(async move {
if let Err(e) = connection.await {
error!("Connection error: {}", e);
}
});
// Initial connection
if let Err(e) = db_client_temp.connect(&db_conn_str).await {
error!("Failed to connect to the database: {}", e);
return Err(std::io::Error::new(
std::io::ErrorKind::ConnectionRefused,
format!("Database connection error: {}", e),
));
}
let db_client = Arc::new(db_client_temp);
// Initialize database schema if needed
if let Err(e) = db::initialize_db_schema(&db_client).await {
if let Err(e) = db_client.initialize_schema().await {
error!("Failed to initialize database schema: {}", e);
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
@@ -297,7 +277,7 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
));
}
let mut initial_flows = match get_keys_from_db(&db_client).await {
let mut initial_flows = match db_client.get_keys_from_db_reconnecting().await {
Ok(flows) => flows,
Err(e) => {
error!("Failed to get initial flows from database: {}", e);
@@ -324,8 +304,42 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
.app_data(web::Data::new(flows.clone()))
.app_data(web::Data::new(db_client.clone()))
.app_data(allowed_flows.clone())
// API routes
.route("/api/version", web::get().to(crate::web::get_version_api))
.route("/api/flows", web::get().to(crate::web::get_flows_api))
.route(
"/{flow_id}/scan-dns",
web::post().to(crate::web::scan_dns_resolution),
)
.route(
"/{flow_id}/bulk-deprecate",
web::post().to(crate::web::bulk_deprecate_servers),
)
.route(
"/{flow_id}/bulk-restore",
web::post().to(crate::web::bulk_restore_servers),
)
.route(
"/{flow_id}/keys/{server}",
web::delete().to(crate::web::delete_key_by_server),
)
.route(
"/{flow_id}/keys/{server}/restore",
web::post().to(crate::web::restore_key_by_server),
)
.route(
"/{flow_id}/keys/{server}/delete",
web::delete().to(crate::web::permanently_delete_key_by_server),
)
// Original API routes
.route("/{flow_id}/keys", web::get().to(get_keys))
.route("/{flow_id}/keys", web::post().to(add_keys))
// Web interface routes
.route("/", web::get().to(crate::web::serve_web_interface))
.route(
"/static/{filename:.*}",
web::get().to(crate::web::serve_static_file),
)
})
.bind((args.ip.as_str(), args.port))?
.run()
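
The new bulk routes accept a JSON body shaped like BulkDeprecateRequest in src/web.rs ({"servers": [...]}). A hedged sketch of calling the bulk-deprecate endpoint with reqwest (already a dependency); the base URL and flow name are assumptions for illustration, and any Basic Auth the deployment requires is omitted:

use serde_json::json;

async fn bulk_deprecate_example() -> Result<(), reqwest::Error> {
    let client = reqwest::Client::new();
    let resp = client
        // Route registered above: POST /{flow_id}/bulk-deprecate
        .post("https://khm.example.com/work/bulk-deprecate")
        .json(&json!({ "servers": ["old-host-1", "old-host-2"] }))
        .send()
        .await?;
    println!("bulk-deprecate returned HTTP {}", resp.status());
    Ok(())
}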

src/web.rs (new file, 470 lines)

@@ -0,0 +1,470 @@
use actix_web::{web, HttpResponse, Result};
use log::info;
use rust_embed::RustEmbed;
use serde_json::json;
use std::sync::Arc;
use trust_dns_resolver::TokioAsyncResolver;
use trust_dns_resolver::config::*;
use serde::{Deserialize, Serialize};
use futures::future;
use tokio::sync::Semaphore;
use tokio::time::{timeout, Duration};
use crate::db::ReconnectingDbClient;
use crate::server::Flows;
#[derive(RustEmbed)]
#[folder = "static/"]
struct StaticAssets;
#[derive(Serialize, Deserialize, Debug)]
pub struct DnsResolutionResult {
pub server: String,
pub resolved: bool,
pub error: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct BulkDeprecateRequest {
pub servers: Vec<String>,
}
async fn check_dns_resolution(hostname: String, semaphore: Arc<Semaphore>) -> DnsResolutionResult {
let _permit = match semaphore.acquire().await {
Ok(permit) => permit,
Err(_) => {
return DnsResolutionResult {
server: hostname,
resolved: false,
error: Some("Failed to acquire semaphore".to_string()),
};
}
};
let resolver = TokioAsyncResolver::tokio(
ResolverConfig::default(),
ResolverOpts::default(),
);
let lookup_result = timeout(Duration::from_secs(5), resolver.lookup_ip(&hostname)).await;
match lookup_result {
Ok(Ok(_)) => DnsResolutionResult {
server: hostname,
resolved: true,
error: None,
},
Ok(Err(e)) => DnsResolutionResult {
server: hostname,
resolved: false,
error: Some(e.to_string()),
},
Err(_) => DnsResolutionResult {
server: hostname,
resolved: false,
error: Some("DNS lookup timeout (5s)".to_string()),
},
}
}
// API endpoint to get application version
pub async fn get_version_api() -> Result<HttpResponse> {
Ok(HttpResponse::Ok().json(json!({
"version": env!("CARGO_PKG_VERSION")
})))
}
// API endpoint to get list of available flows
pub async fn get_flows_api(allowed_flows: web::Data<Vec<String>>) -> Result<HttpResponse> {
info!("API request for available flows");
Ok(HttpResponse::Ok().json(&**allowed_flows))
}
// API endpoint to scan DNS resolution for all hosts in a flow
pub async fn scan_dns_resolution(
flows: web::Data<Flows>,
path: web::Path<String>,
allowed_flows: web::Data<Vec<String>>,
) -> Result<HttpResponse> {
let flow_id_str = path.into_inner();
info!("API request to scan DNS resolution for flow '{}'" , flow_id_str);
if !allowed_flows.contains(&flow_id_str) {
return Ok(HttpResponse::Forbidden().json(json!({
"error": "Flow ID not allowed"
})));
}
let flows_guard = flows.lock().unwrap();
let flow = match flows_guard.iter().find(|flow| flow.name == flow_id_str) {
Some(flow) => flow,
None => {
return Ok(HttpResponse::NotFound().json(json!({
"error": "Flow ID not found"
})));
}
};
// Get unique hostnames
let mut hostnames: std::collections::HashSet<String> = std::collections::HashSet::new();
for key in &flow.servers {
hostnames.insert(key.server.clone());
}
drop(flows_guard);
info!("Scanning DNS resolution for {} unique hosts", hostnames.len());
// Limit concurrent DNS requests to prevent "too many open files" error
let semaphore = Arc::new(Semaphore::new(20));
// Scan all hostnames concurrently with rate limiting
let mut scan_futures = Vec::new();
for hostname in hostnames {
scan_futures.push(check_dns_resolution(hostname, semaphore.clone()));
}
let results = future::join_all(scan_futures).await;
let unresolved_count = results.iter().filter(|r| !r.resolved).count();
info!("DNS scan complete: {} unresolved out of {} hosts", unresolved_count, results.len());
Ok(HttpResponse::Ok().json(json!({
"results": results,
"total": results.len(),
"unresolved": unresolved_count
})))
}
// API endpoint to bulk deprecate multiple servers
pub async fn bulk_deprecate_servers(
flows: web::Data<Flows>,
path: web::Path<String>,
request: web::Json<BulkDeprecateRequest>,
db_client: web::Data<Arc<ReconnectingDbClient>>,
allowed_flows: web::Data<Vec<String>>,
) -> Result<HttpResponse> {
let flow_id_str = path.into_inner();
info!("API request to bulk deprecate {} servers in flow '{}'", request.servers.len(), flow_id_str);
if !allowed_flows.contains(&flow_id_str) {
return Ok(HttpResponse::Forbidden().json(json!({
"error": "Flow ID not allowed"
})));
}
// Use single bulk operation instead of loop
let total_deprecated = match db_client
.bulk_deprecate_keys_by_servers_reconnecting(request.servers.clone(), flow_id_str.clone())
.await
{
Ok(count) => {
info!("Bulk deprecated {} key(s) for {} servers", count, request.servers.len());
count
}
Err(e) => {
return Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to bulk deprecate keys: {}", e)
})));
}
};
// Refresh the in-memory flows
let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
Ok(flows) => flows,
Err(e) => {
return Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to refresh flows: {}", e)
})));
}
};
let mut flows_guard = flows.lock().unwrap();
*flows_guard = updated_flows;
let response = json!({
"message": format!("Successfully deprecated {} key(s) for {} server(s)", total_deprecated, request.servers.len()),
"deprecated_count": total_deprecated,
"servers_processed": request.servers.len()
});
Ok(HttpResponse::Ok().json(response))
}
// API endpoint to bulk restore multiple servers
pub async fn bulk_restore_servers(
flows: web::Data<Flows>,
path: web::Path<String>,
request: web::Json<BulkDeprecateRequest>,
db_client: web::Data<Arc<ReconnectingDbClient>>,
allowed_flows: web::Data<Vec<String>>,
) -> Result<HttpResponse> {
let flow_id_str = path.into_inner();
info!("API request to bulk restore {} servers in flow '{}'", request.servers.len(), flow_id_str);
if !allowed_flows.contains(&flow_id_str) {
return Ok(HttpResponse::Forbidden().json(json!({
"error": "Flow ID not allowed"
})));
}
// Use single bulk operation
let total_restored = match db_client
.bulk_restore_keys_by_servers_reconnecting(request.servers.clone(), flow_id_str.clone())
.await
{
Ok(count) => {
info!("Bulk restored {} key(s) for {} servers", count, request.servers.len());
count
}
Err(e) => {
return Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to bulk restore keys: {}", e)
})));
}
};
// Refresh the in-memory flows
let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
Ok(flows) => flows,
Err(e) => {
return Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to refresh flows: {}", e)
})));
}
};
let mut flows_guard = flows.lock().unwrap();
*flows_guard = updated_flows;
let response = json!({
"message": format!("Successfully restored {} key(s) for {} server(s)", total_restored, request.servers.len()),
"restored_count": total_restored,
"servers_processed": request.servers.len()
});
Ok(HttpResponse::Ok().json(response))
}
// API endpoint to deprecate a specific key by server name
pub async fn delete_key_by_server(
flows: web::Data<Flows>,
path: web::Path<(String, String)>,
db_client: web::Data<Arc<ReconnectingDbClient>>,
allowed_flows: web::Data<Vec<String>>,
) -> Result<HttpResponse> {
let (flow_id_str, server_name) = path.into_inner();
info!(
"API request to deprecate key for server '{}' in flow '{}'",
server_name, flow_id_str
);
if !allowed_flows.contains(&flow_id_str) {
return Ok(HttpResponse::Forbidden().json(json!({
"error": "Flow ID not allowed"
})));
}
// Deprecate in database
match db_client
.deprecate_key_by_server_reconnecting(server_name.clone(), flow_id_str.clone())
.await
{
Ok(deprecated_count) => {
if deprecated_count > 0 {
info!(
"Deprecated {} key(s) for server '{}' in flow '{}'",
deprecated_count, server_name, flow_id_str
);
// Refresh the in-memory flows
let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
Ok(flows) => flows,
Err(e) => {
return Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to refresh flows: {}", e)
})));
}
};
let mut flows_guard = flows.lock().unwrap();
*flows_guard = updated_flows;
Ok(HttpResponse::Ok().json(json!({
"message": format!("Successfully deprecated {} key(s) for server '{}'", deprecated_count, server_name),
"deprecated_count": deprecated_count
})))
} else {
Ok(HttpResponse::NotFound().json(json!({
"error": format!("No keys found for server '{}'", server_name)
})))
}
}
Err(e) => Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to deprecate key: {}", e)
}))),
}
}
// API endpoint to restore a deprecated key
pub async fn restore_key_by_server(
flows: web::Data<Flows>,
path: web::Path<(String, String)>,
db_client: web::Data<Arc<ReconnectingDbClient>>,
allowed_flows: web::Data<Vec<String>>,
) -> Result<HttpResponse> {
let (flow_id_str, server_name) = path.into_inner();
info!(
"API request to restore key for server '{}' in flow '{}'",
server_name, flow_id_str
);
if !allowed_flows.contains(&flow_id_str) {
return Ok(HttpResponse::Forbidden().json(json!({
"error": "Flow ID not allowed"
})));
}
// Restore in database
match db_client
.restore_key_by_server_reconnecting(server_name.clone(), flow_id_str.clone())
.await
{
Ok(restored_count) => {
if restored_count > 0 {
info!(
"Restored {} key(s) for server '{}' in flow '{}'",
restored_count, server_name, flow_id_str
);
// Refresh the in-memory flows
let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
Ok(flows) => flows,
Err(e) => {
return Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to refresh flows: {}", e)
})));
}
};
let mut flows_guard = flows.lock().unwrap();
*flows_guard = updated_flows;
Ok(HttpResponse::Ok().json(json!({
"message": format!("Successfully restored {} key(s) for server '{}'", restored_count, server_name),
"restored_count": restored_count
})))
} else {
Ok(HttpResponse::NotFound().json(json!({
"error": format!("No deprecated keys found for server '{}'", server_name)
})))
}
}
Err(e) => Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to restore key: {}", e)
}))),
}
}
// API endpoint to permanently delete a key
pub async fn permanently_delete_key_by_server(
flows: web::Data<Flows>,
path: web::Path<(String, String)>,
db_client: web::Data<Arc<ReconnectingDbClient>>,
allowed_flows: web::Data<Vec<String>>,
) -> Result<HttpResponse> {
let (flow_id_str, server_name) = path.into_inner();
info!(
"API request to permanently delete key for server '{}' in flow '{}'",
server_name, flow_id_str
);
if !allowed_flows.contains(&flow_id_str) {
return Ok(HttpResponse::Forbidden().json(json!({
"error": "Flow ID not allowed"
})));
}
// Permanently delete from database
match db_client
.permanently_delete_key_by_server_reconnecting(server_name.clone(), flow_id_str.clone())
.await
{
Ok(deleted_count) => {
if deleted_count > 0 {
info!(
"Permanently deleted {} key(s) for server '{}' in flow '{}'",
deleted_count, server_name, flow_id_str
);
// Refresh the in-memory flows
let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
Ok(flows) => flows,
Err(e) => {
return Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to refresh flows: {}", e)
})));
}
};
let mut flows_guard = flows.lock().unwrap();
*flows_guard = updated_flows;
Ok(HttpResponse::Ok().json(json!({
"message": format!("Successfully deleted {} key(s) for server '{}'", deleted_count, server_name),
"deleted_count": deleted_count
})))
} else {
Ok(HttpResponse::NotFound().json(json!({
"error": format!("No keys found for server '{}'", server_name)
})))
}
}
Err(e) => Ok(HttpResponse::InternalServerError().json(json!({
"error": format!("Failed to delete key: {}", e)
}))),
}
}
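// The deprecate/restore/delete handlers above all repeat the same "reload from DB,
// swap the in-memory cache" block. A possible factoring, sketched here rather than
// taken from the source (assumes `Flows` is the Mutex-wrapped cache type used above
// and that the DB error type implements Display):
//
//     async fn refresh_flows_cache(
//         flows: &web::Data<Flows>,
//         db_client: &ReconnectingDbClient,
//     ) -> Result<(), String> {
//         let updated = db_client
//             .get_keys_from_db_reconnecting()
//             .await
//             .map_err(|e| format!("Failed to refresh flows: {}", e))?;
//         *flows.lock().unwrap() = updated;
//         Ok(())
//     }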
// Serve static files from embedded assets
pub async fn serve_static_file(path: web::Path<String>) -> Result<HttpResponse> {
let file_path = path.into_inner();
match StaticAssets::get(&file_path) {
Some(content) => {
let content_type = match std::path::Path::new(&file_path)
.extension()
.and_then(|s| s.to_str())
{
Some("html") => "text/html; charset=utf-8",
Some("css") => "text/css; charset=utf-8",
Some("js") => "application/javascript; charset=utf-8",
Some("png") => "image/png",
Some("jpg") | Some("jpeg") => "image/jpeg",
Some("svg") => "image/svg+xml",
_ => "application/octet-stream",
};
Ok(HttpResponse::Ok()
.content_type(content_type)
.body(content.data.as_ref().to_vec()))
}
None => Ok(HttpResponse::NotFound().body(format!("File not found: {}", file_path))),
}
}
// Serve the main web interface from embedded assets
pub async fn serve_web_interface() -> Result<HttpResponse> {
match StaticAssets::get("index.html") {
Some(content) => Ok(HttpResponse::Ok()
.content_type("text/html; charset=utf-8")
.body(content.data.as_ref().to_vec())),
None => Ok(HttpResponse::NotFound().body("Web interface not found")),
}
}
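// `StaticAssets::get` returning an embedded file with a `.data` payload matches the
// rust-embed API; a sketch of the derive that would back these two handlers, assuming
// the assets live in the `static/` directory added below:
//
//     #[derive(rust_embed::RustEmbed)]
//     #[folder = "static/"]
//     struct StaticAssets;
//
// And one possible actix-web wiring for the handlers in this file; the URL paths are
// illustrative assumptions, not taken from the source:
//
//     cfg.route("/", web::get().to(serve_web_interface))
//         .route("/static/{filename:.*}", web::get().to(serve_static_file))
//         .route("/{flow_id}/servers/bulk-deprecate", web::post().to(bulk_deprecate_servers))
//         .route("/{flow_id}/servers/bulk-restore", web::post().to(bulk_restore_servers));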

180
static/index.html Normal file
View File

@@ -0,0 +1,180 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>SSH Key Manager</title>
<link rel="stylesheet" href="/static/style.css">
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600&display=swap" rel="stylesheet">
</head>
<body>
<div class="container">
<header>
<div class="header-title">
<h1>SSH Key Manager</h1>
<span class="version" id="appVersion">Loading...</span>
</div>
<div class="flow-selector">
<label for="flowSelect">Flow:</label>
<select id="flowSelect">
<option value="">Select a flow...</option>
</select>
<button id="refreshBtn" class="btn btn-secondary">Refresh</button>
</div>
</header>
<main>
<div class="stats-panel">
<div class="stat-item">
<span class="stat-value" id="totalKeys">0</span>
<span class="stat-label">Total Keys</span>
</div>
<div class="stat-item">
<span class="stat-value" id="activeKeys">0</span>
<span class="stat-label">Active Keys</span>
</div>
<div class="stat-item">
<span class="stat-value deprecated" id="deprecatedKeys">0</span>
<span class="stat-label">Deprecated Keys</span>
</div>
<div class="stat-item">
<span class="stat-value" id="uniqueServers">0</span>
<span class="stat-label">Unique Servers</span>
</div>
</div>
<div class="actions-panel">
<button id="addKeyBtn" class="btn btn-primary">Add SSH Key</button>
<button id="scanDnsBtn" class="btn btn-secondary">Scan DNS Resolution</button>
<button id="bulkDeleteBtn" class="btn btn-danger" disabled>Deprecate Selected</button>
<button id="bulkRestoreBtn" class="btn btn-success" disabled style="display: none;">Restore Selected</button>
<button id="bulkPermanentDeleteBtn" class="btn btn-danger" disabled style="display: none;">Delete Selected</button>
<div class="filter-controls">
<label class="filter-label">
<input type="checkbox" id="showDeprecatedOnly">
<span>Show only deprecated keys</span>
</label>
</div>
<div class="search-box">
<input type="text" id="searchInput" placeholder="Search servers or keys...">
</div>
</div>
<div class="keys-table-container">
<table class="keys-table">
<thead>
<tr>
<th>
<input type="checkbox" id="selectAll">
</th>
<th>Server/Type</th>
<th>Key Preview</th>
<th></th>
<th>Actions</th>
</tr>
</thead>
<tbody id="keysTableBody">
<!-- Keys will be populated here -->
</tbody>
</table>
<div id="noKeysMessage" class="no-keys-message" style="display: none;">
No SSH keys found for this flow.
</div>
</div>
<div class="pagination">
<button id="prevPage" class="btn btn-secondary" disabled>Previous</button>
<span id="pageInfo">Page 1 of 1</span>
<button id="nextPage" class="btn btn-secondary" disabled>Next</button>
</div>
</main>
</div>
<!-- Add Key Modal -->
<div id="addKeyModal" class="modal">
<div class="modal-content">
<div class="modal-header">
<h2>Add SSH Key</h2>
<span class="close">&times;</span>
</div>
<div class="modal-body">
<form id="addKeyForm">
<div class="form-group">
<label for="serverInput">Server/Hostname:</label>
<input type="text" id="serverInput" required placeholder="example.com">
</div>
<div class="form-group">
<label for="keyInput">SSH Public Key:</label>
<textarea id="keyInput" required placeholder="ssh-rsa AAAAB3..."></textarea>
</div>
<div class="form-actions">
<button type="button" class="btn btn-secondary" id="cancelAdd">Cancel</button>
<button type="submit" class="btn btn-primary">Add Key</button>
</div>
</form>
</div>
</div>
</div>
<!-- View Key Modal -->
<div id="viewKeyModal" class="modal">
<div class="modal-content">
<div class="modal-header">
<h2>SSH Key Details</h2>
<span class="close">&times;</span>
</div>
<div class="modal-body">
<div class="form-group">
<label>Server:</label>
<div id="viewServer" class="read-only-field"></div>
</div>
<div class="form-group">
<label>SSH Public Key:</label>
<textarea id="viewKey" class="read-only-field" readonly></textarea>
</div>
<div class="form-actions">
<button type="button" class="btn btn-secondary" id="closeView">Close</button>
<button type="button" class="btn btn-primary" id="copyKey">Copy Key</button>
</div>
</div>
</div>
</div>
<!-- DNS Scan Results Modal -->
<div id="dnsScanModal" class="modal">
<div class="modal-content modal-large">
<div class="modal-header">
<h2>DNS Resolution Scan Results</h2>
<span class="close">&times;</span>
</div>
<div class="modal-body">
<div id="dnsScanStats" class="scan-stats"></div>
<div id="unresolvedHosts" class="unresolved-hosts">
<div class="section-header">
<h3>Unresolved Hosts</h3>
<button id="selectAllUnresolved" class="btn btn-sm btn-secondary">Select All</button>
</div>
<div id="unresolvedList" class="host-list"></div>
</div>
<div class="form-actions">
<button type="button" class="btn btn-secondary" id="closeDnsScan">Close</button>
<button type="button" class="btn btn-danger" id="deprecateUnresolved" disabled>Deprecate Selected</button>
</div>
</div>
</div>
</div>
<!-- Loading Overlay -->
<div id="loadingOverlay" class="loading-overlay">
<div class="loading-spinner"></div>
<div class="loading-text">Loading...</div>
</div>
<!-- Toast Notifications -->
<div id="toastContainer" class="toast-container"></div>
<script src="/static/script.js"></script>
</body>
</html>

1109
static/script.js Normal file

File diff suppressed because it is too large

825
static/style.css Normal file
View File

@@ -0,0 +1,825 @@
:root {
--primary-color: #2563eb;
--primary-hover: #1d4ed8;
--secondary-color: #64748b;
--danger-color: #dc2626;
--danger-hover: #b91c1c;
--success-color: #16a34a;
--warning-color: #d97706;
--background: #f8fafc;
--surface: #ffffff;
--border: #e2e8f0;
--text-primary: #1e293b;
--text-secondary: #64748b;
--shadow: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1);
--shadow-lg: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1);
--border-radius: 0.5rem;
--font-family: 'Inter', sans-serif;
}
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: var(--font-family);
background-color: var(--background);
color: var(--text-primary);
line-height: 1.6;
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 2rem;
min-height: 100vh;
}
header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 2rem;
padding-bottom: 1rem;
border-bottom: 2px solid var(--border);
}
header h1 {
font-size: 2.5rem;
font-weight: 600;
color: var(--text-primary);
margin: 0;
}
.header-title {
display: flex;
align-items: baseline;
gap: 1rem;
}
.version {
font-size: 0.875rem;
color: var(--text-secondary);
background: var(--background);
padding: 0.25rem 0.5rem;
border-radius: var(--border-radius);
font-weight: 500;
border: 1px solid var(--border);
}
.flow-selector {
display: flex;
align-items: center;
gap: 1rem;
}
.flow-selector label {
font-weight: 500;
color: var(--text-secondary);
}
.flow-selector select {
padding: 0.5rem 1rem;
border: 1px solid var(--border);
border-radius: var(--border-radius);
background: var(--surface);
color: var(--text-primary);
font-size: 1rem;
min-width: 200px;
}
.btn {
padding: 0.5rem 1rem;
border: none;
border-radius: var(--border-radius);
font-size: 0.875rem;
font-weight: 500;
cursor: pointer;
transition: all 0.2s ease;
display: inline-flex;
align-items: center;
gap: 0.5rem;
text-decoration: none;
}
.btn:disabled {
opacity: 0.6;
cursor: not-allowed;
}
.btn-primary {
background-color: var(--primary-color);
color: white;
}
.btn-primary:hover:not(:disabled) {
background-color: var(--primary-hover);
}
.btn-secondary {
background-color: var(--secondary-color);
color: white;
}
.btn-secondary:hover:not(:disabled) {
background-color: #475569;
}
.btn-danger {
background-color: var(--danger-color);
color: white;
}
.btn-danger:hover:not(:disabled) {
background-color: var(--danger-hover);
}
.btn-success {
background-color: var(--success-color);
color: white;
}
.btn-success:hover:not(:disabled) {
background-color: #059669;
}
.btn-sm {
padding: 0.25rem 0.5rem;
font-size: 0.75rem;
}
.stats-panel {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 1rem;
margin-bottom: 2rem;
}
.stat-item {
background: var(--surface);
padding: 1.5rem;
border-radius: var(--border-radius);
box-shadow: var(--shadow);
text-align: center;
}
.stat-value {
display: block;
font-size: 2rem;
font-weight: 600;
color: var(--primary-color);
}
.stat-value.deprecated {
color: var(--danger-color);
}
.stat-label {
color: var(--text-secondary);
font-size: 0.875rem;
font-weight: 500;
}
.actions-panel {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 1.5rem;
gap: 1rem;
flex-wrap: wrap;
}
.filter-controls {
display: flex;
align-items: center;
gap: 1rem;
}
.filter-label {
display: flex;
align-items: center;
gap: 0.5rem;
font-size: 0.875rem;
color: var(--text-primary);
cursor: pointer;
user-select: none;
padding: 0.5rem 0.75rem;
border-radius: var(--border-radius);
transition: background-color 0.2s ease;
}
.filter-label:hover {
background-color: var(--background);
}
.filter-label.active {
background-color: var(--primary-color);
color: white;
}
.filter-label.active input[type="checkbox"] {
accent-color: white;
}
.filter-label input[type="checkbox"] {
margin: 0;
}
.filter-label span {
white-space: nowrap;
}
.search-box input {
padding: 0.5rem 1rem;
border: 1px solid var(--border);
border-radius: var(--border-radius);
background: var(--surface);
color: var(--text-primary);
font-size: 1rem;
width: 300px;
}
.keys-table-container {
background: var(--surface);
border-radius: var(--border-radius);
box-shadow: var(--shadow);
overflow: hidden;
margin-bottom: 1.5rem;
}
.keys-table {
width: 100%;
border-collapse: collapse;
}
.keys-table th,
.keys-table td {
padding: 1rem;
text-align: left;
border-bottom: 1px solid var(--border);
}
.keys-table th {
background-color: #f1f5f9;
font-weight: 600;
color: var(--text-primary);
}
.keys-table tbody tr:hover {
background-color: #f8fafc;
}
.keys-table tbody tr.deprecated {
opacity: 0.6;
background-color: #fef2f2;
}
.keys-table tbody tr.deprecated:hover {
background-color: #fee2e2;
}
.keys-table tbody tr.deprecated .key-preview,
.keys-table tbody tr.deprecated td:nth-child(2) {
text-decoration: line-through;
color: var(--text-secondary);
}
.host-group-header {
background-color: #f1f5f9;
font-weight: 600;
transition: background-color 0.2s ease;
border-left: 4px solid var(--primary-color);
}
.host-group-header:hover {
background-color: #e2e8f0;
}
.host-group-header.collapsed {
border-left-color: var(--secondary-color);
}
.host-group-header .expand-icon {
transition: transform 0.2s ease;
display: inline-block;
margin-right: 0.5rem;
user-select: none;
}
.host-group-header.collapsed .expand-icon {
transform: rotate(-90deg);
}
.host-group-header input[type="checkbox"] {
margin: 0;
}
.host-group-header td:first-child {
width: 50px;
text-align: center;
}
.host-group-header td:nth-child(2) {
cursor: pointer;
user-select: none;
}
.key-row {
border-left: 4px solid transparent;
}
.key-row.hidden {
display: none;
}
.host-summary {
font-size: 0.875rem;
color: var(--text-secondary);
}
.key-count {
background-color: var(--primary-color);
color: white;
padding: 0.125rem 0.375rem;
border-radius: 0.25rem;
font-size: 0.75rem;
font-weight: 500;
margin-left: 0.5rem;
}
.deprecated-count {
background-color: var(--danger-color);
color: white;
padding: 0.125rem 0.375rem;
border-radius: 0.25rem;
font-size: 0.75rem;
font-weight: 500;
margin-left: 0.25rem;
}
.key-preview {
font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace;
font-size: 0.875rem;
color: var(--text-secondary);
max-width: 200px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.key-type {
display: inline-block;
padding: 0.25rem 0.5rem;
background-color: #e0e7ff;
color: #3730a3;
border-radius: 0.25rem;
font-size: 0.75rem;
font-weight: 500;
}
.key-type.rsa { background-color: #fef3c7; color: #92400e; }
.key-type.ed25519 { background-color: #dcfce7; color: #166534; }
.key-type.ecdsa { background-color: #e0e7ff; color: #3730a3; }
.key-type.dsa { background-color: #fce7f3; color: #9d174d; }
.deprecated-badge {
display: inline-block;
padding: 0.25rem 0.5rem;
background-color: #fecaca;
color: #991b1b;
border-radius: 0.25rem;
font-size: 0.75rem;
font-weight: 500;
margin-left: 0.5rem;
}
.no-keys-message {
text-align: center;
padding: 3rem;
color: var(--text-secondary);
font-size: 1.125rem;
}
.pagination {
display: flex;
justify-content: center;
align-items: center;
gap: 1rem;
}
.modal {
display: none;
position: fixed;
z-index: 1000;
left: 0;
top: 0;
width: 100%;
height: 100%;
background-color: rgba(0, 0, 0, 0.5);
backdrop-filter: blur(4px);
}
.modal-content {
background-color: var(--surface);
margin: 5% auto;
padding: 0;
border-radius: var(--border-radius);
box-shadow: var(--shadow-lg);
width: 90%;
max-width: 600px;
max-height: 80vh;
overflow: hidden;
}
.modal-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 1.5rem;
border-bottom: 1px solid var(--border);
}
.modal-header h2 {
font-size: 1.5rem;
font-weight: 600;
}
.close {
font-size: 1.5rem;
font-weight: bold;
cursor: pointer;
color: var(--text-secondary);
padding: 0.5rem;
border-radius: var(--border-radius);
transition: all 0.2s ease;
}
.close:hover {
background-color: var(--background);
color: var(--text-primary);
}
.modal-body {
padding: 1.5rem;
max-height: 60vh;
overflow-y: auto;
}
.form-group {
margin-bottom: 1.5rem;
}
.form-group label {
display: block;
margin-bottom: 0.5rem;
font-weight: 500;
color: var(--text-primary);
}
.form-group input,
.form-group textarea {
width: 100%;
padding: 0.75rem;
border: 1px solid var(--border);
border-radius: var(--border-radius);
font-size: 1rem;
font-family: var(--font-family);
background: var(--surface);
color: var(--text-primary);
transition: border-color 0.2s ease;
}
.form-group input:focus,
.form-group textarea:focus {
outline: none;
border-color: var(--primary-color);
box-shadow: 0 0 0 3px rgba(37, 99, 235, 0.1);
}
.form-group textarea {
resize: vertical;
min-height: 120px;
font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace;
font-size: 0.875rem;
}
.read-only-field {
background-color: var(--background) !important;
cursor: not-allowed;
}
.form-actions {
display: flex;
justify-content: flex-end;
gap: 1rem;
margin-top: 2rem;
}
.loading-overlay {
display: none;
position: fixed;
z-index: 9999;
left: 0;
top: 0;
width: 100%;
height: 100%;
background-color: rgba(255, 255, 255, 0.9);
backdrop-filter: blur(4px);
}
.loading-spinner {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
width: 40px;
height: 40px;
border: 4px solid var(--border);
border-top: 4px solid var(--primary-color);
border-radius: 50%;
animation: spin 1s linear infinite;
}
.loading-text {
position: absolute;
top: 60%;
left: 50%;
transform: translate(-50%, -50%);
color: var(--text-secondary);
font-weight: 500;
}
@keyframes spin {
0% { transform: translate(-50%, -50%) rotate(0deg); }
100% { transform: translate(-50%, -50%) rotate(360deg); }
}
.toast-container {
position: fixed;
top: 1rem;
right: 1rem;
z-index: 10000;
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.toast {
padding: 1rem 1.5rem;
border-radius: var(--border-radius);
color: white;
font-weight: 500;
box-shadow: var(--shadow-lg);
transform: translateX(100%);
transition: transform 0.3s ease;
max-width: 400px;
display: flex;
align-items: center;
gap: 0.5rem;
}
.toast.show {
transform: translateX(0);
}
.toast.success {
background-color: var(--success-color);
}
.toast.error {
background-color: var(--danger-color);
}
.toast.warning {
background-color: var(--warning-color);
}
.toast.info {
background-color: var(--primary-color);
}
@media (max-width: 768px) {
.container {
padding: 1rem;
}
header {
flex-direction: column;
gap: 1rem;
align-items: stretch;
}
.header-title {
flex-direction: column;
align-items: flex-start;
gap: 0.5rem;
}
.header-title h1 {
font-size: 2rem;
}
.actions-panel {
flex-direction: column;
align-items: stretch;
gap: 1rem;
}
.filter-controls {
justify-content: center;
}
.search-box input {
width: 100%;
}
.keys-table-container {
overflow-x: auto;
}
.keys-table {
min-width: 600px;
}
.modal-content {
margin: 10% auto;
width: 95%;
}
.form-actions {
flex-direction: column;
}
.stats-panel {
grid-template-columns: 1fr;
}
}
/* Checkbox styles */
input[type="checkbox"] {
width: 1rem;
height: 1rem;
accent-color: var(--primary-color);
}
/* Indeterminate checkbox styling */
input[type="checkbox"]:indeterminate {
background-color: var(--primary-color);
background-image: linear-gradient(90deg, transparent 40%, white 40%, white 60%, transparent 60%);
border-color: var(--primary-color);
}
/* Action buttons in table */
.table-actions {
display: flex;
gap: 0.5rem;
align-items: center;
}
/* Error states */
.form-group input:invalid,
.form-group textarea:invalid {
border-color: var(--danger-color);
}
.form-group input:invalid:focus,
.form-group textarea:invalid:focus {
box-shadow: 0 0 0 3px rgba(220, 38, 38, 0.1);
}
/* Success states */
.form-group input:valid,
.form-group textarea:valid {
border-color: var(--success-color);
}
/* DNS Scan Modal Styles */
.modal-large {
max-width: 800px;
}
.scan-stats {
background: var(--background);
padding: 1rem;
border-radius: var(--border-radius);
margin-bottom: 1.5rem;
display: grid;
grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
gap: 1rem;
}
.scan-stat {
text-align: center;
}
.scan-stat-value {
display: block;
font-size: 1.5rem;
font-weight: 600;
color: var(--primary-color);
}
.scan-stat-label {
color: var(--text-secondary);
font-size: 0.875rem;
}
.unresolved-count {
color: var(--danger-color) !important;
}
.section-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 1rem;
}
.section-header h3 {
font-size: 1.125rem;
font-weight: 600;
color: var(--text-primary);
}
.host-list {
max-height: 300px;
overflow-y: auto;
border: 1px solid var(--border);
border-radius: var(--border-radius);
}
.host-item {
display: flex;
align-items: center;
padding: 0.75rem;
border-bottom: 1px solid var(--border);
transition: background-color 0.2s ease;
}
.host-item:last-child {
border-bottom: none;
}
.host-item:hover {
background-color: var(--background);
}
.host-item label {
display: flex;
align-items: center;
gap: 0.75rem;
flex: 1;
cursor: pointer;
margin: 0;
}
.host-name {
font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace;
font-weight: 500;
color: var(--text-primary);
}
.host-error {
font-size: 0.75rem;
color: var(--danger-color);
margin-left: auto;
max-width: 200px;
word-break: break-word;
}
.empty-state {
text-align: center;
padding: 2rem;
color: var(--text-secondary);
font-style: italic;
}
.scan-progress {
background: var(--background);
padding: 1rem;
border-radius: var(--border-radius);
margin-bottom: 1rem;
text-align: center;
}
.scan-progress-text {
color: var(--text-secondary);
margin-bottom: 0.5rem;
}
.progress-bar {
width: 100%;
height: 8px;
background: var(--border);
border-radius: 4px;
overflow: hidden;
}
.progress-fill {
height: 100%;
background: var(--primary-color);
transition: width 0.3s ease;
border-radius: 4px;
}