From 45ac3fca5163430af91732bc62224c3ef911fbee Mon Sep 17 00:00:00 2001
From: Ultradesu
Date: Sat, 19 Jul 2025 12:56:25 +0300
Subject: [PATCH] Fixed web ui. Added deprecation feature

---
 Cargo.toml        |   2 +-
 src/client.rs     |  30 +-
 src/db.rs         | 960 ++++++++++++++++++++++++++++------------------
 src/main.rs       |  10 +-
 src/server.rs     | 115 +++---
 src/web.rs        | 112 +++---
 static/index.html |  12 +-
 static/script.js  | 222 +++++++++--
 static/style.css  | 125 ++++++
 9 files changed, 1055 insertions(+), 533 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index fd5b4ea..c13ae76 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "khm"
-version = "0.6.1"
+version = "0.8.1"
 edition = "2021"
 authors = ["AB "]

diff --git a/src/client.rs b/src/client.rs
index 416efd2..1910144 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -28,10 +28,10 @@ fn read_known_hosts(file_path: &str) -> io::Result<Vec<SshKey>> {
         if parts.len() >= 2 {
             let server = parts[0].to_string();
             let public_key = parts[1..].join(" ");
-            keys.push(SshKey {
-                server,
+            keys.push(SshKey {
+                server,
                 public_key,
-                deprecated: false, // Keys from known_hosts are not deprecated
+                deprecated: false, // Keys from known_hosts are not deprecated
             });
         }
     }
@@ -55,7 +55,10 @@ fn write_known_hosts(file_path: &str, keys: &[SshKey]) -> io::Result<()> {
     for key in active_keys {
         writeln!(file, "{} {}", key.server, key.public_key)?;
     }
-    info!("Wrote {} active keys to known_hosts file (filtered out deprecated keys)", active_count);
+    info!(
+        "Wrote {} active keys to known_hosts file (filtered out deprecated keys)",
+        active_count
+    );
 
     Ok(())
 }
@@ -172,12 +175,15 @@ async fn get_keys_from_server(
 
 pub async fn run_client(args: crate::Args) -> std::io::Result<()> {
     info!("Client mode: Reading known_hosts file");
-    
+
     let keys = match read_known_hosts(&args.known_hosts) {
         Ok(keys) => keys,
         Err(e) => {
             if e.kind() == io::ErrorKind::NotFound {
-                info!("known_hosts file not found: {}. Starting with empty key list.", args.known_hosts);
+                info!(
+                    "known_hosts file not found: {}. Starting with empty key list.",
+                    args.known_hosts
+                );
                 Vec::new()
             } else {
                 error!("Failed to read known_hosts file: {}", e);
@@ -188,10 +194,13 @@ pub async fn run_client(args: crate::Args) -> std::io::Result<()> {
 
     let host = args.host.expect("host is required in client mode");
     info!("Client mode: Sending keys to server at {}", host);
-    
+
     if let Err(e) = send_keys_to_server(&host, keys, &args.basic_auth).await {
         error!("Failed to send keys to server: {}", e);
-        return Err(io::Error::new(io::ErrorKind::Other, format!("Network error: {}", e)));
+        return Err(io::Error::new(
+            io::ErrorKind::Other,
+            format!("Network error: {}", e),
+        ));
     }
 
     if args.in_place {
@@ -200,7 +209,10 @@ pub async fn run_client(args: crate::Args) -> std::io::Result<()> {
             Ok(keys) => keys,
             Err(e) => {
                 error!("Failed to get keys from server: {}", e);
-                return Err(io::Error::new(io::ErrorKind::Other, format!("Network error: {}", e)));
+                return Err(io::Error::new(
+                    io::ErrorKind::Other,
+                    format!("Network error: {}", e),
+                ));
             }
         };
 
diff --git a/src/db.rs b/src/db.rs
index 5e9d4ef..9ba4fa2 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -1,8 +1,10 @@
 use crate::server::SshKey;
-use log::info;
+use log::{error, info, warn};
 use std::collections::HashMap;
 use std::collections::HashSet;
-use tokio_postgres::Client;
+use tokio_postgres::tls::NoTlsStream;
+use tokio_postgres::Socket;
+use tokio_postgres::{Client, Connection, NoTls};
 
 // Structure for storing key processing statistics
 pub struct KeyInsertStats {
@@ -12,443 +14,663 @@ pub struct KeyInsertStats {
     pub key_id_map: Vec<(SshKey, i32)>, // Mapping of keys to their IDs in the database
 }
 
-pub async fn initialize_db_schema(client: &Client) -> Result<(), tokio_postgres::Error> {
-    info!("Checking and initializing database schema if needed");
-
-    // Check if tables exist by querying information_schema
-    let tables_exist = client
-        .query(
-            "SELECT EXISTS (
-                SELECT FROM information_schema.tables
-                WHERE table_schema = 'public'
-                AND table_name = 'keys'
-            ) AND EXISTS (
-                SELECT FROM information_schema.tables
-                WHERE table_schema = 'public'
-                AND table_name = 'flows'
-            )",
-            &[],
-        )
-        .await?
-        .get(0)
-        .map(|row| row.get::<_, bool>(0))
-        .unwrap_or(false);
-
-    if !tables_exist {
-        info!("Database schema doesn't exist. Creating tables...");
-
-        // Create the keys table
-        client
-            .execute(
-                "CREATE TABLE IF NOT EXISTS public.keys (
-                    key_id SERIAL PRIMARY KEY,
-                    host VARCHAR(255) NOT NULL,
-                    key TEXT NOT NULL,
-                    updated TIMESTAMP WITH TIME ZONE NOT NULL,
-                    deprecated BOOLEAN NOT NULL DEFAULT FALSE,
-                    CONSTRAINT unique_host_key UNIQUE (host, key)
-                )",
-                &[],
-            )
-            .await?;
-
-        // Create the flows table
-        client
-            .execute(
-                "CREATE TABLE IF NOT EXISTS public.flows (
-                    flow_id SERIAL PRIMARY KEY,
-                    name VARCHAR(255) NOT NULL,
-                    key_id INTEGER NOT NULL,
-                    CONSTRAINT fk_key
-                        FOREIGN KEY(key_id)
-                        REFERENCES public.keys(key_id)
-                        ON DELETE CASCADE,
-                    CONSTRAINT unique_flow_key UNIQUE (name, key_id)
-                )",
-                &[],
-            )
-            .await?;
-
-        // Create an index for faster lookups
-        client
-            .execute(
-                "CREATE INDEX IF NOT EXISTS idx_flows_name ON public.flows(name)",
-                &[],
-            )
-            .await?;
-
-        info!("Database schema created successfully");
-    } else {
-        info!("Database schema already exists");
-
-        // Check if deprecated column exists, add it if missing (migration)
-        let column_exists = client
-            .query(
-                "SELECT EXISTS (
-                    SELECT FROM information_schema.columns
-                    WHERE table_schema = 'public'
-                    AND table_name = 'keys'
-                    AND column_name = 'deprecated'
-                )",
-                &[],
-            )
-            .await?
-            .get(0)
-            .map(|row| row.get::<_, bool>(0))
-            .unwrap_or(false);
-
-        if !column_exists {
-            info!("Adding deprecated column to existing keys table...");
-            client
-                .execute(
-                    "ALTER TABLE public.keys ADD COLUMN deprecated BOOLEAN NOT NULL DEFAULT FALSE",
-                    &[],
-                )
-                .await?;
-            info!("Migration completed: deprecated column added");
-        }
-    }
-
-    Ok(())
-}
+// Simple database client that exits on connection errors
+pub struct DbClient {
+    client: Client,
+}
+
+impl DbClient {
+    pub async fn connect(
+        connection_string: &str,
+    ) -> Result<(Self, Connection<Socket, NoTlsStream>), tokio_postgres::Error> {
+        info!("Connecting to database...");
+        let (client, connection) = tokio_postgres::connect(connection_string, NoTls).await?;
+        info!("Successfully connected to database");
+
+        Ok((DbClient { client }, connection))
+    }
+
+    // Helper function to handle database errors - exits the application on connection errors
+    fn handle_db_error<T>(
+        result: Result<T, tokio_postgres::Error>,
+        operation: &str,
+    ) -> Result<T, tokio_postgres::Error> {
+        match result {
+            Ok(value) => Ok(value),
+            Err(e) => {
+                if Self::is_connection_error(&e) {
+                    error!("Database connection lost during {}: {}", operation, e);
+                    error!("Exiting application due to database connection failure");
+                    std::process::exit(1);
+                } else {
+                    // For non-connection errors, just return the error
+                    Err(e)
+                }
+            }
+        }
+    }
+
+    fn is_connection_error(error: &tokio_postgres::Error) -> bool {
+        // Check if the error is related to connection issues
+        let error_str = error.to_string();
+        error_str.contains("connection closed")
+            || error_str.contains("connection reset")
+            || error_str.contains("broken pipe")
+            || error_str.contains("Connection refused")
+            || error_str.contains("connection terminated")
+            || error.as_db_error().is_none() // Non-database errors are often connection issues
+    }
+
+    pub async fn initialize_schema(&self) -> Result<(), tokio_postgres::Error> {
+        info!("Checking and initializing database schema if needed");
+
+        // Check if tables exist by querying information_schema
+        let result = self
+            .client
+            .query(
+                "SELECT EXISTS (
+                    SELECT FROM information_schema.tables
+                    WHERE table_schema = 'public'
+                    AND table_name = 'keys'
+                ) AND EXISTS (
+                    SELECT FROM information_schema.tables
+                    WHERE table_schema = 'public'
+                    AND table_name = 'flows'
+                )",
+                &[],
+            )
+            .await;
+
+        let tables_exist = Self::handle_db_error(result, "checking table existence")?
+            .get(0)
+            .map(|row| row.get::<_, bool>(0))
+            .unwrap_or(false);
+
+        if !tables_exist {
+            info!("Database schema doesn't exist. Creating tables...");
+
+            // Create the keys table
+            let result = self
+                .client
+                .execute(
+                    "CREATE TABLE IF NOT EXISTS public.keys (
+                        key_id SERIAL PRIMARY KEY,
+                        host VARCHAR(255) NOT NULL,
+                        key TEXT NOT NULL,
+                        updated TIMESTAMP WITH TIME ZONE NOT NULL,
+                        deprecated BOOLEAN NOT NULL DEFAULT FALSE,
+                        CONSTRAINT unique_host_key UNIQUE (host, key)
+                    )",
+                    &[],
+                )
+                .await;
+            Self::handle_db_error(result, "creating keys table")?;
+
+            // Create the flows table
+            let result = self
+                .client
+                .execute(
+                    "CREATE TABLE IF NOT EXISTS public.flows (
+                        flow_id SERIAL PRIMARY KEY,
+                        name VARCHAR(255) NOT NULL,
+                        key_id INTEGER NOT NULL,
+                        CONSTRAINT fk_key
+                            FOREIGN KEY(key_id)
+                            REFERENCES public.keys(key_id)
+                            ON DELETE CASCADE,
+                        CONSTRAINT unique_flow_key UNIQUE (name, key_id)
+                    )",
+                    &[],
+                )
+                .await;
+            Self::handle_db_error(result, "creating flows table")?;
+
+            // Create an index for faster lookups
+            let result = self
+                .client
+                .execute(
+                    "CREATE INDEX IF NOT EXISTS idx_flows_name ON public.flows(name)",
+                    &[],
+                )
+                .await;
+            Self::handle_db_error(result, "creating index")?;
+
+            info!("Database schema created successfully");
+        } else {
+            info!("Database schema already exists");
+
+            // Check if deprecated column exists, add it if missing (migration)
+            let result = self
+                .client
+                .query(
+                    "SELECT EXISTS (
+                        SELECT FROM information_schema.columns
+                        WHERE table_schema = 'public'
+                        AND table_name = 'keys'
+                        AND column_name = 'deprecated'
+                    )",
+                    &[],
+                )
+                .await;
+
+            let column_exists = Self::handle_db_error(result, "checking deprecated column")?
+                .get(0)
+                .map(|row| row.get::<_, bool>(0))
+                .unwrap_or(false);
+
+            if !column_exists {
+                info!("Adding deprecated column to existing keys table...");
+                let result = self.client
+                    .execute(
+                        "ALTER TABLE public.keys ADD COLUMN deprecated BOOLEAN NOT NULL DEFAULT FALSE",
+                        &[],
+                    )
+                    .await;
+                Self::handle_db_error(result, "adding deprecated column")?;
+                info!("Migration completed: deprecated column added");
+            }
+        }
+
+        Ok(())
+    }
 
-pub async fn batch_insert_keys(
-    client: &Client,
-    keys: &[SshKey],
-) -> Result<KeyInsertStats, tokio_postgres::Error> {
-    if keys.is_empty() {
-        return Ok(KeyInsertStats {
-            total: 0,
-            inserted: 0,
-            unchanged: 0,
-            key_id_map: Vec::new(),
-        });
-    }
-
-    // Prepare arrays for batch insertion
-    let mut host_values: Vec<&str> = Vec::with_capacity(keys.len());
-    let mut key_values: Vec<&str> = Vec::with_capacity(keys.len());
-
-    for key in keys {
-        host_values.push(&key.server);
-        key_values.push(&key.public_key);
-    }
-
-    // First, check which keys already exist in the database (including deprecated status)
-    let mut existing_keys = HashMap::new();
-    let mut key_query = String::from("SELECT host, key, key_id, deprecated FROM public.keys WHERE ");
-
-    for i in 0..keys.len() {
-        if i > 0 {
-            key_query.push_str(" OR ");
-        }
-        key_query.push_str(&format!("(host = ${} AND key = ${})", i * 2 + 1, i * 2 + 2));
-    }
-
-    let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
-        Vec::with_capacity(keys.len() * 2);
-    for i in 0..keys.len() {
-        params.push(&host_values[i]);
-        params.push(&key_values[i]);
-    }
-
-    let rows = client.query(&key_query, &params[..]).await?;
-
-    for row in rows {
-        let host: String = row.get(0);
-        let key: String = row.get(1);
-        let key_id: i32 = row.get(2);
-        let deprecated: bool = row.get(3);
-        existing_keys.insert((host, key), (key_id, deprecated));
-    }
-
-    // Determine which keys need to be inserted and which already exist
-    let mut keys_to_insert = Vec::new();
-    let mut unchanged_keys = Vec::new();
-    let mut ignored_deprecated = 0;
-
-    for key in keys {
-        let key_tuple = (key.server.clone(), key.public_key.clone());
-        if let Some((key_id, is_deprecated)) = existing_keys.get(&key_tuple) {
-            if *is_deprecated {
-                // Ignore deprecated keys - don't add them to any flow
-                ignored_deprecated += 1;
-            } else {
-                // Key exists and is not deprecated - add to unchanged
-                unchanged_keys.push((key.clone(), *key_id));
-            }
-        } else {
-            // Key doesn't exist - add to insert list
-            keys_to_insert.push(key.clone());
-        }
-    }
-
-    let mut inserted_keys = Vec::new();
-
-    // If there are keys to insert, perform the insertion
-    if !keys_to_insert.is_empty() {
-        let mut insert_sql = String::from("INSERT INTO public.keys (host, key, updated) VALUES ");
-
-        let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = Vec::new();
-        let mut param_count = 1;
-
-        for (i, key) in keys_to_insert.iter().enumerate() {
-            if i > 0 {
-                insert_sql.push_str(", ");
-            }
-            insert_sql.push_str(&format!("(${}, ${}, NOW())", param_count, param_count + 1));
-            insert_params.push(&key.server);
-            insert_params.push(&key.public_key);
-            param_count += 2;
-        }
-
-        insert_sql.push_str(" RETURNING key_id, host, key");
-
-        let inserted_rows = client.query(&insert_sql, &insert_params[..]).await?;
-
-        for row in inserted_rows {
-            let host: String = row.get(1);
-            let key_text: String = row.get(2);
-            let key_id: i32 = row.get(0);
-
-            if let Some(orig_key) = keys_to_insert
-                .iter()
-                .find(|k| k.server == host && k.public_key == key_text)
-            {
-                inserted_keys.push((orig_key.clone(), key_id));
-            }
-        }
-    }
-
-    // Save the number of elements before combining
-    let inserted_count = inserted_keys.len();
-    let unchanged_count = unchanged_keys.len();
-
-    // Combine results and generate statistics
-    let mut key_id_map = Vec::with_capacity(unchanged_count + inserted_count);
-    key_id_map.extend(unchanged_keys);
-    key_id_map.extend(inserted_keys);
-
-    let stats = KeyInsertStats {
-        total: keys.len(),
-        inserted: inserted_count,
-        unchanged: unchanged_count,
-        key_id_map,
-    };
-
-    info!(
-        "Keys stats: received={}, new={}, unchanged={}, ignored_deprecated={}",
-        stats.total, stats.inserted, stats.unchanged, ignored_deprecated
-    );
-
-    Ok(stats)
-}
+    pub async fn batch_insert_keys(
+        &self,
+        keys: &[SshKey],
+    ) -> Result<KeyInsertStats, tokio_postgres::Error> {
+        if keys.is_empty() {
+            return Ok(KeyInsertStats {
+                total: 0,
+                inserted: 0,
+                unchanged: 0,
+                key_id_map: Vec::new(),
+            });
+        }
+
+        // Prepare arrays for batch insertion
+        let mut host_values: Vec<&str> = Vec::with_capacity(keys.len());
+        let mut key_values: Vec<&str> = Vec::with_capacity(keys.len());
+
+        for key in keys {
+            host_values.push(&key.server);
+            key_values.push(&key.public_key);
+        }
+
+        // First, check which keys already exist in the database (including deprecated status)
+        let mut existing_keys = HashMap::new();
+        let mut key_query =
+            String::from("SELECT host, key, key_id, deprecated FROM public.keys WHERE ");
+
+        for i in 0..keys.len() {
+            if i > 0 {
+                key_query.push_str(" OR ");
+            }
+            key_query.push_str(&format!("(host = ${} AND key = ${})", i * 2 + 1, i * 2 + 2));
+        }
+
+        let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> =
+            Vec::with_capacity(keys.len() * 2);
+        for i in 0..keys.len() {
+            params.push(&host_values[i]);
+            params.push(&key_values[i]);
+        }
+
+        let result = self.client.query(&key_query, &params[..]).await;
+        let rows = Self::handle_db_error(result, "checking existing keys")?;
+
+        for row in rows {
+            let host: String = row.get(0);
+            let key: String = row.get(1);
+            let key_id: i32 = row.get(2);
+            let deprecated: bool = row.get(3);
+            existing_keys.insert((host, key), (key_id, deprecated));
+        }
+
+        // Determine which keys need to be inserted and which already exist
+        let mut keys_to_insert = Vec::new();
+        let mut unchanged_keys = Vec::new();
+        let mut ignored_deprecated = 0;
+
+        for key in keys {
+            let key_tuple = (key.server.clone(), key.public_key.clone());
+            if let Some((key_id, is_deprecated)) = existing_keys.get(&key_tuple) {
+                if *is_deprecated {
+                    // Ignore deprecated keys - don't add them to any flow
+                    ignored_deprecated += 1;
+                } else {
+                    // Key exists and is not deprecated - add to unchanged
+                    unchanged_keys.push((key.clone(), *key_id));
+                }
+            } else {
+                // Key doesn't exist - add to insert list
+                keys_to_insert.push(key.clone());
+            }
+        }
+
+        let mut inserted_keys = Vec::new();
+
+        // If there are keys to insert, perform the insertion
+        if !keys_to_insert.is_empty() {
+            let mut insert_sql =
+                String::from("INSERT INTO public.keys (host, key, updated) VALUES ");
+
+            let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = Vec::new();
+            let mut param_count = 1;
+
+            for (i, key) in keys_to_insert.iter().enumerate() {
+                if i > 0 {
+                    insert_sql.push_str(", ");
+                }
+                insert_sql.push_str(&format!("(${}, ${}, NOW())", param_count, param_count + 1));
+                insert_params.push(&key.server);
+                insert_params.push(&key.public_key);
+                param_count += 2;
+            }
+
+            insert_sql.push_str(" RETURNING key_id, host, key");
+
+            let result = self.client.query(&insert_sql, &insert_params[..]).await;
+            let inserted_rows = Self::handle_db_error(result, "inserting keys")?;
+
+            for row in inserted_rows {
+                let host: String = row.get(1);
+                let key_text: String = row.get(2);
+                let key_id: i32 = row.get(0);
+
+                if let Some(orig_key) = keys_to_insert
+                    .iter()
+                    .find(|k| k.server == host && k.public_key == key_text)
+                {
+                    inserted_keys.push((orig_key.clone(), key_id));
+                }
+            }
+        }
+
+        // Save the number of elements before combining
+        let inserted_count = inserted_keys.len();
+        let unchanged_count = unchanged_keys.len();
+
+        // Combine results and generate statistics
+        let mut key_id_map = Vec::with_capacity(unchanged_count + inserted_count);
+        key_id_map.extend(unchanged_keys);
+        key_id_map.extend(inserted_keys);
+
+        let stats = KeyInsertStats {
+            total: keys.len(),
+            inserted: inserted_count,
+            unchanged: unchanged_count,
+            key_id_map,
+        };
+
+        info!(
- "All {} keys are already associated with flow '{}'", - key_ids.len(), - flow_name + "Keys stats: received={}, new={}, unchanged={}, ignored_deprecated={}", + stats.total, stats.inserted, stats.unchanged, ignored_deprecated ); - return Ok(0); + + Ok(stats) } - // Build SQL query with multiple values only for new associations - let mut sql = String::from("INSERT INTO public.flows (name, key_id) VALUES "); - - for i in 0..new_key_ids.len() { - if i > 0 { - sql.push_str(", "); + pub async fn batch_insert_flow_keys( + &self, + flow_name: &str, + key_ids: &[i32], + ) -> Result { + if key_ids.is_empty() { + info!("No keys to associate with flow '{}'", flow_name); + return Ok(0); } - sql.push_str(&format!("($1, ${})", i + 2)); + + // First, check which associations already exist + let mut existing_query = + String::from("SELECT key_id FROM public.flows WHERE name = $1 AND key_id IN ("); + + for i in 0..key_ids.len() { + if i > 0 { + existing_query.push_str(", "); + } + existing_query.push_str(&format!("${}", i + 2)); + } + existing_query.push_str(")"); + + let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = + Vec::with_capacity(key_ids.len() + 1); + params.push(&flow_name); + for key_id in key_ids { + params.push(key_id); + } + + let result = self.client.query(&existing_query, ¶ms[..]).await; + let rows = Self::handle_db_error(result, "checking existing flow associations")?; + + let mut existing_associations = HashSet::new(); + for row in rows { + let key_id: i32 = row.get(0); + existing_associations.insert(key_id); + } + + // Filter only keys that are not yet associated with the flow + let new_key_ids: Vec<&i32> = key_ids + .iter() + .filter(|&id| !existing_associations.contains(id)) + .collect(); + + if new_key_ids.is_empty() { + info!( + "All {} keys are already associated with flow '{}'", + key_ids.len(), + flow_name + ); + return Ok(0); + } + + // Build SQL query with multiple values only for new associations + let mut sql = String::from("INSERT INTO public.flows (name, key_id) VALUES "); + + for i in 0..new_key_ids.len() { + if i > 0 { + sql.push_str(", "); + } + sql.push_str(&format!("($1, ${})", i + 2)); + } + + sql.push_str(" ON CONFLICT (name, key_id) DO NOTHING"); + + // Prepare parameters for the query + let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = + Vec::with_capacity(new_key_ids.len() + 1); + insert_params.push(&flow_name); + for key_id in &new_key_ids { + insert_params.push(*key_id); + } + + // Execute query + let result = self.client.execute(&sql, &insert_params[..]).await; + let affected = Self::handle_db_error(result, "inserting flow associations")?; + + let affected_usize = affected as usize; + + info!( + "Added {} new key-flow associations for flow '{}' (skipped {} existing)", + affected_usize, + flow_name, + existing_associations.len() + ); + + Ok(affected_usize) } - sql.push_str(" ON CONFLICT (name, key_id) DO NOTHING"); + pub async fn get_keys_from_db( + &self, + ) -> Result, tokio_postgres::Error> { + let result = self.client.query( + "SELECT k.host, k.key, k.deprecated, f.name FROM public.keys k INNER JOIN public.flows f ON k.key_id = f.key_id", + &[] + ).await; + let rows = Self::handle_db_error(result, "getting keys from database")?; - // Prepare parameters for the query - let mut insert_params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = - Vec::with_capacity(new_key_ids.len() + 1); - insert_params.push(&flow_name); - for key_id in &new_key_ids { - insert_params.push(*key_id); + let mut flows_map: HashMap = 
HashMap::new(); + + for row in rows { + let host: String = row.get(0); + let key: String = row.get(1); + let deprecated: bool = row.get(2); + let flow: String = row.get(3); + + let ssh_key = SshKey { + server: host, + public_key: key, + deprecated, + }; + + if let Some(flow_entry) = flows_map.get_mut(&flow) { + flow_entry.servers.push(ssh_key); + } else { + flows_map.insert( + flow.clone(), + crate::server::Flow { + name: flow, + servers: vec![ssh_key], + }, + ); + } + } + + info!("Retrieved {} flows from database", flows_map.len()); + Ok(flows_map.into_values().collect()) } - // Execute query - let affected = client.execute(&sql, &insert_params[..]).await?; - - let affected_usize = affected as usize; - - info!( - "Added {} new key-flow associations for flow '{}' (skipped {} existing)", - affected_usize, - flow_name, - existing_associations.len() - ); - - Ok(affected_usize) -} - -// Function to deprecate keys instead of deleting them -pub async fn deprecate_key_by_server( - client: &Client, - server_name: &str, - flow_name: &str, -) -> Result { - // Update keys to deprecated status for the given server - let affected = client - .execute( - "UPDATE public.keys - SET deprecated = TRUE, updated = NOW() - WHERE host = $1 - AND key_id IN ( - SELECT key_id FROM public.flows WHERE name = $2 - )", - &[&server_name, &flow_name], - ) - .await?; - - info!( - "Deprecated {} key(s) for server '{}' in flow '{}'", - affected, server_name, flow_name - ); - - Ok(affected) -} - -// Function to restore deprecated key back to active -pub async fn restore_key_by_server( - client: &Client, - server_name: &str, - flow_name: &str, -) -> Result { - // Update keys to active status for the given server in the flow - let affected = client - .execute( - "UPDATE public.keys - SET deprecated = FALSE, updated = NOW() - WHERE host = $1 - AND deprecated = TRUE - AND key_id IN ( - SELECT key_id FROM public.flows WHERE name = $2 - )", - &[&server_name, &flow_name], - ) - .await?; - - info!( - "Restored {} key(s) for server '{}' in flow '{}'", - affected, server_name, flow_name - ); - - Ok(affected) -} - -// Function to permanently delete keys from database -pub async fn permanently_delete_key_by_server( - client: &Client, - server_name: &str, - flow_name: &str, -) -> Result { - // First, find the key_ids for the given server in the flow - let key_rows = client - .query( - "SELECT k.key_id FROM public.keys k - INNER JOIN public.flows f ON k.key_id = f.key_id - WHERE k.host = $1 AND f.name = $2", - &[&server_name, &flow_name] - ) - .await?; - - if key_rows.is_empty() { - return Ok(0); - } - - let key_ids: Vec = key_rows.iter().map(|row| row.get::<_, i32>(0)).collect(); - - // Delete flow associations first - let mut flow_delete_count = 0; - for key_id in &key_ids { - let deleted = client + pub async fn deprecate_key_by_server( + &self, + server_name: &str, + flow_name: &str, + ) -> Result { + // Update keys to deprecated status for the given server + let result = self + .client .execute( - "DELETE FROM public.flows WHERE name = $1 AND key_id = $2", - &[&flow_name, key_id], + "UPDATE public.keys + SET deprecated = TRUE, updated = NOW() + WHERE host = $1 + AND key_id IN ( + SELECT key_id FROM public.flows WHERE name = $2 + )", + &[&server_name, &flow_name], ) - .await?; - flow_delete_count += deleted; + .await; + let affected = Self::handle_db_error(result, "deprecating key")?; + + info!( + "Deprecated {} key(s) for server '{}' in flow '{}'", + affected, server_name, flow_name + ); + + Ok(affected) } - // Check if any of these 
-    let mut keys_to_delete = Vec::new();
-    for key_id in &key_ids {
-        let count: i64 = client
-            .query_one(
-                "SELECT COUNT(*) FROM public.flows WHERE key_id = $1",
-                &[key_id],
-            )
-            .await?
-            .get(0);
-
-        if count == 0 {
-            keys_to_delete.push(*key_id);
-        }
-    }
-
-    // Permanently delete keys that are no longer referenced by any flow
-    let mut total_deleted = 0;
-    for key_id in keys_to_delete {
-        let deleted = client
-            .execute("DELETE FROM public.keys WHERE key_id = $1", &[&key_id])
-            .await?;
-        total_deleted += deleted;
-    }
-
-    info!(
-        "Permanently deleted {} flow associations and {} orphaned keys for server '{}' in flow '{}'",
-        flow_delete_count, total_deleted, server_name, flow_name
-    );
-
-    Ok(std::cmp::max(flow_delete_count, total_deleted))
-}
+    pub async fn restore_key_by_server(
+        &self,
+        server_name: &str,
+        flow_name: &str,
+    ) -> Result<u64, tokio_postgres::Error> {
+        // Update keys to active status for the given server in the flow
+        let result = self
+            .client
+            .execute(
+                "UPDATE public.keys
+                 SET deprecated = FALSE, updated = NOW()
+                 WHERE host = $1
+                 AND deprecated = TRUE
+                 AND key_id IN (
+                     SELECT key_id FROM public.flows WHERE name = $2
+                 )",
+                &[&server_name, &flow_name],
+            )
+            .await;
+        let affected = Self::handle_db_error(result, "restoring key")?;
+
+        info!(
+            "Restored {} key(s) for server '{}' in flow '{}'",
+            affected, server_name, flow_name
+        );
+
+        Ok(affected)
+    }
+
+    pub async fn permanently_delete_key_by_server(
+        &self,
+        server_name: &str,
+        flow_name: &str,
+    ) -> Result<u64, tokio_postgres::Error> {
+        // First, find the key_ids for the given server in the flow
+        let result = self
+            .client
+            .query(
+                "SELECT k.key_id FROM public.keys k
+                 INNER JOIN public.flows f ON k.key_id = f.key_id
+                 WHERE k.host = $1 AND f.name = $2",
+                &[&server_name, &flow_name],
+            )
+            .await;
+        let key_rows = Self::handle_db_error(result, "finding keys to delete")?;
+
+        if key_rows.is_empty() {
+            return Ok(0);
+        }
+
+        let key_ids: Vec<i32> = key_rows.iter().map(|row| row.get::<_, i32>(0)).collect();
+
+        // Delete flow associations first
+        let mut flow_delete_count = 0;
+        for key_id in &key_ids {
+            let result = self
+                .client
+                .execute(
+                    "DELETE FROM public.flows WHERE name = $1 AND key_id = $2",
+                    &[&flow_name, key_id],
+                )
+                .await;
+            let deleted = Self::handle_db_error(result, "deleting flow association")?;
+            flow_delete_count += deleted;
+        }
+
+        // Check if any of these keys are used in other flows
+        let mut keys_to_delete = Vec::new();
+        for key_id in &key_ids {
+            let result = self
+                .client
+                .query_one(
+                    "SELECT COUNT(*) FROM public.flows WHERE key_id = $1",
+                    &[key_id],
+                )
+                .await;
+            let count: i64 = Self::handle_db_error(result, "checking key references")?.get(0);
+
+            if count == 0 {
+                keys_to_delete.push(*key_id);
+            }
+        }
+
+        // Permanently delete keys that are no longer referenced by any flow
+        let mut total_deleted = 0;
+        for key_id in keys_to_delete {
+            let result = self
+                .client
+                .execute("DELETE FROM public.keys WHERE key_id = $1", &[&key_id])
+                .await;
+            let deleted = Self::handle_db_error(result, "deleting key")?;
+            total_deleted += deleted;
+        }
+
+        info!(
+            "Permanently deleted {} flow associations and {} orphaned keys for server '{}' in flow '{}'",
+            flow_delete_count, total_deleted, server_name, flow_name
+        );
+
+        Ok(std::cmp::max(flow_delete_count, total_deleted))
+    }
+}
+
+// Compatibility wrapper for transition
+pub struct ReconnectingDbClient {
+    inner: Option<DbClient>,
+}
+
+impl ReconnectingDbClient {
+    pub fn new(_connection_string: String) -> Self {
+        Self { inner: None }
+    }
+
+    pub async fn connect(&mut self, connection_string: &str) -> Result<(), tokio_postgres::Error> {
+        let (client, connection) = DbClient::connect(connection_string).await?;
+
+        // Spawn connection handler that will exit on error
+        tokio::spawn(async move {
+            if let Err(e) = connection.await {
+                error!("Database connection error: {}", e);
+                error!("Exiting application due to database connection failure");
+                std::process::exit(1);
+            }
+        });
+
+        self.inner = Some(client);
+        Ok(())
+    }
+
+    pub async fn initialize_schema(&self) -> Result<(), tokio_postgres::Error> {
+        match &self.inner {
+            Some(client) => client.initialize_schema().await,
+            None => panic!("Database client not initialized"),
+        }
+    }
+
+    pub async fn batch_insert_keys_reconnecting(
+        &self,
+        keys: Vec<SshKey>,
+    ) -> Result<KeyInsertStats, tokio_postgres::Error> {
+        match &self.inner {
+            Some(client) => client.batch_insert_keys(&keys).await,
+            None => panic!("Database client not initialized"),
+        }
+    }
+
+    pub async fn batch_insert_flow_keys_reconnecting(
+        &self,
+        flow_name: String,
+        key_ids: Vec<i32>,
+    ) -> Result<usize, tokio_postgres::Error> {
+        match &self.inner {
+            Some(client) => client.batch_insert_flow_keys(&flow_name, &key_ids).await,
+            None => panic!("Database client not initialized"),
+        }
+    }
+
+    pub async fn get_keys_from_db_reconnecting(
+        &self,
+    ) -> Result<Vec<crate::server::Flow>, tokio_postgres::Error> {
+        match &self.inner {
+            Some(client) => client.get_keys_from_db().await,
+            None => panic!("Database client not initialized"),
+        }
+    }
+
+    pub async fn deprecate_key_by_server_reconnecting(
+        &self,
+        server_name: String,
+        flow_name: String,
+    ) -> Result<u64, tokio_postgres::Error> {
+        match &self.inner {
+            Some(client) => {
+                client
+                    .deprecate_key_by_server(&server_name, &flow_name)
+                    .await
+            }
+            None => panic!("Database client not initialized"),
+        }
+    }
+
+    pub async fn restore_key_by_server_reconnecting(
+        &self,
+        server_name: String,
+        flow_name: String,
+    ) -> Result<u64, tokio_postgres::Error> {
+        match &self.inner {
+            Some(client) => client.restore_key_by_server(&server_name, &flow_name).await,
+            None => panic!("Database client not initialized"),
+        }
+    }
+
+    pub async fn permanently_delete_key_by_server_reconnecting(
+        &self,
+        server_name: String,
+        flow_name: String,
+    ) -> Result<u64, tokio_postgres::Error> {
+        match &self.inner {
+            Some(client) => {
+                client
+                    .permanently_delete_key_by_server(&server_name, &flow_name)
+                    .await
+            }
+            None => panic!("Database client not initialized"),
+        }
+    }
+}
diff --git a/src/main.rs b/src/main.rs
index cfaaab3..de83045 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -126,8 +126,14 @@ async fn main() -> std::io::Result<()> {
         eprintln!("Error: You must specify either server mode (--server) or client mode (--host)");
         eprintln!();
         eprintln!("Examples:");
-        eprintln!("  Server mode: {} --server --db-user admin --db-password pass --flows work,home", env!("CARGO_PKG_NAME"));
-        eprintln!("  Client mode: {} --host https://khm.example.com/work", env!("CARGO_PKG_NAME"));
+        eprintln!(
+            "  Server mode: {} --server --db-user admin --db-password pass --flows work,home",
+            env!("CARGO_PKG_NAME")
+        );
+        eprintln!(
+            "  Client mode: {} --host https://khm.example.com/work",
+            env!("CARGO_PKG_NAME")
+        );
         eprintln!();
         eprintln!("Use --help for more information.");
         std::process::exit(1);
diff --git a/src/server.rs b/src/server.rs
index 39147cc..d1d2f8a 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -2,11 +2,9 @@ use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer, Responder};
 use log::{error, info};
 use regex::Regex;
 use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
 use std::sync::{Arc, Mutex};
-use tokio_postgres::{Client, NoTls};
 
-use crate::db;
+use crate::db::ReconnectingDbClient;
 
 #[derive(Serialize, Deserialize, Clone, Debug)]
 pub struct SshKey {
@@ -37,43 +35,6 @@ pub fn is_valid_ssh_key(key: &str) -> bool {
         || ed25519_re.is_match(key)
 }
 
-pub async fn get_keys_from_db(client: &Client) -> Result<Vec<Flow>, tokio_postgres::Error> {
-    let rows = client.query(
-        "SELECT k.host, k.key, k.deprecated, f.name FROM public.keys k INNER JOIN public.flows f ON k.key_id = f.key_id",
-        &[]
-    ).await?;
-
-    let mut flows_map: HashMap<String, Flow> = HashMap::new();
-
-    for row in rows {
-        let host: String = row.get(0);
-        let key: String = row.get(1);
-        let deprecated: bool = row.get(2);
-        let flow: String = row.get(3);
-
-        let ssh_key = SshKey {
-            server: host,
-            public_key: key,
-            deprecated,
-        };
-
-        if let Some(flow_entry) = flows_map.get_mut(&flow) {
-            flow_entry.servers.push(ssh_key);
-        } else {
-            flows_map.insert(
-                flow.clone(),
-                Flow {
-                    name: flow,
-                    servers: vec![ssh_key],
-                },
-            );
-        }
-    }
-
-    info!("Retrieved {} flows from database", flows_map.len());
-    Ok(flows_map.into_values().collect())
-}
-
 // Extract client hostname from request headers
 fn get_client_hostname(req: &HttpRequest) -> String {
     if let Some(hostname) = req.headers().get("X-Client-Hostname") {
@@ -110,10 +71,11 @@ pub async fn get_keys(
     let flows = flows.lock().unwrap();
     if let Some(flow) = flows.iter().find(|flow| flow.name == flow_id_str) {
         // Check if we should include deprecated keys (default: false for CLI clients)
-        let include_deprecated = query.get("include_deprecated")
+        let include_deprecated = query
+            .get("include_deprecated")
             .map(|v| v == "true")
             .unwrap_or(false);
-        
+
         let servers: Vec<&SshKey> = if include_deprecated {
             // Return all keys (for web interface)
             flow.servers.iter().collect()
@@ -121,7 +83,7 @@ pub async fn get_keys(
             // Return only active keys (for CLI clients)
             flow.servers.iter().filter(|key| !key.deprecated).collect()
         };
-        
+
         info!(
             "Returning {} keys ({} total, deprecated filtered: {}) for flow '{}' to client '{}'",
             servers.len(),
@@ -144,7 +106,7 @@ pub async fn add_keys(
     flows: web::Data<Flows>,
     flow_id: web::Path<String>,
     new_keys: web::Json<Vec<SshKey>>,
-    db_client: web::Data<Arc<Client>>,
+    db_client: web::Data<Arc<ReconnectingDbClient>>,
     allowed_flows: web::Data<Vec<String>>,
     req: HttpRequest,
 ) -> impl Responder {
@@ -190,7 +152,10 @@ pub async fn add_keys(
     );
 
     // Batch insert keys with statistics
-    let key_stats = match crate::db::batch_insert_keys(&db_client, &valid_keys).await {
+    let key_stats = match db_client
+        .batch_insert_keys_reconnecting(valid_keys.clone())
+        .await
+    {
         Ok(stats) => stats,
         Err(e) => {
             error!(
@@ -208,7 +173,9 @@ pub async fn add_keys(
     let key_ids: Vec<i32> = key_stats.key_id_map.iter().map(|(_, id)| *id).collect();
 
     // Batch insert key-flow associations
-    if let Err(e) = crate::db::batch_insert_flow_keys(&db_client, &flow_id_str, &key_ids).await
+    if let Err(e) = db_client
+        .batch_insert_flow_keys_reconnecting(flow_id_str.clone(), key_ids.clone())
+        .await
     {
         error!(
             "Failed to batch insert flow keys from client '{}' into database: {}",
@@ -232,7 +199,7 @@ pub async fn add_keys(
     }
 
     // Get updated data
-    let updated_flows = match get_keys_from_db(&db_client).await {
+    let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
         Ok(flows) => flows,
         Err(e) => {
             error!(
@@ -287,28 +254,22 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
         args.db_host, db_user, db_password, args.db_name
     );
 
-    info!("Connecting to database at {}", args.db_host);
-    let (db_client, connection) = match tokio_postgres::connect(&db_conn_str, NoTls).await {
-        Ok((client, conn)) => (client, conn),
-        Err(e) => {
-            error!("Failed to connect to the database: {}", e);
-            return Err(std::io::Error::new(
-                std::io::ErrorKind::ConnectionRefused,
-                format!("Database connection error: {}", e),
-            ));
-        }
-    };
-    let db_client = Arc::new(db_client);
+    info!("Creating database client for {}", args.db_host);
+    let mut db_client_temp = ReconnectingDbClient::new(db_conn_str.clone());
 
-    // Spawn a new thread to run the database connection
-    tokio::spawn(async move {
-        if let Err(e) = connection.await {
-            error!("Connection error: {}", e);
-        }
-    });
+    // Initial connection
+    if let Err(e) = db_client_temp.connect(&db_conn_str).await {
+        error!("Failed to connect to the database: {}", e);
+        return Err(std::io::Error::new(
+            std::io::ErrorKind::ConnectionRefused,
+            format!("Database connection error: {}", e),
+        ));
+    }
+
+    let db_client = Arc::new(db_client_temp);
 
     // Initialize database schema if needed
-    if let Err(e) = db::initialize_db_schema(&db_client).await {
+    if let Err(e) = db_client.initialize_schema().await {
         error!("Failed to initialize database schema: {}", e);
         return Err(std::io::Error::new(
             std::io::ErrorKind::Other,
@@ -316,7 +277,7 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
         ));
     }
 
-    let mut initial_flows = match get_keys_from_db(&db_client).await {
+    let mut initial_flows = match db_client.get_keys_from_db_reconnecting().await {
         Ok(flows) => flows,
         Err(e) => {
             error!("Failed to get initial flows from database: {}", e);
@@ -345,15 +306,27 @@ pub async fn run_server(args: crate::Args) -> std::io::Result<()> {
             .app_data(allowed_flows.clone())
             // API routes
             .route("/api/flows", web::get().to(crate::web::get_flows_api))
-            .route("/{flow_id}/keys/{server}", web::delete().to(crate::web::delete_key_by_server))
-            .route("/{flow_id}/keys/{server}/restore", web::post().to(crate::web::restore_key_by_server))
-            .route("/{flow_id}/keys/{server}/delete", web::delete().to(crate::web::permanently_delete_key_by_server))
+            .route(
+                "/{flow_id}/keys/{server}",
+                web::delete().to(crate::web::delete_key_by_server),
+            )
+            .route(
+                "/{flow_id}/keys/{server}/restore",
+                web::post().to(crate::web::restore_key_by_server),
+            )
+            .route(
+                "/{flow_id}/keys/{server}/delete",
+                web::delete().to(crate::web::permanently_delete_key_by_server),
+            )
             // Original API routes
             .route("/{flow_id}/keys", web::get().to(get_keys))
             .route("/{flow_id}/keys", web::post().to(add_keys))
             // Web interface routes
             .route("/", web::get().to(crate::web::serve_web_interface))
-            .route("/static/{filename:.*}", web::get().to(crate::web::serve_static_file))
+            .route(
+                "/static/{filename:.*}",
+                web::get().to(crate::web::serve_static_file),
+            )
     })
     .bind((args.ip.as_str(), args.port))?
     .run()
diff --git a/src/web.rs b/src/web.rs
index 34ab83f..137087a 100644
--- a/src/web.rs
+++ b/src/web.rs
@@ -2,9 +2,10 @@ use actix_web::{web, HttpResponse, Result};
 use log::info;
 use rust_embed::RustEmbed;
 use serde_json::json;
-use tokio_postgres::Client;
+use std::sync::Arc;
 
-use crate::server::{get_keys_from_db, Flows};
+use crate::db::ReconnectingDbClient;
+use crate::server::Flows;
 
 #[derive(RustEmbed)]
 #[folder = "static/"]
@@ -20,12 +21,15 @@ pub async fn get_flows_api(allowed_flows: web::Data<Vec<String>>) -> Result<HttpResponse>
     path: web::Path<(String, String)>,
-    db_client: web::Data<Arc<Client>>,
+    db_client: web::Data<Arc<ReconnectingDbClient>>,
     allowed_flows: web::Data<Vec<String>>,
 ) -> Result<HttpResponse> {
     let (flow_id_str, server_name) = path.into_inner();
 
-    info!("API request to deprecate key for server '{}' in flow '{}'", server_name, flow_id_str);
+    info!(
+        "API request to deprecate key for server '{}' in flow '{}'",
+        server_name, flow_id_str
+    );
 
     if !allowed_flows.contains(&flow_id_str) {
         return Ok(HttpResponse::Forbidden().json(json!({
@@ -34,13 +38,19 @@ pub async fn delete_key_by_server(
     }
 
     // Deprecate in database
-    match crate::db::deprecate_key_by_server(&db_client, &server_name, &flow_id_str).await {
+    match db_client
+        .deprecate_key_by_server_reconnecting(server_name.clone(), flow_id_str.clone())
+        .await
+    {
         Ok(deprecated_count) => {
             if deprecated_count > 0 {
-                info!("Deprecated {} key(s) for server '{}' in flow '{}'", deprecated_count, server_name, flow_id_str);
-                
+                info!(
+                    "Deprecated {} key(s) for server '{}' in flow '{}'",
+                    deprecated_count, server_name, flow_id_str
+                );
+
                 // Refresh the in-memory flows
-                let updated_flows = match get_keys_from_db(&db_client).await {
+                let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
                     Ok(flows) => flows,
                     Err(e) => {
                         return Ok(HttpResponse::InternalServerError().json(json!({
@@ -62,11 +72,9 @@ pub async fn delete_key_by_server(
             })))
         }
     }
-        Err(e) => {
-            Ok(HttpResponse::InternalServerError().json(json!({
-                "error": format!("Failed to deprecate key: {}", e)
-            })))
-        }
+        Err(e) => Ok(HttpResponse::InternalServerError().json(json!({
+            "error": format!("Failed to deprecate key: {}", e)
+        }))),
     }
 }
 
@@ -74,12 +82,15 @@ pub async fn restore_key_by_server(
     flows: web::Data<Flows>,
     path: web::Path<(String, String)>,
-    db_client: web::Data<Arc<Client>>,
+    db_client: web::Data<Arc<ReconnectingDbClient>>,
     allowed_flows: web::Data<Vec<String>>,
 ) -> Result<HttpResponse> {
     let (flow_id_str, server_name) = path.into_inner();
 
-    info!("API request to restore key for server '{}' in flow '{}'", server_name, flow_id_str);
+    info!(
+        "API request to restore key for server '{}' in flow '{}'",
+        server_name, flow_id_str
+    );
 
     if !allowed_flows.contains(&flow_id_str) {
         return Ok(HttpResponse::Forbidden().json(json!({
@@ -88,13 +99,19 @@ pub async fn restore_key_by_server(
     }
 
     // Restore in database
-    match crate::db::restore_key_by_server(&db_client, &server_name, &flow_id_str).await {
+    match db_client
+        .restore_key_by_server_reconnecting(server_name.clone(), flow_id_str.clone())
+        .await
+    {
         Ok(restored_count) => {
             if restored_count > 0 {
-                info!("Restored {} key(s) for server '{}' in flow '{}'", restored_count, server_name, flow_id_str);
-                
+                info!(
+                    "Restored {} key(s) for server '{}' in flow '{}'",
+                    restored_count, server_name, flow_id_str
+                );
+
                 // Refresh the in-memory flows
-                let updated_flows = match get_keys_from_db(&db_client).await {
+                let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
                     Ok(flows) => flows,
                     Err(e) => {
                         return Ok(HttpResponse::InternalServerError().json(json!({
@@ -116,11 +133,9 @@ pub async fn restore_key_by_server(
             })))
         }
     }
-        Err(e) => {
-            Ok(HttpResponse::InternalServerError().json(json!({
-                "error": format!("Failed to restore key: {}", e)
-            })))
-        }
+        Err(e) => Ok(HttpResponse::InternalServerError().json(json!({
+            "error": format!("Failed to restore key: {}", e)
+        }))),
     }
 }
 
@@ -128,12 +143,15 @@ pub async fn permanently_delete_key_by_server(
     flows: web::Data<Flows>,
     path: web::Path<(String, String)>,
-    db_client: web::Data<Arc<Client>>,
+    db_client: web::Data<Arc<ReconnectingDbClient>>,
     allowed_flows: web::Data<Vec<String>>,
 ) -> Result<HttpResponse> {
     let (flow_id_str, server_name) = path.into_inner();
 
-    info!("API request to permanently delete key for server '{}' in flow '{}'", server_name, flow_id_str);
+    info!(
+        "API request to permanently delete key for server '{}' in flow '{}'",
+        server_name, flow_id_str
+    );
 
     if !allowed_flows.contains(&flow_id_str) {
         return Ok(HttpResponse::Forbidden().json(json!({
@@ -142,13 +160,19 @@ pub async fn permanently_delete_key_by_server(
     }
 
     // Permanently delete from database
-    match crate::db::permanently_delete_key_by_server(&db_client, &server_name, &flow_id_str).await {
+    match db_client
+        .permanently_delete_key_by_server_reconnecting(server_name.clone(), flow_id_str.clone())
+        .await
+    {
         Ok(deleted_count) => {
             if deleted_count > 0 {
-                info!("Permanently deleted {} key(s) for server '{}' in flow '{}'", deleted_count, server_name, flow_id_str);
-                
+                info!(
+                    "Permanently deleted {} key(s) for server '{}' in flow '{}'",
+                    deleted_count, server_name, flow_id_str
+                );
+
                 // Refresh the in-memory flows
-                let updated_flows = match get_keys_from_db(&db_client).await {
+                let updated_flows = match db_client.get_keys_from_db_reconnecting().await {
                     Ok(flows) => flows,
                     Err(e) => {
                         return Ok(HttpResponse::InternalServerError().json(json!({
@@ -170,23 +194,21 @@ pub async fn permanently_delete_key_by_server(
             })))
         }
     }
-        Err(e) => {
-            Ok(HttpResponse::InternalServerError().json(json!({
-                "error": format!("Failed to delete key: {}", e)
-            })))
-        }
+        Err(e) => Ok(HttpResponse::InternalServerError().json(json!({
+            "error": format!("Failed to delete key: {}", e)
+        }))),
     }
 }
 
 // Serve static files from embedded assets
 pub async fn serve_static_file(path: web::Path<String>) -> Result<HttpResponse> {
     let file_path = path.into_inner();
-    
+
     match StaticAssets::get(&file_path) {
         Some(content) => {
             let content_type = match std::path::Path::new(&file_path)
                 .extension()
-                .and_then(|s| s.to_str())
+                .and_then(|s| s.to_str())
             {
                 Some("html") => "text/html; charset=utf-8",
                 Some("css") => "text/css; charset=utf-8",
@@ -201,22 +223,16 @@ pub async fn serve_static_file(path: web::Path<String>) -> Result<HttpResponse>
                 .content_type(content_type)
                 .body(content.data.as_ref().to_vec()))
         }
-        None => {
-            Ok(HttpResponse::NotFound().body(format!("File not found: {}", file_path)))
-        }
+        None => Ok(HttpResponse::NotFound().body(format!("File not found: {}", file_path))),
     }
 }
 
 // Serve the main web interface from embedded assets
 pub async fn serve_web_interface() -> Result<HttpResponse> {
     match StaticAssets::get("index.html") {
-        Some(content) => {
-            Ok(HttpResponse::Ok()
-                .content_type("text/html; charset=utf-8")
-                .body(content.data.as_ref().to_vec()))
-        }
-        None => {
-            Ok(HttpResponse::NotFound().body("Web interface not found"))
-        }
+        Some(content) => Ok(HttpResponse::Ok()
+            .content_type("text/html; charset=utf-8")
+            .body(content.data.as_ref().to_vec())),
+        None => Ok(HttpResponse::NotFound().body("Web interface not found")),
     }
 }
diff --git a/static/index.html b/static/index.html
index 724a713..17c8311 100644
--- a/static/index.html
+++ b/static/index.html
@@ -36,6 +36,14 @@
 
+
+
+
+
+
 
@@ -48,9 +56,9 @@
 
-                        Server
-                        Key Type
+                        Server/Type
                         Key Preview
+                        Actions
 
diff --git a/static/script.js b/static/script.js
index 2108277..9357eb9 100644
--- a/static/script.js
+++ b/static/script.js
@@ -3,9 +3,13 @@ class SSHKeyManager {
         this.currentFlow = null;
         this.keys = [];
         this.filteredKeys = [];
+        this.groupedKeys = {};
+        this.expandedGroups = new Set();
         this.currentPage = 1;
         this.keysPerPage = 20;
+        this.serversPerPage = 10;
         this.selectedKeys = new Set();
+        this.showDeprecatedOnly = false;
 
         this.initializeEventListeners();
         this.loadFlows();
@@ -50,6 +54,21 @@ class SSHKeyManager {
             this.filterKeys(e.target.value);
         });
 
+        // Deprecated filter checkbox
+        document.getElementById('showDeprecatedOnly').addEventListener('change', (e) => {
+            this.showDeprecatedOnly = e.target.checked;
+
+            // Update visual state
+            const filterLabel = e.target.closest('.filter-label');
+            if (e.target.checked) {
+                filterLabel.classList.add('active');
+            } else {
+                filterLabel.classList.remove('active');
+            }
+
+            this.filterKeys(document.getElementById('searchInput').value);
+        });
+
         // Select all checkbox
         document.getElementById('selectAll').addEventListener('change', (e) => {
             this.toggleSelectAll(e.target.checked);
@@ -146,11 +165,12 @@ class SSHKeyManager {
         try {
             this.showLoading();
-            const response = await fetch(`/${this.currentFlow}/keys`);
+            const response = await fetch(`/${this.currentFlow}/keys?include_deprecated=true`);
             if (!response.ok) throw new Error('Failed to load keys');
 
             this.keys = await response.json();
-            this.filteredKeys = [...this.keys];
+            this.groupKeys();
+            this.filterKeys();
             this.updateStats();
             this.renderTable();
             this.selectedKeys.clear();
@@ -163,16 +183,37 @@ class SSHKeyManager {
         }
     }
 
+    groupKeys() {
+        this.groupedKeys = {};
+        this.keys.forEach(key => {
+            if (!this.groupedKeys[key.server]) {
+                this.groupedKeys[key.server] = [];
+            }
+            this.groupedKeys[key.server].push(key);
+        });
+
+        // Groups are closed by default - no auto-expand
+    }
+
     filterKeys(searchTerm) {
-        if (!searchTerm.trim()) {
-            this.filteredKeys = [...this.keys];
+        let keys = [...this.keys];
+
+        // Apply deprecated filter first
+        if (this.showDeprecatedOnly) {
+            keys = keys.filter(key => key.deprecated);
+        }
+
+        // Then apply search filter
+        if (!searchTerm || !searchTerm.trim()) {
+            this.filteredKeys = keys;
         } else {
             const term = searchTerm.toLowerCase();
-            this.filteredKeys = this.keys.filter(key =>
+            this.filteredKeys = keys.filter(key =>
                 key.server.toLowerCase().includes(term) ||
                 key.public_key.toLowerCase().includes(term)
             );
         }
+
         this.currentPage = 1;
         this.renderTable();
     }
@@ -184,6 +225,17 @@ class SSHKeyManager {
         document.getElementById('uniqueServers').textContent = uniqueServers.size;
     }
 
+    getGroupedFilteredKeys() {
+        const groupedFilteredKeys = {};
+        this.filteredKeys.forEach(key => {
+            if (!groupedFilteredKeys[key.server]) {
+                groupedFilteredKeys[key.server] = [];
+            }
+            groupedFilteredKeys[key.server].push(key);
+        });
+        return groupedFilteredKeys;
+    }
+
     renderTable() {
         const tbody = document.getElementById('keysTableBody');
         const noKeysMessage = document.getElementById('noKeysMessage');
@@ -197,37 +249,78 @@ class SSHKeyManager {
 
         noKeysMessage.style.display = 'none';
 
-        const startIndex = (this.currentPage - 1) * this.keysPerPage;
-        const endIndex = startIndex + this.keysPerPage;
-        const pageKeys = this.filteredKeys.slice(startIndex, endIndex);
+        // Group filtered keys by server
+        const groupedFilteredKeys = this.getGroupedFilteredKeys();
 
-        tbody.innerHTML = pageKeys.map((key, index) => {
-            const keyType = this.getKeyType(key.public_key);
-            const keyPreview = this.getKeyPreview(key.public_key);
-            const keyId = `${key.server}-${key.public_key}`;
+        // Calculate pagination for grouped view
+        const servers = Object.keys(groupedFilteredKeys).sort();
+
+        // For pagination, we'll show a reasonable number of server groups per page
+        const startServerIndex = (this.currentPage - 1) * this.serversPerPage;
+        const endServerIndex = startServerIndex + this.serversPerPage;
+        const pageServers = servers.slice(startServerIndex, endServerIndex);
+
+        let html = '';
+
+        pageServers.forEach(server => {
+            const serverKeys = groupedFilteredKeys[server];
+            const activeCount = serverKeys.filter(k => !k.deprecated).length;
+            const deprecatedCount = serverKeys.filter(k => k.deprecated).length;
+            const isExpanded = this.expandedGroups.has(server);
 
-            return `
-                
-                    
-                    
-                        ${this.escapeHtml(key.server)}
-                        ${key.deprecated ? 'DEPRECATED' : ''}
-                    
-                    ${keyType}
-                    ${keyPreview}
-                    
-                        ${key.deprecated ?
-                            `` :
-                            ``
-                        }
-                    
-                
-            `;
-        }).join('');
+            // Server group header
+            html += `
+                
+                    
+                        
+                    
+                    
+                        ${isExpanded ? '▼' : '▶'}
+                        ${this.escapeHtml(server)}
+                        
+                        ${serverKeys.length} keys
+                        ${deprecatedCount > 0 ? `${deprecatedCount} deprecated` : ''}
+                    
+                
+            `;
+
+            // Server keys (if expanded)
+            if (isExpanded) {
+                serverKeys.forEach(key => {
+                    const keyType = this.getKeyType(key.public_key);
+                    const keyPreview = this.getKeyPreview(key.public_key);
+                    const keyId = `${key.server}-${key.public_key}`;
+
+                    html += `
+                        
+                            
+                            
+                                ${keyType}
+                                ${key.deprecated ? 'DEPRECATED' : ''}
+                            
+                            ${keyPreview}
+                            
+                                ${key.deprecated ?
+                                    `` :
+                                    ``
+                                }
+                            
+                        
+                    `;
+                });
+            }
+        });
+
+        tbody.innerHTML = html;
 
         // Add event listeners for checkboxes
         tbody.querySelectorAll('input[type="checkbox"]').forEach(checkbox => {
@@ -240,14 +333,78 @@ class SSHKeyManager {
                 }
                 this.updateBulkDeleteButton();
                 this.updateSelectAllCheckbox();
+                this.updateGroupCheckboxes(); // Update group checkboxes when individual keys change
             });
         });
 
+        // Update group checkboxes to show correct indeterminate state
+        this.updateGroupCheckboxes();
 
         this.updatePagination();
     }
 
+    toggleGroup(server) {
+        if (this.expandedGroups.has(server)) {
+            this.expandedGroups.delete(server);
+        } else {
+            this.expandedGroups.add(server);
+        }
+        this.renderTable();
+    }
+
+    toggleGroupSelection(server, isChecked) {
+        const groupedFilteredKeys = this.getGroupedFilteredKeys();
+        const serverKeys = groupedFilteredKeys[server] || [];
+
+        serverKeys.forEach(key => {
+            const keyId = `${key.server}-${key.public_key}`;
+            if (isChecked) {
+                this.selectedKeys.add(keyId);
+            } else {
+                this.selectedKeys.delete(keyId);
+            }
+        });
+
+        this.updateBulkDeleteButton();
+        this.updateSelectAllCheckbox();
+        this.updateGroupCheckboxes();
+
+        // Update individual checkboxes without full re-render
+        const tbody = document.getElementById('keysTableBody');
+        serverKeys.forEach(key => {
+            const keyId = `${key.server}-${key.public_key}`;
+            const checkbox = tbody.querySelector(`input[data-key-id="${keyId}"]`);
+            if (checkbox) {
+                checkbox.checked = this.selectedKeys.has(keyId);
+            }
+        });
+    }
+
+    updateGroupCheckboxes() {
+        const groupedFilteredKeys = this.getGroupedFilteredKeys();
+        const tbody = document.getElementById('keysTableBody');
+
+        Object.keys(groupedFilteredKeys).forEach(server => {
+            const serverKeys = groupedFilteredKeys[server];
+            const groupCheckbox = tbody.querySelector(`input[data-group="${server}"]`);
+
+            if (groupCheckbox) {
+                const allSelected = serverKeys.every(key =>
+                    this.selectedKeys.has(`${key.server}-${key.public_key}`)
+                );
+                const someSelected = serverKeys.some(key =>
+                    this.selectedKeys.has(`${key.server}-${key.public_key}`)
+                );
+
+                groupCheckbox.checked = allSelected;
+                groupCheckbox.indeterminate = someSelected && !allSelected;
+            }
+        });
+    }
+
     updatePagination() {
-        const totalPages = Math.ceil(this.filteredKeys.length / this.keysPerPage);
+        const groupedFilteredKeys = this.getGroupedFilteredKeys();
+        const totalServers = Object.keys(groupedFilteredKeys).length;
+        const totalPages = Math.ceil(totalServers / this.serversPerPage);
 
         document.getElementById('pageInfo').textContent = `Page ${this.currentPage} of ${totalPages}`;
         document.getElementById('prevPage').disabled = this.currentPage <= 1;
@@ -255,7 +412,10 @@ class SSHKeyManager {
     }
 
     changePage(newPage) {
-        const totalPages = Math.ceil(this.filteredKeys.length / this.keysPerPage);
+        const groupedFilteredKeys = this.getGroupedFilteredKeys();
+        const totalServers = Object.keys(groupedFilteredKeys).length;
+        const totalPages = Math.ceil(totalServers / this.serversPerPage);
+
         if (newPage >= 1 && newPage <= totalPages) {
             this.currentPage = newPage;
             this.renderTable();
diff --git a/static/style.css b/static/style.css
index d4ce567..8f7c943 100644
--- a/static/style.css
+++ b/static/style.css
@@ -170,6 +170,46 @@ header h1 {
     flex-wrap: wrap;
 }
 
+.filter-controls {
+    display: flex;
+    align-items: center;
+    gap: 1rem;
+}
+
+.filter-label {
+    display: flex;
+    align-items: center;
+    gap: 0.5rem;
+    font-size: 0.875rem;
+    color: var(--text-primary);
+    cursor: pointer;
+    user-select: none;
+    padding: 0.5rem 0.75rem;
+    border-radius: var(--border-radius);
+    transition: background-color 0.2s ease;
+}
+
+.filter-label:hover {
+    background-color: var(--background);
+}
+
+.filter-label.active {
+    background-color: var(--primary-color);
+    color: white;
+}
+
+.filter-label.active input[type="checkbox"] {
+    accent-color: white;
+}
+
+.filter-label input[type="checkbox"] {
+    margin: 0;
+}
+
+.filter-label span {
+    white-space: nowrap;
+}
+
 .search-box input {
     padding: 0.5rem 1rem;
     border: 1px solid var(--border);
@@ -225,6 +265,79 @@ header h1 {
     color: var(--text-secondary);
 }
 
+.host-group-header {
+    background-color: #f1f5f9;
+    font-weight: 600;
+    transition: background-color 0.2s ease;
+    border-left: 4px solid var(--primary-color);
+}
+
+.host-group-header:hover {
+    background-color: #e2e8f0;
+}
+
+.host-group-header.collapsed {
+    border-left-color: var(--secondary-color);
+}
+
+.host-group-header .expand-icon {
+    transition: transform 0.2s ease;
+    display: inline-block;
+    margin-right: 0.5rem;
+    user-select: none;
+}
+
+.host-group-header.collapsed .expand-icon {
+    transform: rotate(-90deg);
+}
+
+.host-group-header input[type="checkbox"] {
+    margin: 0;
+}
+
+.host-group-header td:first-child {
+    width: 50px;
+    text-align: center;
+}
+
+.host-group-header td:nth-child(2) {
+    cursor: pointer;
+    user-select: none;
+}
+
+.key-row {
+    border-left: 4px solid transparent;
+}
+
+.key-row.hidden {
+    display: none;
+}
+
+.host-summary {
+    font-size: 0.875rem;
+    color: var(--text-secondary);
+}
+
+.key-count {
+    background-color: var(--primary-color);
+    color: white;
+    padding: 0.125rem 0.375rem;
+    border-radius: 0.25rem;
+    font-size: 0.75rem;
+    font-weight: 500;
+    margin-left: 0.5rem;
+}
+
+.deprecated-count {
+    background-color: var(--danger-color);
+    color: white;
+    padding: 0.125rem 0.375rem;
+    border-radius: 0.25rem;
+    font-size: 0.75rem;
+    font-weight: 500;
+    margin-left: 0.25rem;
+}
+
 .key-preview {
     font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace;
     font-size: 0.875rem;
@@ -480,6 +593,11 @@ header h1 {
     .actions-panel {
         flex-direction: column;
         align-items: stretch;
+        gap: 1rem;
+    }
+
+    .filter-controls {
+        justify-content: center;
     }
 
     .search-box input {
@@ -515,6 +633,13 @@ header h1 {
     accent-color: var(--primary-color);
 }
 
+/* Indeterminate checkbox styling */
+input[type="checkbox"]:indeterminate {
+    background-color: var(--primary-color);
+    background-image: linear-gradient(90deg, transparent 40%, white 40%, white 60%, transparent 60%);
+    border-color: var(--primary-color);
+}
+
 /* Action buttons in table */
 .table-actions {
     display: flex;
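
--
Post-patch note: a minimal smoke test for the three deprecation endpoints this
patch adds in src/server.rs. This is only an illustrative sketch, not part of
the commit: the base URL, port, flow name ("work"), and server name are
hypothetical placeholders, and khm's own CLI client never calls these routes
(the web UI does).

    // Exercises deprecate -> restore -> permanent delete against a running server.
    use reqwest::Client;

    #[tokio::main]
    async fn main() -> Result<(), reqwest::Error> {
        let http = Client::new();
        // Placeholder URL shape: {host}/{flow_id}/keys/{server}
        let base = "http://127.0.0.1:8080/work/keys/myhost.example.com";

        // DELETE /{flow_id}/keys/{server}: mark the host's keys deprecated
        // (rows stay in the DB but are filtered out of CLI known_hosts).
        http.delete(base).send().await?.error_for_status()?;

        // POST /{flow_id}/keys/{server}/restore: flip them back to active.
        http.post(format!("{base}/restore")).send().await?.error_for_status()?;

        // DELETE /{flow_id}/keys/{server}/delete: drop the flow associations
        // and permanently remove key rows no other flow still references.
        http.delete(format!("{base}/delete")).send().await?.error_for_status()?;

        Ok(())
    }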