// duty-ai-ops/src/k8s/pods.rs
use super::client::KubeClient;
use super::types::{ContainerStatus, PodCondition, PodDetails, PodLogs};
use k8s_openapi::api::core::v1::Pod;
use kube::{api::LogParams, Api};
impl KubeClient {
/// Gets detailed information about a specific pod for diagnostics
pub async fn get_pod_details(
&self,
namespace: &str,
pod_name: &str,
) -> Result<PodDetails, Box<dyn std::error::Error>> {
let pods: Api<Pod> = Api::namespaced(self.inner().clone(), namespace);
let pod = pods.get(pod_name).await?;
// Extract phase
let phase = pod
.status
.as_ref()
.and_then(|s| s.phase.clone())
.unwrap_or_else(|| "Unknown".to_string());
// Extract node name
let node_name = pod.spec.as_ref().and_then(|s| s.node_name.clone());
// Extract conditions
let conditions = pod
.status
.as_ref()
.and_then(|s| s.conditions.as_ref())
.map(|conds| {
conds
.iter()
.map(|c| PodCondition {
type_: c.type_.clone(),
status: c.status.clone(),
reason: c.reason.clone(),
message: c.message.clone(),
})
.collect()
})
.unwrap_or_default();
// Extract container statuses
let container_statuses: Vec<ContainerStatus> = pod
.status
.as_ref()
.and_then(|s| s.container_statuses.as_ref())
.map(|cs| {
cs.iter()
.map(|c| {
let (state, state_reason, state_message) = if let Some(waiting) =
&c.state.as_ref().and_then(|s| s.waiting.as_ref())
{
(
"Waiting".to_string(),
waiting.reason.clone(),
waiting.message.clone(),
)
} else if let Some(_running) =
&c.state.as_ref().and_then(|s| s.running.as_ref())
{
("Running".to_string(), None, None)
} else if let Some(terminated) =
&c.state.as_ref().and_then(|s| s.terminated.as_ref())
{
(
"Terminated".to_string(),
terminated.reason.clone(),
terminated.message.clone(),
)
} else {
("Unknown".to_string(), None, None)
};
ContainerStatus {
name: c.name.clone(),
ready: c.ready,
restart_count: c.restart_count,
state,
state_reason,
state_message,
}
})
.collect()
})
.unwrap_or_default();
// Calculate total restart count
let restart_count: i32 = container_statuses.iter().map(|c| c.restart_count).sum();
// Extract start time
let start_time = pod
.status
.as_ref()
.and_then(|s| s.start_time.as_ref())
.map(|t| t.0.to_rfc3339());
Ok(PodDetails {
name: pod_name.to_string(),
namespace: namespace.to_string(),
phase,
node_name,
conditions,
container_statuses,
restart_count,
start_time,
})
}
/// Gets recent logs from a pod's container
pub async fn get_pod_logs(
&self,
namespace: &str,
pod_name: &str,
container_name: Option<&str>,
tail_lines: Option<i64>,
) -> Result<PodLogs, Box<dyn std::error::Error>> {
let pods: Api<Pod> = Api::namespaced(self.inner().clone(), namespace);
// Determine which container to get logs from
let container = if let Some(name) = container_name {
name.to_string()
} else {
// Get first container name
let pod = pods.get(pod_name).await?;
pod.spec
.as_ref()
.and_then(|s| s.containers.first())
.map(|c| c.name.clone())
.ok_or("No containers found in pod")?
};
let log_params = LogParams {
container: Some(container.clone()),
tail_lines,
..Default::default()
};
let logs = pods.logs(pod_name, &log_params).await?;
Ok(PodLogs {
pod_name: pod_name.to_string(),
namespace: namespace.to_string(),
container_name: container,
logs,
})
}
}