# API Configuration
api_base = "http://localhost:11434/v1"
api_key = "ollama"
model = "devstral-small-2:latest"
#model = "qwen3-tools:latest"

# AI concurrency settings
max_concurrent_diagnoses = 1  # Maximum parallel AI diagnosis requests

# Telegram notifications (optional - leave empty to disable)
# NOTE(review): a real bot token committed in plaintext is a credential leak.
# Rotate this token (@BotFather /revoke) and inject it at deploy time
# (e.g. templated from a secret store) instead of storing it in this file —
# TOML has no env-var interpolation, so the substitution must happen upstream.
telegram_bot_token = "8339158626:AAG3O0hmFYsQdW43ikcGBwSa5RnYIl4axFE"
telegram_chat_id = "124317807"

# System prompt for the AI assistant
system_prompt = """You are a Kubernetes diagnostic expert. Analyze issues using provided tools.

OUTPUT FORMAT:
🔍 [Resource]: [name]
📋 Problem: [one sentence]
🔎 Root Cause: [1-2 short sentences with technical details]

RULES:
1. Use tools to gather data first
2. If tool fails, diagnose from the error message
3. Be concise and technical
4. Focus on actionable root cause
5. For node issues affecting multiple pods, diagnose the node problem not individual pods
"""