feat(auth): replace cookie/api-key auth with JWT Bearer tokens, separate UI from API
Publish Metadata Agent Image / build-and-push-image (push) Successful in 6m3s
Publish Node Player Image / build-and-push-image (push) Failing after 58s
Publish Web Player Image / build-and-push-image (push) Has been cancelled

- Add JWT Bearer token validation to Rust API via OIDC provider JWKS
  with automatic key rotation and 1-hour cache
- Remove x-api-key auth support and built-in web UI from furumi-web-player,
  leaving it as a pure API server
- Add /auth/token endpoint to Node player server to expose OIDC access
  tokens to the frontend
- Move Node player auth endpoints from /api/* to /auth/* to avoid
  path conflicts with Rust API
- Add static file serving to Node Express server for production
  single-container deployment
- Fix SameSite=Strict cookie issue breaking OIDC redirect flow (use Lax)
- Add Dockerfile.node-player with multi-stage Node.js build
- Add CI workflows for node-player Docker image (dev + release)
- Optimize Rust Dockerfiles with dependency caching layer
- Update docker-compose with OIDC env vars and OLLAMA_MODEL support
- Cherry-pick agent LLM client fixes from DEV branch

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Ultradesu
2026-04-08 14:51:52 +01:00
parent 94d14e8fc8
commit e99cacae8b
20 changed files with 515 additions and 161 deletions
+101 -25
View File
@@ -25,16 +25,37 @@ pub async fn normalize(
) -> anyhow::Result<NormalizedFields> {
let user_message = build_user_message(raw, hints, similar_artists, similar_albums, folder_ctx);
let schema = normalize_schema();
let response = call_ollama(
&state.config.ollama_url,
&state.config.ollama_model,
&state.system_prompt,
&user_message,
state.config.ollama_auth.as_deref(),
0.5,
512,
Some(("normalized_metadata", schema.clone())),
)
.await?;
parse_response(&response)
match parse_response(&response) {
Ok(fields) => Ok(fields),
Err(e) => {
tracing::warn!(error = %e, "LLM parse failed, retrying with higher frequency_penalty");
let response2 = call_ollama(
&state.config.ollama_url,
&state.config.ollama_model,
&state.system_prompt,
&user_message,
state.config.ollama_auth.as_deref(),
1.5,
512,
Some(("normalized_metadata", schema)),
)
.await?;
parse_response(&response2)
}
}
}
fn build_user_message(
@@ -113,32 +134,49 @@ fn build_user_message(
}
#[derive(Serialize)]
struct OllamaRequest {
struct ChatRequest {
model: String,
messages: Vec<OllamaMessage>,
format: String,
messages: Vec<ChatMessage>,
#[serde(skip_serializing_if = "Option::is_none")]
response_format: Option<ChatResponseFormat>,
stream: bool,
options: OllamaOptions,
temperature: f64,
max_tokens: u32,
frequency_penalty: f64,
}
#[derive(Serialize)]
struct OllamaMessage {
struct ChatMessage {
role: String,
content: String,
}
#[derive(Serialize)]
struct OllamaOptions {
temperature: f64,
struct ChatResponseFormat {
#[serde(rename = "type")]
kind: String,
json_schema: JsonSchemaWrapper,
}
/// Inner payload of an OpenAI-style `response_format` of type `json_schema`:
/// a named JSON Schema the model's output must conform to.
#[derive(Serialize)]
struct JsonSchemaWrapper {
    /// Identifier for the schema (e.g. "normalized_metadata").
    name: String,
    /// When true, asks the server to enforce the schema exactly
    /// (strict structured output).
    strict: bool,
    /// The JSON Schema document itself.
    schema: serde_json::Value,
}
#[derive(Deserialize)]
struct OllamaResponse {
message: OllamaResponseMessage,
struct ChatResponse {
choices: Vec<ChatChoice>,
}
#[derive(Deserialize)]
struct OllamaResponseMessage {
struct ChatChoice {
message: ChatResponseMessage,
}
/// The assistant message inside a chat-completion choice; only the text
/// `content` field is consumed, other response fields are ignored.
#[derive(Deserialize)]
struct ChatResponseMessage {
    /// Text content of the model's reply.
    content: String,
}
@@ -148,30 +186,40 @@ pub async fn call_ollama(
system_prompt: &str,
user_message: &str,
auth: Option<&str>,
frequency_penalty: f64,
max_tokens: u32,
schema: Option<(&str, serde_json::Value)>,
) -> anyhow::Result<String> {
let client = reqwest::Client::builder()
.timeout(std::time::Duration::from_secs(120))
.build()?;
let request = OllamaRequest {
let response_format = schema.map(|(name, schema)| ChatResponseFormat {
kind: "json_schema".to_owned(),
json_schema: JsonSchemaWrapper { name: name.to_owned(), strict: true, schema },
});
let request = ChatRequest {
model: model.to_owned(),
messages: vec![
OllamaMessage {
ChatMessage {
role: "system".to_owned(),
content: system_prompt.to_owned(),
},
OllamaMessage {
ChatMessage {
role: "user".to_owned(),
content: user_message.to_owned(),
},
],
format: "json".to_owned(),
response_format,
stream: false,
options: OllamaOptions { temperature: 0.1 },
temperature: 0.1,
max_tokens,
frequency_penalty,
};
let url = format!("{}/api/chat", base_url.trim_end_matches('/'));
tracing::info!(%url, model, prompt_len = user_message.len(), "Calling Ollama API...");
let url = format!("{}/v1/chat/completions", base_url.trim_end_matches('/'));
tracing::info!(%url, model, prompt_len = user_message.len(), "Calling LLM API...");
let start = std::time::Instant::now();
let mut req = client.post(&url).json(&request);
@@ -184,18 +232,45 @@ pub async fn call_ollama(
if !resp.status().is_success() {
let status = resp.status();
let body = resp.text().await.unwrap_or_default();
tracing::error!(%status, body = &body[..body.len().min(500)], "Ollama API error");
anyhow::bail!("Ollama returned {}: {}", status, body);
tracing::error!(%status, body = &body[..body.len().min(500)], "LLM API error");
anyhow::bail!("LLM returned {}: {}", status, body);
}
let ollama_resp: OllamaResponse = resp.json().await?;
let chat_resp: ChatResponse = resp.json().await?;
let content = chat_resp
.choices
.into_iter()
.next()
.ok_or_else(|| anyhow::anyhow!("LLM returned empty choices"))?
.message
.content;
tracing::info!(
elapsed_ms = elapsed.as_millis() as u64,
response_len = ollama_resp.message.content.len(),
"Ollama response received"
response_len = content.len(),
"LLM response received"
);
tracing::debug!(raw_response = %ollama_resp.message.content, "LLM raw output");
Ok(ollama_resp.message.content)
tracing::debug!(raw_response = %content, "LLM raw output");
Ok(content)
}
/// JSON Schema describing the `normalized_metadata` structured output.
///
/// Every property is listed in `required`, with optionality expressed as a
/// `["<type>", "null"]` type union rather than by omission from `required`,
/// and `additionalProperties` is disabled — the shape expected by strict
/// structured-output modes (the request wraps this schema with `strict: true`).
fn normalize_schema() -> serde_json::Value {
    serde_json::json!({
        "type": "object",
        "properties": {
            // Core tag fields; null when the model cannot determine a value.
            "artist": { "type": ["string", "null"] },
            "album": { "type": ["string", "null"] },
            "title": { "type": ["string", "null"] },
            "year": { "type": ["integer", "null"] },
            "track_number": { "type": ["integer", "null"] },
            "genre": { "type": ["string", "null"] },
            "featured_artists": { "type": "array", "items": { "type": "string" } },
            // Deserialized into the parser's `release_type` field via
            // #[serde(rename = "release_kind")].
            "release_kind": { "type": ["string", "null"] },
            // Confidence score reported by the model for its normalization.
            "confidence": { "type": ["number", "null"] },
            "notes": { "type": ["string", "null"] }
        },
        "required": ["artist", "album", "title", "year", "track_number", "genre", "featured_artists", "release_kind", "confidence", "notes"],
        "additionalProperties": false
    })
}
/// Parse the LLM JSON response into NormalizedFields.
@@ -222,6 +297,7 @@ fn parse_response(response: &str) -> anyhow::Result<NormalizedFields> {
genre: Option<String>,
#[serde(default)]
featured_artists: Vec<String>,
#[serde(rename = "release_kind")]
release_type: Option<String>,
confidence: Option<f64>,
notes: Option<String>,
+27
View File
@@ -35,12 +35,39 @@ pub async fn propose_merge(state: &Arc<AppState>, merge_id: Uuid) -> anyhow::Res
let user_message = build_merge_message(&artists_data);
let schema = serde_json::json!({
"type": "object",
"properties": {
"canonical_artist_name": { "type": "string" },
"winner_artist_id": { "type": "integer" },
"album_mappings": {
"type": "array",
"items": {
"type": "object",
"properties": {
"source_album_id": { "type": "integer" },
"canonical_name": { "type": "string" },
"merge_into_album_id": { "type": ["integer", "null"] }
},
"required": ["source_album_id", "canonical_name", "merge_into_album_id"],
"additionalProperties": false
}
},
"notes": { "type": "string" }
},
"required": ["canonical_artist_name", "winner_artist_id", "album_mappings", "notes"],
"additionalProperties": false
});
let response = call_ollama(
&state.config.ollama_url,
&state.config.ollama_model,
&state.merge_prompt,
&user_message,
state.config.ollama_auth.as_deref(),
0.5,
4096,
Some(("artist_merge", schema)),
).await?;
let proposal = parse_merge_response(&response)?;