Merge WS v1 engine and web debug runtime-config integration

This commit is contained in:
Xin Wang
2026-02-09 08:19:55 +08:00
13 changed files with 986 additions and 298 deletions

View File

@@ -5,7 +5,7 @@ import uuid
from datetime import datetime
from ..db import get_db
from ..models import Assistant
from ..models import Assistant, LLMModel, ASRModel, Voice
from ..schemas import (
AssistantCreate, AssistantUpdate, AssistantOut
)
@@ -13,6 +13,73 @@ from ..schemas import (
router = APIRouter(prefix="/assistants", tags=["Assistants"])
def _is_siliconflow_vendor(vendor: Optional[str]) -> bool:
return (vendor or "").strip().lower() in {"siliconflow", "硅基流动"}
def _resolve_runtime_metadata(db: Session, assistant: Assistant) -> dict:
    """Resolve an assistant's runtime config for the engine WS handshake.

    Looks up the assistant's referenced LLM / ASR / voice records and
    flattens them into the ``sessionStartMetadata`` service dicts. A
    referenced record that cannot be found is reported through the
    ``warnings`` list instead of raising.

    Args:
        db: Active SQLAlchemy session used for the lookups.
        assistant: The assistant row whose config is being resolved.

    Returns:
        A dict with ``assistantId``, ``sessionStartMetadata``, the raw
        reference ids under ``sources``, and any resolution ``warnings``.
    """
    warnings = []
    services = {}
    metadata = {
        "systemPrompt": assistant.prompt or "",
        "greeting": assistant.opener or "",
        "services": services,
    }

    # LLM is always exposed through the OpenAI-compatible provider shape.
    if assistant.llm_model_id:
        llm = db.query(LLMModel).filter(LLMModel.id == assistant.llm_model_id).first()
        if llm is None:
            warnings.append(f"LLM model not found: {assistant.llm_model_id}")
        else:
            services["llm"] = {
                "provider": "openai",
                "model": llm.model_name or llm.name,
                "apiKey": llm.api_key,
                "baseUrl": llm.base_url,
            }

    # ASR: SiliconFlow vendors go to the remote provider, all others fall
    # back to the local "buffered" provider (which needs no credential).
    if assistant.asr_model_id:
        asr = db.query(ASRModel).filter(ASRModel.id == assistant.asr_model_id).first()
        if asr is None:
            warnings.append(f"ASR model not found: {assistant.asr_model_id}")
        else:
            asr_provider = "siliconflow" if _is_siliconflow_vendor(asr.vendor) else "buffered"
            services["asr"] = {
                "provider": asr_provider,
                "model": asr.model_name or asr.name,
                "apiKey": asr.api_key if asr_provider == "siliconflow" else None,
            }

    # TTS: resolve the voice record; when no record matches, treat the raw
    # assistant.voice value as a direct voice identifier fallback.
    if assistant.voice:
        voice = db.query(Voice).filter(Voice.id == assistant.voice).first()
        if voice is not None:
            tts_provider = "siliconflow" if _is_siliconflow_vendor(voice.vendor) else "edge"
            services["tts"] = {
                "provider": tts_provider,
                "model": voice.model,
                "apiKey": voice.api_key if tts_provider == "siliconflow" else None,
                "voice": voice.voice_key or voice.id,
                # Assistant-level speed override wins; falls back to the
                # voice's own speed when unset (or 0 — truthiness check).
                "speed": assistant.speed or voice.speed,
            }
        else:
            # NOTE(review): unlike the resolved branch, this fallback dict
            # carries no "provider" key — confirm consumers tolerate that.
            services["tts"] = {
                "voice": assistant.voice,
                "speed": assistant.speed or 1.0,
            }
            warnings.append(f"Voice resource not found: {assistant.voice}")

    return {
        "assistantId": assistant.id,
        "sessionStartMetadata": metadata,
        "sources": {
            "llmModelId": assistant.llm_model_id,
            "asrModelId": assistant.asr_model_id,
            "voiceId": assistant.voice,
        },
        "warnings": warnings,
    }
def assistant_to_dict(assistant: Assistant) -> dict:
return {
"id": assistant.id,
@@ -84,6 +151,15 @@ def get_assistant(id: str, db: Session = Depends(get_db)):
return assistant_to_dict(assistant)
@router.get("/{id}/runtime-config")
def get_assistant_runtime_config(id: str, db: Session = Depends(get_db)):
    """Resolve assistant runtime config for engine WS session.start metadata.

    Raises:
        HTTPException: 404 when no assistant exists for the given id.
    """
    found = db.query(Assistant).filter(Assistant.id == id).first()
    if found is None:
        raise HTTPException(status_code=404, detail="Assistant not found")
    return _resolve_runtime_metadata(db, found)
@router.post("", response_model=AssistantOut)
def create_assistant(data: AssistantCreate, db: Session = Depends(get_db)):
"""创建新助手"""
@@ -139,4 +215,3 @@ def delete_assistant(id: str, db: Session = Depends(get_db)):
db.delete(assistant)
db.commit()
return {"message": "Deleted successfully"}

View File

@@ -166,3 +166,39 @@ class TestAssistantAPI:
response = client.post("/api/assistants", json=sample_assistant_data)
assert response.status_code == 200
assert response.json()["language"] == lang
def test_get_runtime_config(self, client, sample_assistant_data, sample_llm_model_data, sample_asr_model_data, sample_voice_data):
    """Test resolved runtime config endpoint for WS session.start metadata."""
    # Create the three backing resources the assistant will reference.
    assert client.post("/api/llm", json=sample_llm_model_data).status_code == 200
    assert client.post("/api/asr", json=sample_asr_model_data).status_code == 200
    voice_resp = client.post("/api/voices", json=sample_voice_data)
    assert voice_resp.status_code == 200

    # Wire the assistant to those resources plus runtime prompt/opener.
    sample_assistant_data.update({
        "llmModelId": sample_llm_model_data["id"],
        "asrModelId": sample_asr_model_data["id"],
        "voice": voice_resp.json()["id"],
        "prompt": "runtime prompt",
        "opener": "runtime opener",
        "speed": 1.1,
    })
    created = client.post("/api/assistants", json=sample_assistant_data)
    assert created.status_code == 200
    assistant_id = created.json()["id"]

    # Fetch and verify the resolved runtime payload.
    runtime_resp = client.get(f"/api/assistants/{assistant_id}/runtime-config")
    assert runtime_resp.status_code == 200
    payload = runtime_resp.json()
    assert payload["assistantId"] == assistant_id

    metadata = payload["sessionStartMetadata"]
    assert metadata["systemPrompt"] == "runtime prompt"
    assert metadata["greeting"] == "runtime opener"
    services = metadata["services"]
    assert services["llm"]["model"] == sample_llm_model_data["model_name"]
    assert services["asr"]["model"] == sample_asr_model_data["model_name"]
    assert services["tts"]["voice"] == sample_voice_data["voice_key"]