Commit: Unify db api

New file: engine/tests/test_agent_config.py (+252 lines)
|
||||
import os

from pathlib import Path

import pytest

# Seed placeholder provider credentials BEFORE importing app.config, so that
# module-level settings validation does not fail on missing environment variables.
os.environ.setdefault("LLM_API_KEY", "test-openai-key")
os.environ.setdefault("TTS_API_KEY", "test-tts-key")
os.environ.setdefault("ASR_API_KEY", "test-asr-key")

from app.config import load_settings  # noqa: E402  (must follow the env setup above)
|
||||
|
||||
|
||||
def _write_yaml(path: Path, content: str) -> None:
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.write_text(content, encoding="utf-8")
|
||||
|
||||
|
||||
def _full_agent_yaml(llm_model: str = "gpt-4o-mini", llm_key: str = "test-openai-key") -> str:
    """Return a complete, valid agent YAML document as a string.

    The LLM model name and API key are parameterized so tests can vary them;
    all other sections (vad/tts/asr/duplex/barge_in) carry fixed test values.
    NOTE(review): several tests string-replace exact lines of this template
    (e.g. the tts/asr api_url lines), so the indentation below must stay stable.
    """
    return f"""
agent:
  vad:
    type: silero
    model_path: data/vad/silero_vad.onnx
    threshold: 0.63
    min_speech_duration_ms: 100
    eou_threshold_ms: 800

  llm:
    provider: openai_compatible
    model: {llm_model}
    temperature: 0.2
    api_key: {llm_key}
    api_url: https://example-llm.invalid/v1

  tts:
    provider: openai_compatible
    api_key: test-tts-key
    api_url: https://example-tts.invalid/v1/audio/speech
    model: FunAudioLLM/CosyVoice2-0.5B
    voice: anna
    speed: 1.0

  asr:
    provider: openai_compatible
    api_key: test-asr-key
    api_url: https://example-asr.invalid/v1/audio/transcriptions
    model: FunAudioLLM/SenseVoiceSmall
    interim_interval_ms: 500
    min_audio_ms: 300
    start_min_speech_ms: 160
    pre_speech_ms: 240
    final_tail_ms: 120

  duplex:
    enabled: true
    system_prompt: You are a strict test assistant.

  barge_in:
    min_duration_ms: 200
    silence_tolerance_ms: 60
""".strip()
|
||||
|
||||
|
||||
def test_cli_profile_loads_agent_yaml(monkeypatch, tmp_path):
    """--agent-profile NAME loads config/agents/NAME.yaml relative to the CWD."""
    monkeypatch.chdir(tmp_path)
    config_dir = tmp_path / "config" / "agents"
    _write_yaml(
        config_dir / "support.yaml",
        _full_agent_yaml(llm_model="gpt-4.1-mini"),
    )

    settings = load_settings(
        argv=["--agent-profile", "support"],
    )

    # Values must come from the profile YAML, and provenance must point at it.
    assert settings.llm_model == "gpt-4.1-mini"
    assert settings.llm_temperature == 0.2
    assert settings.vad_threshold == 0.63
    assert settings.agent_config_source == "cli_profile"
    assert settings.agent_config_path == str((config_dir / "support.yaml").resolve())
|
||||
|
||||
|
||||
def test_cli_path_has_higher_priority_than_env(monkeypatch, tmp_path):
    """An explicit --agent-config path wins over AGENT_CONFIG_PATH from the environment."""
    monkeypatch.chdir(tmp_path)
    env_file = tmp_path / "config" / "agents" / "env.yaml"
    cli_file = tmp_path / "config" / "agents" / "cli.yaml"

    # Distinct model names let us tell which file was actually loaded.
    _write_yaml(env_file, _full_agent_yaml(llm_model="env-model"))
    _write_yaml(cli_file, _full_agent_yaml(llm_model="cli-model"))

    monkeypatch.setenv("AGENT_CONFIG_PATH", str(env_file))

    settings = load_settings(argv=["--agent-config", str(cli_file)])

    assert settings.llm_model == "cli-model"
    assert settings.agent_config_source == "cli_path"
    assert settings.agent_config_path == str(cli_file.resolve())
|
||||
|
||||
|
||||
def test_default_yaml_is_loaded_without_args_or_env(monkeypatch, tmp_path):
    """With no CLI args and no env overrides, config/agents/default.yaml is used."""
    monkeypatch.chdir(tmp_path)
    default_file = tmp_path / "config" / "agents" / "default.yaml"
    _write_yaml(default_file, _full_agent_yaml(llm_model="from-default"))

    # Ensure neither override channel is active.
    monkeypatch.delenv("AGENT_CONFIG_PATH", raising=False)
    monkeypatch.delenv("AGENT_PROFILE", raising=False)

    settings = load_settings(argv=[])

    assert settings.llm_model == "from-default"
    assert settings.agent_config_source == "default"
    assert settings.agent_config_path == str(default_file.resolve())
|
||||
|
||||
|
||||
def test_missing_required_agent_settings_fail(monkeypatch, tmp_path):
    """A YAML missing mandatory provider settings is rejected with a clear error."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "missing-required.yaml"
    # Only an llm model — no keys/urls/tts/asr sections at all.
    _write_yaml(
        file_path,
        """
agent:
  llm:
    model: gpt-4o-mini
""".strip(),
    )

    with pytest.raises(ValueError, match="Missing required agent settings in YAML"):
        load_settings(argv=["--agent-config", str(file_path)])
|
||||
|
||||
|
||||
def test_blank_required_provider_key_fails(monkeypatch, tmp_path):
    """An empty string for a required provider key counts as missing, not as present."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "blank-key.yaml"
    _write_yaml(file_path, _full_agent_yaml(llm_key=""))

    with pytest.raises(ValueError, match="Missing required agent settings in YAML"):
        load_settings(argv=["--agent-config", str(file_path)])
|
||||
|
||||
|
||||
def test_missing_tts_api_url_fails(monkeypatch, tmp_path):
    """Removing the TTS api_url line from an otherwise complete YAML must fail validation."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "missing-tts-url.yaml"
    _write_yaml(
        file_path,
        # NOTE(review): the replaced line must match _full_agent_yaml's indentation exactly.
        _full_agent_yaml().replace(
            "    api_url: https://example-tts.invalid/v1/audio/speech\n",
            "",
        ),
    )

    with pytest.raises(ValueError, match="Missing required agent settings in YAML"):
        load_settings(argv=["--agent-config", str(file_path)])
|
||||
|
||||
|
||||
def test_missing_asr_api_url_fails(monkeypatch, tmp_path):
    """Removing the ASR api_url line from an otherwise complete YAML must fail validation."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "missing-asr-url.yaml"
    _write_yaml(
        file_path,
        # NOTE(review): the replaced line must match _full_agent_yaml's indentation exactly.
        _full_agent_yaml().replace(
            "    api_url: https://example-asr.invalid/v1/audio/transcriptions\n",
            "",
        ),
    )

    with pytest.raises(ValueError, match="Missing required agent settings in YAML"):
        load_settings(argv=["--agent-config", str(file_path)])
|
||||
|
||||
|
||||
def test_agent_yaml_unknown_key_fails(monkeypatch, tmp_path):
    """Unknown keys under agent: are rejected rather than silently ignored."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "bad-agent.yaml"
    # Appends an unrecognized option nested under agent:.
    _write_yaml(file_path, _full_agent_yaml() + "\n  unknown_option: true")

    with pytest.raises(ValueError, match="Unknown agent config keys"):
        load_settings(argv=["--agent-config", str(file_path)])
|
||||
|
||||
|
||||
def test_legacy_siliconflow_section_fails(monkeypatch, tmp_path):
    """The removed legacy 'siliconflow' section is rejected with a dedicated error message."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "legacy-siliconflow.yaml"
    _write_yaml(
        file_path,
        """
agent:
  siliconflow:
    api_key: x
""".strip(),
    )

    with pytest.raises(ValueError, match="Section 'siliconflow' is no longer supported"):
        load_settings(argv=["--agent-config", str(file_path)])
|
||||
|
||||
|
||||
def test_agent_yaml_missing_env_reference_fails(monkeypatch, tmp_path):
    """A ${VAR} reference to an unset environment variable is a hard error."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "bad-ref.yaml"
    _write_yaml(
        file_path,
        _full_agent_yaml(llm_key="${UNSET_LLM_API_KEY}"),
    )

    with pytest.raises(ValueError, match="Missing environment variable"):
        load_settings(argv=["--agent-config", str(file_path)])
|
||||
|
||||
|
||||
def test_agent_yaml_tools_list_is_loaded(monkeypatch, tmp_path):
    """A tools: list with both bare names and full schema dicts surfaces on settings.tools."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "tools-agent.yaml"
    _write_yaml(
        file_path,
        _full_agent_yaml()
        + """

tools:
  - current_time
  - name: weather
    description: Get weather by city.
    parameters:
      type: object
      properties:
        city:
          type: string
      required: [city]
    executor: server
""",
    )

    settings = load_settings(argv=["--agent-config", str(file_path)])

    # Bare string entry is kept as-is; dict entry is kept with its schema fields.
    assert isinstance(settings.tools, list)
    assert settings.tools[0] == "current_time"
    assert settings.tools[1]["name"] == "weather"
    assert settings.tools[1]["executor"] == "server"
|
||||
|
||||
|
||||
def test_agent_yaml_tools_must_be_list(monkeypatch, tmp_path):
    """A mapping under tools: (instead of a list) is rejected."""
    monkeypatch.chdir(tmp_path)
    file_path = tmp_path / "bad-tools-agent.yaml"
    _write_yaml(
        file_path,
        _full_agent_yaml()
        + """

tools:
  weather:
    executor: server
""",
    )

    with pytest.raises(ValueError, match="Agent config key 'tools' must be a list"):
        load_settings(argv=["--agent-config", str(file_path)])
|
||||
New file: engine/tests/test_backend_adapters.py (+150 lines)
|
||||
import aiohttp
|
||||
import pytest
|
||||
|
||||
from app.backend_adapters import (
|
||||
HistoryDisabledBackendAdapter,
|
||||
HttpBackendAdapter,
|
||||
NullBackendAdapter,
|
||||
build_backend_adapter,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_build_backend_adapter_without_url_returns_null_adapter():
    """Without a backend URL the factory returns the no-op adapter, whose methods
    all report "nothing persisted" (None / False / empty list)."""
    adapter = build_backend_adapter(
        backend_url=None,
        backend_mode="auto",
        history_enabled=True,
        timeout_sec=3,
    )
    assert isinstance(adapter, NullBackendAdapter)

    assert await adapter.fetch_assistant_config("assistant_1") is None
    assert (
        await adapter.create_call_record(
            user_id=1,
            assistant_id="assistant_1",
            source="debug",
        )
        is None
    )
    assert (
        await adapter.add_transcript(
            call_id="call_1",
            turn_index=0,
            speaker="human",
            content="hi",
            start_ms=0,
            end_ms=100,
            confidence=0.9,
            duration_ms=100,
        )
        is False
    )
    assert (
        await adapter.finalize_call_record(
            call_id="call_1",
            status="connected",
            duration_seconds=2,
        )
        is False
    )
    # Knowledge search degrades to an empty result set, tool fetch to None.
    assert await adapter.search_knowledge_context(kb_id="kb_1", query="hello", n_results=3) == []
    assert await adapter.fetch_tool_resource("tool_1") is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_http_backend_adapter_create_call_record_posts_expected_payload(monkeypatch):
    """create_call_record POSTs the expected JSON to /api/history and returns the id
    from the response; the session is built with the configured ClientTimeout."""
    captured = {}

    class _FakeResponse:
        # Minimal aiohttp-response stand-in: async context manager + json() + raise_for_status().
        def __init__(self, status=200, payload=None):
            self.status = status
            self._payload = payload if payload is not None else {}

        async def __aenter__(self):
            return self

        async def __aexit__(self, exc_type, exc, tb):
            return None

        async def json(self):
            return self._payload

        def raise_for_status(self):
            if self.status >= 400:
                raise RuntimeError("http_error")

    class _FakeClientSession:
        # Captures the timeout and POST arguments instead of doing real HTTP.
        def __init__(self, timeout=None):
            captured["timeout"] = timeout

        async def __aenter__(self):
            return self

        async def __aexit__(self, exc_type, exc, tb):
            return None

        def post(self, url, json=None):
            captured["url"] = url
            captured["json"] = json
            return _FakeResponse(status=200, payload={"id": "call_123"})

    # Patch the session class where the adapter module looks it up.
    monkeypatch.setattr("app.backend_adapters.aiohttp.ClientSession", _FakeClientSession)

    adapter = build_backend_adapter(
        backend_url="http://localhost:8100",
        backend_mode="auto",
        history_enabled=True,
        timeout_sec=7,
    )
    assert isinstance(adapter, HttpBackendAdapter)

    call_id = await adapter.create_call_record(
        user_id=99,
        assistant_id="assistant_9",
        source="debug",
    )

    assert call_id == "call_123"
    assert captured["url"] == "http://localhost:8100/api/history"
    assert captured["json"] == {
        "user_id": 99,
        "assistant_id": "assistant_9",
        "source": "debug",
        "status": "connected",
    }
    # timeout_sec must be forwarded as an aiohttp.ClientTimeout(total=...).
    assert isinstance(captured["timeout"], aiohttp.ClientTimeout)
    assert captured["timeout"].total == 7
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_backend_mode_disabled_forces_null_even_with_url():
    """backend_mode='disabled' overrides a configured URL and yields the no-op adapter."""
    adapter = build_backend_adapter(
        backend_url="http://localhost:8100",
        backend_mode="disabled",
        history_enabled=True,
        timeout_sec=7,
    )
    assert isinstance(adapter, NullBackendAdapter)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_history_disabled_wraps_backend_adapter():
    """history_enabled=False wraps the HTTP adapter so history writes become no-ops."""
    adapter = build_backend_adapter(
        backend_url="http://localhost:8100",
        backend_mode="auto",
        history_enabled=False,
        timeout_sec=7,
    )
    assert isinstance(adapter, HistoryDisabledBackendAdapter)
    # History-writing methods degrade to "not persisted" results.
    assert await adapter.create_call_record(user_id=1, assistant_id="a1", source="debug") is None
    assert await adapter.add_transcript(
        call_id="c1",
        turn_index=0,
        speaker="human",
        content="hi",
        start_ms=0,
        end_ms=10,
        duration_ms=10,
    ) is False
|
||||
New file: engine/tests/test_history_bridge.py (+147 lines)
|
||||
import asyncio
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
from core.history_bridge import SessionHistoryBridge
|
||||
|
||||
|
||||
class _FakeHistoryWriter:
|
||||
def __init__(self, *, add_delay_s: float = 0.0, add_result: bool = True):
|
||||
self.add_delay_s = add_delay_s
|
||||
self.add_result = add_result
|
||||
self.created_call_ids = []
|
||||
self.transcripts = []
|
||||
self.finalize_calls = 0
|
||||
self.finalize_statuses = []
|
||||
self.finalize_at = None
|
||||
self.last_transcript_at = None
|
||||
|
||||
async def create_call_record(self, *, user_id: int, assistant_id: str | None, source: str = "debug"):
|
||||
_ = (user_id, assistant_id, source)
|
||||
call_id = "call_test_1"
|
||||
self.created_call_ids.append(call_id)
|
||||
return call_id
|
||||
|
||||
async def add_transcript(
|
||||
self,
|
||||
*,
|
||||
call_id: str,
|
||||
turn_index: int,
|
||||
speaker: str,
|
||||
content: str,
|
||||
start_ms: int,
|
||||
end_ms: int,
|
||||
confidence: float | None = None,
|
||||
duration_ms: int | None = None,
|
||||
) -> bool:
|
||||
_ = confidence
|
||||
if self.add_delay_s > 0:
|
||||
await asyncio.sleep(self.add_delay_s)
|
||||
self.transcripts.append(
|
||||
{
|
||||
"call_id": call_id,
|
||||
"turn_index": turn_index,
|
||||
"speaker": speaker,
|
||||
"content": content,
|
||||
"start_ms": start_ms,
|
||||
"end_ms": end_ms,
|
||||
"duration_ms": duration_ms,
|
||||
}
|
||||
)
|
||||
self.last_transcript_at = time.monotonic()
|
||||
return self.add_result
|
||||
|
||||
async def finalize_call_record(self, *, call_id: str, status: str, duration_seconds: int) -> bool:
|
||||
_ = (call_id, duration_seconds)
|
||||
self.finalize_calls += 1
|
||||
self.finalize_statuses.append(status)
|
||||
self.finalize_at = time.monotonic()
|
||||
return True
|
||||
|
||||
@pytest.mark.asyncio
async def test_slow_backend_does_not_block_enqueue():
    """enqueue_turn must return immediately even when the writer is slow (0.15 s per add)."""
    writer = _FakeHistoryWriter(add_delay_s=0.15, add_result=True)
    bridge = SessionHistoryBridge(
        history_writer=writer,
        enabled=True,
        queue_max_size=32,
        retry_max_attempts=0,
        retry_backoff_sec=0.01,
        finalize_drain_timeout_sec=1.0,
    )

    try:
        call_id = await bridge.start_call(user_id=1, assistant_id="assistant_1", source="debug")
        assert call_id == "call_test_1"

        t0 = time.perf_counter()
        queued = bridge.enqueue_turn(role="user", text="hello world")
        elapsed_s = time.perf_counter() - t0

        assert queued is True
        # Far below the writer's 0.15 s delay: the write happens off the caller's path.
        assert elapsed_s < 0.02

        # finalize drains the queue, so the one turn must have been written.
        await bridge.finalize(status="connected")
        assert len(writer.transcripts) == 1
        assert writer.finalize_calls == 1
    finally:
        await bridge.shutdown()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_failing_backend_retries_but_enqueue_remains_non_blocking():
    """A writer that reports failure triggers retries in the background worker,
    while enqueue_turn itself still returns immediately."""
    writer = _FakeHistoryWriter(add_delay_s=0.01, add_result=False)
    bridge = SessionHistoryBridge(
        history_writer=writer,
        enabled=True,
        queue_max_size=32,
        retry_max_attempts=2,
        retry_backoff_sec=0.01,
        finalize_drain_timeout_sec=0.5,
    )

    try:
        await bridge.start_call(user_id=1, assistant_id="assistant_1", source="debug")
        t0 = time.perf_counter()
        assert bridge.enqueue_turn(role="assistant", text="retry me")
        elapsed_s = time.perf_counter() - t0
        assert elapsed_s < 0.02

        await bridge.finalize(status="connected")

        # Initial try + 2 retries
        assert len(writer.transcripts) == 3
        assert writer.finalize_calls == 1
    finally:
        await bridge.shutdown()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_finalize_is_idempotent_and_waits_for_queue_drain():
    """finalize() drains queued transcripts before finalizing the call, and a second
    finalize() is a harmless no-op (no duplicate writes or finalize calls)."""
    writer = _FakeHistoryWriter(add_delay_s=0.05, add_result=True)
    bridge = SessionHistoryBridge(
        history_writer=writer,
        enabled=True,
        queue_max_size=32,
        retry_max_attempts=0,
        retry_backoff_sec=0.01,
        finalize_drain_timeout_sec=1.0,
    )

    try:
        await bridge.start_call(user_id=1, assistant_id="assistant_1", source="debug")
        assert bridge.enqueue_turn(role="user", text="first")

        ok_1 = await bridge.finalize(status="connected")
        ok_2 = await bridge.finalize(status="connected")

        assert ok_1 is True
        assert ok_2 is True
        assert len(writer.transcripts) == 1
        assert writer.finalize_calls == 1
        assert writer.last_transcript_at is not None
        assert writer.finalize_at is not None
        # Drain-before-finalize ordering: the transcript write precedes finalize.
        assert writer.finalize_at >= writer.last_transcript_at
    finally:
        await bridge.shutdown()
|
||||
@@ -92,16 +92,44 @@ def _build_pipeline(monkeypatch, llm_rounds: List[List[LLMStreamEvent]]) -> tupl
|
||||
return pipeline, events
|
||||
|
||||
|
||||
def test_pipeline_uses_default_tools_from_settings(monkeypatch):
    """When settings.tools is populated, the pipeline exposes those tools in its
    runtime-config allowlist and resolves schemas for both name-only and dict entries."""
    monkeypatch.setattr(
        "core.duplex_pipeline.settings.tools",
        [
            "current_time",
            {
                "name": "weather",
                "description": "Get weather by city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
                "executor": "server",
            },
        ],
    )
    pipeline, _events = _build_pipeline(monkeypatch, [[LLMStreamEvent(type="done")]])

    cfg = pipeline.resolved_runtime_config()
    assert cfg["tools"]["allowlist"] == ["current_time", "weather"]

    # Both the bare name and the dict entry resolve to function schemas.
    schemas = pipeline._resolved_tool_schemas()
    names = [s.get("function", {}).get("name") for s in schemas if isinstance(s, dict)]
    assert "current_time" in names
    assert "weather" in names
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_ws_message_parses_tool_call_results():
    """A tool_call.results payload parses into ToolCallResultsMessage with typed entries.

    Fix: the dict literal previously contained two "results" keys (a diff-merge
    artifact) — the first was silently shadowed by the second; only the
    fuller payload (with "name") is kept. Likewise the redundant mapping-index
    assertion is dropped in favor of the typed attribute access.
    """
    msg = parse_client_message(
        {
            "type": "tool_call.results",
            "results": [{"tool_call_id": "call_1", "name": "weather", "status": {"code": 200, "message": "ok"}}],
        }
    )
    assert isinstance(msg, ToolCallResultsMessage)
    assert msg.results[0].tool_call_id == "call_1"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
|
||||
Reference in New Issue
Block a user