Add manual opener tool calls to Assistant model and API
- Introduced `manual_opener_tool_calls` field in the Assistant model to support custom tool calls. - Updated AssistantBase and AssistantUpdate schemas to include the new field. - Implemented normalization and migration logic for handling manual opener tool calls in the API. - Enhanced runtime metadata to include manual opener tool calls in responses. - Updated tests to validate the new functionality and ensure proper handling of tool calls. - Refactored tool ID normalization to support legacy tool names for backward compatibility.
This commit is contained in:
@@ -282,6 +282,74 @@ async def test_generated_opener_uses_tool_capable_turn_when_tools_available(monk
|
||||
assert called.get("user_text") == ""
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_manual_opener_tool_calls_emit_assistant_tool_call(monkeypatch):
    """A manual opener tool call configured via runtime overrides is emitted
    as an ``assistant.tool_call`` event when the initial greeting runs."""
    # One LLM turn that completes immediately — the tool call must come from
    # the manual opener configuration, not from the model stream.
    pipeline, events = _build_pipeline(monkeypatch, [[LLMStreamEvent(type="done")]])

    overrides = {
        "generatedOpenerEnabled": False,
        "greeting": "你好,欢迎来电",
        "output": {"mode": "text"},
        "tools": [
            {
                "type": "function",
                "executor": "client",
                "waitForResponse": False,
                "function": {
                    "name": "text_msg_prompt",
                    "description": "Show prompt dialog",
                    "parameters": {"type": "object", "properties": {"msg": {"type": "string"}}},
                },
            }
        ],
        "manualOpenerToolCalls": [
            {"toolName": "text_msg_prompt", "arguments": {"msg": "请先选择业务类型"}}
        ],
    }
    pipeline.apply_runtime_overrides(overrides)

    await pipeline.emit_initial_greeting()

    # Exactly one tool-call event, carrying the configured name and arguments.
    emitted = [evt for evt in events if evt.get("type") == "assistant.tool_call"]
    assert len(emitted) == 1
    assert emitted[0].get("tool_name") == "text_msg_prompt"
    assert emitted[0].get("arguments") == {"msg": "请先选择业务类型"}
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_manual_opener_legacy_voice_message_prompt_is_normalized(monkeypatch):
    """The legacy tool name ``voice_message_prompt`` in a manual opener tool
    call is normalized to ``voice_msg_prompt`` in the emitted event."""
    # Immediate-done LLM turn; any tool-call event originates from the
    # manual opener configuration below.
    pipeline, events = _build_pipeline(monkeypatch, [[LLMStreamEvent(type="done")]])

    runtime_config = {
        "generatedOpenerEnabled": False,
        "greeting": "",
        "output": {"mode": "text"},
        "tools": [
            {
                "type": "function",
                "executor": "client",
                "waitForResponse": False,
                "function": {
                    "name": "voice_message_prompt",
                    "description": "Speak prompt",
                    "parameters": {"type": "object", "properties": {"msg": {"type": "string"}}},
                },
            }
        ],
        # Legacy spelling on purpose — the pipeline should rewrite it.
        "manualOpenerToolCalls": [
            {"toolName": "voice_message_prompt", "arguments": {"msg": "您好"}}
        ],
    }
    pipeline.apply_runtime_overrides(runtime_config)

    await pipeline.emit_initial_greeting()

    tool_calls = [evt for evt in events if evt.get("type") == "assistant.tool_call"]
    assert len(tool_calls) == 1
    # Normalized (modern) tool name, original arguments untouched.
    assert tool_calls[0].get("tool_name") == "voice_msg_prompt"
    assert tool_calls[0].get("arguments") == {"msg": "您好"}
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ws_message_parses_tool_call_results():
|
||||
msg = parse_client_message(
|
||||
|
||||
Reference in New Issue
Block a user