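"""Tests for FastGPTLLMService.

Covers streaming text deltas, async stream cleanup, the chat-init greeting,
mapping interactive events to client tool calls, and resuming a chat after a
client tool result, all against fake FastGPT client/response objects.
"""
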
import json
from types import SimpleNamespace
from typing import Any, Dict, List

import pytest

from providers.common.base import LLMMessage
from providers.llm.fastgpt import FastGPTLLMService


class _FakeResponse:
    """Fake streaming response exposing pre-queued events and a close() flag."""

    def __init__(self, events: List[Any]):
        self.events = events
        self.closed = False

    async def close(self) -> None:
        self.closed = True


class _FakeJSONResponse:
    """Fake JSON HTTP response with just enough surface for the chat-init call."""

    def __init__(self, payload: Dict[str, Any], status_code: int = 200):
        self._payload = payload
        self.status_code = status_code

    def json(self) -> Dict[str, Any]:
        return dict(self._payload)

    def raise_for_status(self) -> None:
        if self.status_code >= 400:
            raise RuntimeError(f"HTTP {self.status_code}")


class _FakeAsyncStreamResponse(_FakeResponse):
    """Stream response that must be released with aclose(); a sync close() fails the test."""

    def __init__(self, events: List[Any]):
        super().__init__(events)
        self.aclosed = False

    def close(self) -> None:
        raise AssertionError("sync close should not be used for async stream responses")

    async def aclose(self) -> None:
        self.aclosed = True


class _FakeAsyncChatClient:
    """Fake AsyncChatClient that records requests and replays queued responses.

    ``responses`` and ``init_payload`` are class-level attributes so each test
    can queue data before the provider constructs its own client instance.
    """

    responses: List[_FakeResponse] = []
    init_payload: Dict[str, Any] | None = None

    def __init__(self, api_key: str, base_url: str):
        self.api_key = api_key
        self.base_url = base_url
        self.requests: List[Dict[str, Any]] = []
        self.init_requests: List[Dict[str, Any]] = []

    async def create_chat_completion(self, **kwargs):
        self.requests.append(dict(kwargs))
        if not self.responses:
            raise AssertionError("No fake FastGPT response queued")
        return self.responses.pop(0)

    async def get_chat_init(self, **kwargs):
        self.init_requests.append(dict(kwargs))
        return _FakeJSONResponse(
            self.init_payload or {"data": {"app": {"chatConfig": {"welcomeText": ""}}}},
        )

    async def close(self) -> None:
        return None


async def _fake_aiter_stream_events(response: _FakeResponse):
    """Replay the events queued on a fake response instead of parsing a real stream."""
    for event in response.events:
        yield event


@pytest.mark.asyncio
async def test_fastgpt_provider_streams_text_from_data_event(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    _FakeAsyncChatClient.responses = [
        _FakeResponse(
            [
                SimpleNamespace(
                    kind="data",
                    data={"choices": [{"delta": {"content": "Hello from FastGPT."}}]},
                ),
                SimpleNamespace(kind="done", data={}),
            ]
        )
    ]

    service = FastGPTLLMService(api_key="key", base_url="https://fastgpt.example")
    await service.connect()

    events = [event async for event in service.generate_stream([LLMMessage(role="user", content="Hi")])]

    assert [event.type for event in events] == ["text_delta", "done"]
    assert events[0].text == "Hello from FastGPT."
    assert service.client.requests[0]["messages"] == [{"role": "user", "content": "Hi"}]
    assert service.client.requests[0]["chatId"] == service._state.chat_id


@pytest.mark.asyncio
async def test_fastgpt_provider_streams_text_from_answer_delta_event(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    _FakeAsyncChatClient.responses = [
        _FakeResponse(
            [
                SimpleNamespace(
                    kind="answer",
                    data={"choices": [{"delta": {"content": "Hello from answer delta."}}]},
                ),
                SimpleNamespace(kind="done", data={}),
            ]
        )
    ]

    service = FastGPTLLMService(api_key="key", base_url="https://fastgpt.example")
    await service.connect()

    events = [event async for event in service.generate_stream([LLMMessage(role="user", content="Hi")])]

    assert [event.type for event in events] == ["text_delta", "done"]
    assert events[0].text == "Hello from answer delta."


@pytest.mark.asyncio
async def test_fastgpt_provider_uses_async_close_for_stream_responses(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    response = _FakeAsyncStreamResponse(
        [
            SimpleNamespace(
                kind="data",
                data={"choices": [{"delta": {"content": "Hello from FastGPT."}}]},
            ),
            SimpleNamespace(kind="done", data={}),
        ]
    )
    _FakeAsyncChatClient.responses = [response]

    service = FastGPTLLMService(api_key="key", base_url="https://fastgpt.example")
    await service.connect()

    events = [event async for event in service.generate_stream([LLMMessage(role="user", content="Hi")])]

    assert [event.type for event in events] == ["text_delta", "done"]
    assert response.aclosed is True


@pytest.mark.asyncio
async def test_fastgpt_provider_loads_initial_greeting_from_chat_init(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    _FakeAsyncChatClient.init_payload = {
        "data": {
            "app": {
                "chatConfig": {
                    "welcomeText": "Hello from FastGPT init.",
                }
            }
        }
    }

    service = FastGPTLLMService(
        api_key="key",
        base_url="https://fastgpt.example",
        app_id="app-123",
    )
    await service.connect()

    greeting = await service.get_initial_greeting()

    assert greeting == "Hello from FastGPT init."
    assert service.client.init_requests[0] == {
        "appId": "app-123",
        "chatId": service._state.chat_id,
    }


@pytest.mark.asyncio
async def test_fastgpt_provider_maps_interactive_event_to_client_tool(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    _FakeAsyncChatClient.responses = [
        _FakeResponse(
            [
                SimpleNamespace(
                    kind="interactive",
                    data={
                        "type": "userSelect",
                        "title": "Choose a plan",
                        "params": {
                            "description": "Pick the best plan for your team.",
                            "userSelectOptions": [
                                {"id": "basic", "label": "Basic", "value": "basic", "desc": "Starter tier"},
                                {"id": "pro", "label": "Pro", "value": "pro", "description": "Advanced tier"},
                            ]
                        },
                    },
                )
            ]
        )
    ]

    service = FastGPTLLMService(api_key="key", base_url="https://fastgpt.example")
    await service.connect()

    events = [event async for event in service.generate_stream([LLMMessage(role="user", content="Start")])]

    assert len(events) == 1
    assert events[0].type == "tool_call"
    tool_call = events[0].tool_call
    assert tool_call["executor"] == "client"
    assert tool_call["wait_for_response"] is True
    assert tool_call["timeout_ms"] == 300000
    assert tool_call["function"]["name"] == "fastgpt.interactive"

    arguments = json.loads(tool_call["function"]["arguments"])
    assert arguments["provider"] == "fastgpt"
    assert arguments["version"] == "fastgpt_interactive_v1"
    assert arguments["interaction"]["type"] == "userSelect"
    assert arguments["interaction"]["description"] == "Pick the best plan for your team."
    assert arguments["interaction"]["options"][0]["description"] == "Starter tier"
    assert arguments["interaction"]["options"][1]["value"] == "pro"
    assert arguments["interaction"]["options"][1]["description"] == "Advanced tier"
    assert arguments["context"]["chat_id"] == service._state.chat_id
    assert service._state.pending_interaction is not None


@pytest.mark.asyncio
async def test_fastgpt_provider_unwraps_nested_tool_children_interactive(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    _FakeAsyncChatClient.responses = [
        _FakeResponse(
            [
                SimpleNamespace(
                    kind="interactive",
                    data={
                        "interactive": {
                            "type": "toolChildrenInteractive",
                            "params": {
                                "childrenResponse": {
                                    "type": "userSelect",
                                    "params": {
                                        "description": "Please choose a workflow branch.",
                                        "userSelectOptions": [
                                            {"value": "A", "description": "Branch A"},
                                            {"value": "B", "description": "Branch B"},
                                        ],
                                    },
                                }
                            },
                        }
                    },
                )
            ]
        )
    ]

    service = FastGPTLLMService(api_key="key", base_url="https://fastgpt.example")
    await service.connect()

    events = [event async for event in service.generate_stream([LLMMessage(role="user", content="Start")])]

    assert len(events) == 1
    arguments = json.loads(events[0].tool_call["function"]["arguments"])
    assert arguments["interaction"]["type"] == "userSelect"
    assert arguments["interaction"]["description"] == "Please choose a workflow branch."
    assert arguments["interaction"]["options"][0]["description"] == "Branch A"


@pytest.mark.asyncio
async def test_fastgpt_provider_uses_opener_for_interactive_prompt_when_prompt_missing(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    _FakeAsyncChatClient.responses = [
        _FakeResponse(
            [
                SimpleNamespace(
                    kind="interactive",
                    data={
                        "type": "userSelect",
                        "opener": "请确认您是否满意本次服务。",
                        "params": {
                            "userSelectOptions": [
                                {"value": "是"},
                                {"value": "否"},
                            ]
                        },
                    },
                )
            ]
        )
    ]

    service = FastGPTLLMService(api_key="key", base_url="https://fastgpt.example")
    await service.connect()

    events = [event async for event in service.generate_stream([LLMMessage(role="user", content="Start")])]

    assert len(events) == 1
    tool_call = events[0].tool_call
    arguments = json.loads(tool_call["function"]["arguments"])
    assert tool_call["display_name"] == "请确认您是否满意本次服务。"
    assert arguments["interaction"]["prompt"] == "请确认您是否满意本次服务。"


@pytest.mark.asyncio
async def test_fastgpt_provider_resumes_same_chat_after_client_result(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    _FakeAsyncChatClient.responses = [
        _FakeResponse(
            [
                SimpleNamespace(
                    kind="interactive",
                    data={
                        "type": "userSelect",
                        "params": {"userSelectOptions": [{"label": "Pro", "value": "pro"}]},
                    },
                )
            ]
        ),
        _FakeResponse(
            [
                SimpleNamespace(kind="answer", data={"text": "Resumed answer."}),
                SimpleNamespace(kind="done", data={}),
            ]
        ),
    ]

    service = FastGPTLLMService(api_key="key", base_url="https://fastgpt.example")
    await service.connect()

    initial_events = [event async for event in service.generate_stream([LLMMessage(role="user", content="Start")])]
    call_id = initial_events[0].tool_call["id"]

    resumed_events = [
        event
        async for event in service.resume_after_client_tool_result(
            call_id,
            {
                "tool_call_id": call_id,
                "name": "fastgpt.interactive",
                "output": {
                    "action": "submit",
                    "result": {"type": "userSelect", "selected": "pro"},
                },
                "status": {"code": 200, "message": "ok"},
            },
        )
    ]

    assert [event.type for event in resumed_events] == ["text_delta", "done"]
    assert resumed_events[0].text == "Resumed answer."
    assert service.client.requests[1]["chatId"] == service.client.requests[0]["chatId"]
    assert service.client.requests[1]["messages"] == [{"role": "user", "content": "pro"}]
    assert service._state.pending_interaction is None


@pytest.mark.asyncio
async def test_fastgpt_provider_cancel_result_clears_pending_interaction(monkeypatch):
    monkeypatch.setattr("providers.llm.fastgpt.AsyncChatClient", _FakeAsyncChatClient)
    monkeypatch.setattr("providers.llm.fastgpt.aiter_stream_events", _fake_aiter_stream_events)

    _FakeAsyncChatClient.responses = [
        _FakeResponse(
            [
                SimpleNamespace(
                    kind="interactive",
                    data={
                        "type": "userInput",
                        "params": {"inputForm": [{"name": "name", "label": "Name"}]},
                    },
                )
            ]
        )
    ]

    service = FastGPTLLMService(api_key="key", base_url="https://fastgpt.example")
    await service.connect()

    initial_events = [event async for event in service.generate_stream([LLMMessage(role="user", content="Start")])]
    call_id = initial_events[0].tool_call["id"]

    resumed_events = [
        event
        async for event in service.resume_after_client_tool_result(
            call_id,
            {
                "tool_call_id": call_id,
                "name": "fastgpt.interactive",
                "output": {"action": "cancel", "result": {}},
                "status": {"code": 499, "message": "user_cancelled"},
            },
        )
    ]

    assert [event.type for event in resumed_events] == ["done"]
    assert service._state.pending_interaction is None