Add fastgpt as separate assistant mode

This commit is contained in:
Xin Wang
2026-03-11 08:37:34 +08:00
parent 13684d498b
commit f3612a710d
26 changed files with 2333 additions and 210 deletions

View File

@@ -14,7 +14,13 @@ from runtime.ports.control_plane import (
KnowledgeRetriever,
ToolCatalog,
)
from runtime.ports.llm import LLMCancellable, LLMPort, LLMRuntimeConfigurable, LLMServiceSpec
from runtime.ports.llm import (
LLMCancellable,
LLMClientToolResumable,
LLMPort,
LLMRuntimeConfigurable,
LLMServiceSpec,
)
from runtime.ports.service_factory import RealtimeServiceFactory
from runtime.ports.tts import TTSPort, TTSServiceSpec
@@ -30,6 +36,7 @@ __all__ = [
"KnowledgeRetriever",
"ToolCatalog",
"LLMCancellable",
"LLMClientToolResumable",
"LLMPort",
"LLMRuntimeConfigurable",
"LLMServiceSpec",

View File

@@ -18,6 +18,7 @@ class LLMServiceSpec:
model: str
api_key: Optional[str] = None
base_url: Optional[str] = None
app_id: Optional[str] = None
system_prompt: Optional[str] = None
temperature: float = 0.7
knowledge_config: Dict[str, Any] = field(default_factory=dict)
@@ -65,3 +66,17 @@ class LLMRuntimeConfigurable(Protocol):
def set_tool_schemas(self, schemas: Optional[List[Dict[str, Any]]]) -> None:
    """Apply runtime tool schemas used for tool calling.

    Args:
        schemas: Tool definitions (JSON-schema-style dicts) to expose to the
            LLM at runtime. ``None`` presumably clears any previously applied
            schemas — TODO confirm against implementations.
    """
class LLMClientToolResumable(Protocol):
    """Optional extension for providers that pause on client-side tool results.

    Structural protocol (duck-typed): a provider implements this when some
    tool calls are executed on the client and the LLM stream can only
    continue after the correlated tool result is delivered back.
    """

    def handles_client_tool(self, tool_name: str) -> bool:
        """Return True when the provider owns the lifecycle of this client tool.

        Args:
            tool_name: Name of the tool as referenced by the tool call.
        """

    def resume_after_client_tool_result(
        self,
        tool_call_id: str,
        result: Dict[str, Any],
    ) -> AsyncIterator[LLMStreamEvent]:
        """Resume the provider stream after a correlated client-side tool result.

        Args:
            tool_call_id: Identifier correlating this result with the original
                tool call emitted by the provider.
            result: Payload produced by executing the tool on the client.

        Returns:
            An async iterator of ``LLMStreamEvent`` continuing the paused
            provider stream.
        """