feat: enhance chat CLI and TUI with initial opener handling and improved prompt logic

This commit is contained in:
Xin Wang
2026-03-10 23:55:53 +08:00
parent ef2614a70a
commit a55ca37c39
5 changed files with 278 additions and 26 deletions

View File

@@ -32,6 +32,7 @@ load_dotenv(Path(__file__).with_name(".env"))
API_KEY = os.getenv("API_KEY") API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("BASE_URL") BASE_URL = os.getenv("BASE_URL")
APP_ID = os.getenv("APP_ID")
for stream in (sys.stdout, sys.stderr): for stream in (sys.stdout, sys.stderr):
if hasattr(stream, "reconfigure"): if hasattr(stream, "reconfigure"):
@@ -144,16 +145,85 @@ def _resolve_option_token(token: str, options: List[Dict[str, str]]) -> Optional
return None return None
def _coerce_text(value: Any) -> str:
return str(value or "").strip()
def _first_nonempty_text(*values: Any) -> str:
    """Return the first argument that coerces to a non-empty string, else ""."""
    return next((text for text in map(_coerce_text, values) if text), "")
def _merge_prompt_parts(*values: Any) -> str:
    """Join distinct non-empty coerced values with newlines, first-seen order."""
    merged: List[str] = []
    for raw in values:
        candidate = _coerce_text(raw)
        # Skip blanks and exact duplicates; keep the order of first appearance.
        if candidate and candidate not in merged:
            merged.append(candidate)
    return "\n".join(merged)
def _interactive_prompt_text(payload: Dict[str, Any], default_text: str) -> str:
    """Build the text shown for an interactive event.

    Preference order: a merged opener + prompt, then title-like fields,
    then description-like fields, finally *default_text*.
    """
    raw_params = payload.get("params")
    params: Dict[str, Any] = raw_params if isinstance(raw_params, dict) else {}

    opener = _first_nonempty_text(
        payload.get("opener"),
        params.get("opener"),
        payload.get("intro"),
        params.get("intro"),
    )
    prompt = _first_nonempty_text(
        payload.get("prompt"),
        params.get("prompt"),
        payload.get("text"),
        params.get("text"),
    )
    title = _first_nonempty_text(
        payload.get("title"),
        params.get("title"),
        payload.get("nodeName"),
        payload.get("label"),
    )
    description = _first_nonempty_text(
        payload.get("description"),
        payload.get("desc"),
        params.get("description"),
        params.get("desc"),
    )
    return _merge_prompt_parts(opener, prompt) or title or description or default_text
def _extract_chat_init_opener(payload: Any) -> str:
    """Pull the welcome/opener text out of a chat-init response payload."""
    if not isinstance(payload, dict):
        return ""

    def _nested_dict(container: Dict[str, Any], key: str) -> Dict[str, Any]:
        # Only descend when the value really is a mapping.
        value = container.get(key)
        return value if isinstance(value, dict) else {}

    data = payload.get("data") if isinstance(payload.get("data"), dict) else payload
    app = _nested_dict(data, "app")
    chat_config = _nested_dict(app, "chatConfig")
    # Precedence: chatConfig.welcomeText, then welcomeText / opener / intro
    # on the app and data containers.
    candidates = (
        chat_config.get("welcomeText"),
        app.get("welcomeText"),
        data.get("welcomeText"),
        data.get("opener"),
        app.get("opener"),
        data.get("intro"),
        app.get("intro"),
    )
    return _first_nonempty_text(*candidates)
def _get_initial_app_opener(client: ChatClient, chat_id: str) -> str:
    """Fetch the app's configured opener via the chat-init endpoint.

    Returns "" when no APP_ID is configured. HTTP errors propagate so the
    caller decides how to report the failure.
    """
    if not APP_ID:
        return ""
    init_response = client.get_chat_init(appId=APP_ID, chatId=chat_id)
    init_response.raise_for_status()
    return _extract_chat_init_opener(init_response.json())
def _prompt_user_select(event: FastGPTInteractiveEvent) -> Optional[str]: def _prompt_user_select(event: FastGPTInteractiveEvent) -> Optional[str]:
@@ -165,7 +235,8 @@ def _prompt_user_select(event: FastGPTInteractiveEvent) -> Optional[str]:
options = [item for index, raw in enumerate(raw_options, start=1) if (item := _normalize_option(raw, index))] options = [item for index, raw in enumerate(raw_options, start=1) if (item := _normalize_option(raw, index))]
print() print()
print(f"[INTERACTIVE] {prompt_text}") print("[INTERACTIVE]")
print(prompt_text)
for index, option in enumerate(options, start=1): for index, option in enumerate(options, start=1):
print(f" {index}. {option['label']}") print(f" {index}. {option['label']}")
if option["description"]: if option["description"]:
@@ -212,7 +283,8 @@ def _prompt_user_input(event: FastGPTInteractiveEvent) -> Optional[str]:
form_fields = params.get("inputForm") if isinstance(params.get("inputForm"), list) else [] form_fields = params.get("inputForm") if isinstance(params.get("inputForm"), list) else []
print() print()
print(f"[INTERACTIVE] {prompt_text}") print("[INTERACTIVE]")
print(prompt_text)
if not form_fields: if not form_fields:
value = input("Input (/cancel to stop): ").strip() value = input("Input (/cancel to stop): ").strip()
@@ -384,6 +456,15 @@ def main() -> None:
print(f"Using chatId: {chat_id}\n") print(f"Using chatId: {chat_id}\n")
with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client: with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
try:
opener = _get_initial_app_opener(client, chat_id)
except Exception as exc:
print(f"[INIT] Failed to load app opener: {exc}\n")
opener = ""
if opener:
print(f"Assistant: {opener}\n")
while True: while True:
try: try:
user_input = input("You: ").strip() user_input = input("You: ").strip()

View File

@@ -32,8 +32,10 @@ if str(EXAMPLES_DIR) not in sys.path:
sys.path.insert(0, str(EXAMPLES_DIR)) sys.path.insert(0, str(EXAMPLES_DIR))
from chat_cli import ( from chat_cli import (
APP_ID,
API_KEY, API_KEY,
BASE_URL, BASE_URL,
_extract_chat_init_opener,
_extract_text_from_event, _extract_text_from_event,
_interactive_prompt_text, _interactive_prompt_text,
_normalize_option, _normalize_option,
@@ -451,10 +453,11 @@ class FastGPTWorkbench(App[None]):
raise RuntimeError("Set API_KEY and BASE_URL in examples/.env before starting chat_tui.py") raise RuntimeError("Set API_KEY and BASE_URL in examples/.env before starting chat_tui.py")
self._refresh_sidebar() self._refresh_sidebar()
self._set_status("Ready", "Fresh session") self._set_status("Ready", "Fresh session")
initial_message = self._initial_session_message()
self._append_message( self._append_message(
role="system", role="system",
title="Session", title="Session",
content="Start typing below. FastGPT workflow events will appear in the left rail.", content=initial_message,
) )
self.query_one("#composer", TextArea).focus() self.query_one("#composer", TextArea).focus()
@@ -464,8 +467,9 @@ class FastGPTWorkbench(App[None]):
def _refresh_sidebar(self) -> None: def _refresh_sidebar(self) -> None:
session_panel = self.query_one("#session_panel", Static) session_panel = self.query_one("#session_panel", Static)
base_url = BASE_URL or "" base_url = BASE_URL or ""
app_id = APP_ID or "(not set)"
session_panel.update( session_panel.update(
f"Session\n\nchatId: {self.chat_id}\nbaseUrl: {base_url}" f"Session\n\nchatId: {self.chat_id}\nappId: {app_id}\nbaseUrl: {base_url}"
) )
def _set_status(self, heading: str, detail: str) -> None: def _set_status(self, heading: str, detail: str) -> None:
@@ -481,6 +485,24 @@ class FastGPTWorkbench(App[None]):
except Exception: except Exception:
return content return content
def _default_session_message(self) -> str:
return "Start typing below. FastGPT workflow events will appear in the left rail."
def _initial_session_message(self) -> str:
if not APP_ID:
return self._default_session_message()
try:
with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
response = client.get_chat_init(appId=APP_ID, chatId=self.chat_id)
response.raise_for_status()
opener = _extract_chat_init_opener(response.json())
except Exception as exc:
self._log_event(f"[init] Failed to load app opener: {exc}")
return self._default_session_message()
return opener or self._default_session_message()
def _append_message(self, role: str, title: str, content: str) -> str: def _append_message(self, role: str, title: str, content: str) -> str:
self._message_counter += 1 self._message_counter += 1
widget_id = f"message-{self._message_counter}" widget_id = f"message-{self._message_counter}"
@@ -546,7 +568,8 @@ class FastGPTWorkbench(App[None]):
self._start_turn(result, title="Workflow Input", role="workflow") self._start_turn(result, title="Workflow Input", role="workflow")
def _present_interactive(self, event: FastGPTInteractiveEvent) -> None: def _present_interactive(self, event: FastGPTInteractiveEvent) -> None:
self._log_event(f"[interactive] {event.interaction_type}") prompt_summary = _interactive_prompt_text(event.data, event.interaction_type).replace("\n", " / ")
self._log_event(f"[interactive] {event.interaction_type}: {prompt_summary}")
if event.interaction_type == "userInput": if event.interaction_type == "userInput":
self.push_screen(InteractiveInputScreen(event), self._handle_interactive_result) self.push_screen(InteractiveInputScreen(event), self._handle_interactive_result)
return return
@@ -569,10 +592,11 @@ class FastGPTWorkbench(App[None]):
self.query_one("#messages", VerticalScroll).remove_children() self.query_one("#messages", VerticalScroll).remove_children()
self._refresh_sidebar() self._refresh_sidebar()
self._set_status("Ready", "Started a new random session") self._set_status("Ready", "Started a new random session")
initial_message = self._initial_session_message()
self._append_message( self._append_message(
role="system", role="system",
title="Session", title="Session",
content="New chat created. Start typing below.", content=initial_message,
) )
self._log_event(f"[local] Started new chatId {self.chat_id}") self._log_event(f"[local] Started new chatId {self.chat_id}")

View File

@@ -127,8 +127,9 @@ class AsyncFastGPTClient(BaseClientMixin):
response._stream_context = stream_context response._stream_context = stream_context
response._stream_context_closed = False response._stream_context_closed = False
# Override close() to also close the stream context # Preserve the native async response closer and make both
original_close = response.close # `await response.close()` and `await response.aclose()` safe.
original_aclose = response.aclose
async def close_with_context(): async def close_with_context():
"""Close both the response and the stream context.""" """Close both the response and the stream context."""
@@ -136,10 +137,10 @@ class AsyncFastGPTClient(BaseClientMixin):
return return
try: try:
# Close the response first # Async streaming responses must use `aclose()`.
await original_close() await original_aclose()
finally: finally:
# Always close the stream context, even if response.close() fails # Always close the stream context, even if response cleanup fails
if hasattr(response, '_stream_context') and response._stream_context is not None: if hasattr(response, '_stream_context') and response._stream_context is not None:
try: try:
await response._stream_context.__aexit__(None, None, None) await response._stream_context.__aexit__(None, None, None)
@@ -150,6 +151,7 @@ class AsyncFastGPTClient(BaseClientMixin):
response._stream_context_closed = True response._stream_context_closed = True
response.close = close_with_context response.close = close_with_context
response.aclose = close_with_context
# Safety net: ensure cleanup on garbage collection # Safety net: ensure cleanup on garbage collection
def cleanup_stream_context(stream_ctx_ref): def cleanup_stream_context(stream_ctx_ref):

View File

@@ -162,13 +162,18 @@ class TestAsyncFastGPTClientSendRequest:
mock_response = Mock(spec=httpx.Response) mock_response = Mock(spec=httpx.Response)
mock_response.status_code = 200 mock_response.status_code = 200
# Track if close was called on the response original_sync_close_called = []
original_close_called = [] original_async_close_called = []
async def original_close(): def original_close():
original_close_called.append(True) original_sync_close_called.append(True)
raise AssertionError("sync close should not be used for async streaming responses")
mock_response.close = original_close async def original_aclose():
original_async_close_called.append(True)
mock_response.close = Mock(side_effect=original_close)
mock_response.aclose = AsyncMock(side_effect=original_aclose)
mock_stream_context = AsyncContextManagerMock(mock_response) mock_stream_context = AsyncContextManagerMock(mock_response)
@@ -182,8 +187,9 @@ class TestAsyncFastGPTClientSendRequest:
# Verify stream context exit was called # Verify stream context exit was called
mock_stream_context.__aexit__.assert_called_once_with(None, None, None) mock_stream_context.__aexit__.assert_called_once_with(None, None, None)
# Verify the original close was called # Verify async cleanup path was used instead of sync close()
assert len(original_close_called) == 1 assert len(original_sync_close_called) == 0
assert len(original_async_close_called) == 1
await client.close() await client.close()
@pytest.mark.asyncio @pytest.mark.asyncio

139
tests/test_chat_examples.py Normal file
View File

@@ -0,0 +1,139 @@
"""Regression tests for the interactive example helpers."""
from __future__ import annotations
import importlib.util
import sys
import types
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parents[1]
CHAT_CLI_PATH = REPO_ROOT / "examples" / "chat_cli.py"
def _load_chat_cli_module():
    """Load examples/chat_cli.py under a private name with stubbed deps.

    Installs a no-op ``dotenv`` module (when missing) and temporarily swaps
    ``fastgpt_client`` for lightweight stubs so the example imports without
    the real SDK. The loaded module is cached in ``sys.modules`` so repeated
    calls share a single instance.
    """
    module_name = "_test_chat_cli"
    existing = sys.modules.get(module_name)
    if existing is not None:
        return existing
    if "dotenv" not in sys.modules:
        dotenv_module = types.ModuleType("dotenv")
        dotenv_module.load_dotenv = lambda *args, **kwargs: None
        sys.modules["dotenv"] = dotenv_module
    original_fastgpt_client = sys.modules.get("fastgpt_client")
    stub_fastgpt_client = types.ModuleType("fastgpt_client")
    stub_fastgpt_client.ChatClient = object
    stub_fastgpt_client.FastGPTInteractiveEvent = object
    stub_fastgpt_client.iter_stream_events = lambda response: iter(())
    sys.modules["fastgpt_client"] = stub_fastgpt_client
    spec = importlib.util.spec_from_file_location(module_name, CHAT_CLI_PATH)
    # Raise (not assert) so the check survives python -O.
    if spec is None or spec.loader is None:
        raise ImportError(f"Cannot build an import spec for {CHAT_CLI_PATH}")
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    try:
        spec.loader.exec_module(module)
    except BaseException:
        # Don't cache a half-initialised module; a later call should retry
        # instead of returning the broken instance.
        sys.modules.pop(module_name, None)
        raise
    finally:
        # Always restore (or remove) the stubbed fastgpt_client.
        if original_fastgpt_client is None:
            sys.modules.pop("fastgpt_client", None)
        else:
            sys.modules["fastgpt_client"] = original_fastgpt_client
    return module
def test_interactive_prompt_text_uses_opener_when_prompt_is_missing():
    """The opener in params wins when no prompt text is present."""
    chat_cli = _load_chat_cli_module()
    payload = {
        "params": {
            "opener": "Please tell me about your business.",
            "inputForm": [{"label": "Business type"}],
        }
    }
    result = chat_cli._interactive_prompt_text(payload, "Please provide the requested input")
    assert result == "Please tell me about your business."
def test_interactive_prompt_text_keeps_opener_and_prompt():
    """Opener and prompt are merged onto separate lines."""
    chat_cli = _load_chat_cli_module()
    payload = {
        "opener": "A few details will help me tailor the answer.",
        "prompt": "Which plan are you evaluating?",
    }
    expected = (
        "A few details will help me tailor the answer.\nWhich plan are you evaluating?"
    )
    assert chat_cli._interactive_prompt_text(payload, "Please select an option") == expected
def test_extract_chat_init_opener_prefers_welcome_text():
    """chatConfig.welcomeText takes precedence over the app intro."""
    chat_cli = _load_chat_cli_module()
    payload = {
        "data": {
            "app": {
                "chatConfig": {"welcomeText": "Welcome from chat config."},
                "intro": "Fallback intro.",
            }
        }
    }
    assert chat_cli._extract_chat_init_opener(payload) == "Welcome from chat config."
def test_extract_chat_init_opener_falls_back_to_intro():
    """The app intro is used when no welcome text is configured."""
    chat_cli = _load_chat_cli_module()
    payload = {"data": {"app": {"intro": "Tell me what you're working on."}}}
    assert chat_cli._extract_chat_init_opener(payload) == "Tell me what you're working on."
def test_get_initial_app_opener_uses_chat_init():
    """The opener helper calls get_chat_init with the configured ids."""
    chat_cli = _load_chat_cli_module()
    original_app_id = chat_cli.APP_ID
    chat_cli.APP_ID = "app-123"

    class _Response:
        def raise_for_status(self):
            return None

        def json(self):
            return {"data": {"app": {"chatConfig": {"welcomeText": "Hello from init."}}}}

    class _Client:
        def __init__(self):
            self.calls = []

        def get_chat_init(self, **kwargs):
            self.calls.append(kwargs)
            return _Response()

    stub_client = _Client()
    try:
        opener = chat_cli._get_initial_app_opener(stub_client, "chat-123")
    finally:
        # Restore the module-level APP_ID even if the helper raises.
        chat_cli.APP_ID = original_app_id
    assert opener == "Hello from init."
    assert stub_client.calls == [{"appId": "app-123", "chatId": "chat-123"}]