API: add llmResponse event handling
@@ -115,7 +115,13 @@ class WavFileClient:
             "direction": direction,
             "message": message
         })
-        print(f"{direction} {message}")
+        # Handle encoding errors on Windows
+        try:
+            print(f"{direction} {message}")
+        except UnicodeEncodeError:
+            # Replace problematic characters for console output
+            safe_message = message.encode('ascii', errors='replace').decode('ascii')
+            print(f"{direction} {safe_message}")
 
     async def connect(self) -> None:
         """Connect to WebSocket server."""
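For reference, a minimal sketch of the fallback introduced above: on Windows consoles using a legacy code page, printing the arrow glyphs or other non-ASCII text can raise UnicodeEncodeError, and the ascii/replace round trip degrades those characters to "?" instead of crashing. The helper name below is hypothetical.

    # Sketch of the ascii/replace fallback; safe_console_text is a hypothetical helper.
    def safe_console_text(message: str) -> str:
        # Characters the console cannot encode are replaced with "?"
        return message.encode('ascii', errors='replace').decode('ascii')

    print(safe_console_text("← LLM: café"))  # prints "? LLM: caf?"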
@@ -285,6 +291,14 @@ class WavFileClient:
             elif event_type == "ttfb":
                 latency_ms = event.get("latencyMs", 0)
                 self.log_event("←", f"[TTFB] Server latency: {latency_ms}ms")
+            elif event_type == "llmResponse":
+                text = event.get("text", "")
+                is_final = event.get("isFinal", False)
+                if is_final:
+                    self.log_event("←", f"LLM Response (final): {text[:100]}{'...' if len(text) > 100 else ''}")
+                elif self.verbose:
+                    # Show streaming chunks only in verbose mode
+                    self.log_event("←", f"LLM: {text}")
             elif event_type == "trackStart":
                 self.track_started = True
                 self.log_event("←", "Bot started speaking")
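As a sketch of how the new llmResponse branch behaves, assuming a payload with "text" and "isFinal" fields as read above (the event sequence below is illustrative, not captured from the server):

    # Illustrative event sequence; field names match what the handler reads.
    events = [
        {"type": "llmResponse", "text": "Hello", "isFinal": False},                  # streaming chunk
        {"type": "llmResponse", "text": "Hello, how can I help?", "isFinal": True},  # final text
    ]

    verbose = False
    for event in events:
        text = event.get("text", "")
        if event.get("isFinal", False):
            # Final responses are always logged, truncated to 100 characters
            print(f"LLM Response (final): {text[:100]}{'...' if len(text) > 100 else ''}")
        elif verbose:
            # Intermediate chunks are shown only in verbose mode
            print(f"LLM: {text}")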