I can use text to get an audio response and barge in
core/__init__.py
@@ -1 +1,22 @@
"""Core Components Package"""

from core.events import EventBus, get_event_bus
from core.transports import BaseTransport, SocketTransport, WebRtcTransport
from core.pipeline import AudioPipeline
from core.session import Session
from core.conversation import ConversationManager, ConversationState, ConversationTurn
from core.duplex_pipeline import DuplexPipeline

__all__ = [
    "EventBus",
    "get_event_bus",
    "BaseTransport",
    "SocketTransport",
    "WebRtcTransport",
    "AudioPipeline",
    "Session",
    "ConversationManager",
    "ConversationState",
    "ConversationTurn",
    "DuplexPipeline",
]
core/conversation.py (new file, 255 lines)
@@ -0,0 +1,255 @@
"""Conversation management for voice AI.

Handles conversation context, turn-taking, and message history
for multi-turn voice conversations.
"""

import asyncio
from typing import List, Optional, Dict, Any, Callable, Awaitable
from dataclasses import dataclass, field
from enum import Enum
from loguru import logger

from services.base import LLMMessage


class ConversationState(Enum):
    """State of the conversation."""
    IDLE = "idle"                # Waiting for user input
    LISTENING = "listening"      # User is speaking
    PROCESSING = "processing"    # Processing user input (LLM)
    SPEAKING = "speaking"        # Bot is speaking
    INTERRUPTED = "interrupted"  # Bot was interrupted


@dataclass
class ConversationTurn:
    """A single turn in the conversation."""
    role: str  # "user" or "assistant"
    text: str
    audio_duration_ms: Optional[int] = None
    timestamp: float = field(default_factory=lambda: asyncio.get_event_loop().time())
    was_interrupted: bool = False


class ConversationManager:
    """
    Manages conversation state and history.

    Provides:
    - Message history for LLM context
    - Turn management
    - State tracking
    - Event callbacks for state changes
    """

    def __init__(
        self,
        system_prompt: Optional[str] = None,
        max_history: int = 20,
        greeting: Optional[str] = None
    ):
        """
        Initialize conversation manager.

        Args:
            system_prompt: System prompt for LLM
            max_history: Maximum number of turns to keep
            greeting: Optional greeting message when conversation starts
        """
        self.system_prompt = system_prompt or (
            "You are a helpful, friendly voice assistant. "
            "Keep your responses concise and conversational. "
            "Respond naturally as if having a phone conversation. "
            "If you don't understand something, ask for clarification."
        )
        self.max_history = max_history
        self.greeting = greeting

        # State
        self.state = ConversationState.IDLE
        self.turns: List[ConversationTurn] = []

        # Callbacks
        self._state_callbacks: List[Callable[[ConversationState, ConversationState], Awaitable[None]]] = []
        self._turn_callbacks: List[Callable[[ConversationTurn], Awaitable[None]]] = []

        # Current turn tracking
        self._current_user_text: str = ""
        self._current_assistant_text: str = ""

        logger.info("ConversationManager initialized")

    def on_state_change(
        self,
        callback: Callable[[ConversationState, ConversationState], Awaitable[None]]
    ) -> None:
        """Register a callback for state changes."""
        self._state_callbacks.append(callback)

    def on_turn_complete(
        self,
        callback: Callable[[ConversationTurn], Awaitable[None]]
    ) -> None:
        """Register a callback for turn completion."""
        self._turn_callbacks.append(callback)

    async def set_state(self, new_state: ConversationState) -> None:
        """Set conversation state and notify listeners."""
        if new_state != self.state:
            old_state = self.state
            self.state = new_state
            logger.debug(f"Conversation state: {old_state.value} -> {new_state.value}")

            for callback in self._state_callbacks:
                try:
                    await callback(old_state, new_state)
                except Exception as e:
                    logger.error(f"State callback error: {e}")

    def get_messages(self) -> List[LLMMessage]:
        """
        Get conversation history as LLM messages.

        Returns:
            List of LLMMessage objects including the system prompt
        """
        messages = [LLMMessage(role="system", content=self.system_prompt)]

        # Add conversation history
        for turn in self.turns[-self.max_history:]:
            messages.append(LLMMessage(role=turn.role, content=turn.text))

        # Add current user text if any
        if self._current_user_text:
            messages.append(LLMMessage(role="user", content=self._current_user_text))

        return messages

    async def start_user_turn(self) -> None:
        """Signal that the user has started speaking."""
        await self.set_state(ConversationState.LISTENING)
        self._current_user_text = ""

    async def update_user_text(self, text: str, is_final: bool = False) -> None:
        """
        Update current user text (from ASR).

        Args:
            text: Transcribed text
            is_final: Whether this is the final transcript
        """
        self._current_user_text = text

    async def end_user_turn(self, text: str) -> None:
        """
        End the user turn and add it to history.

        Args:
            text: Final user text
        """
        if text.strip():
            turn = ConversationTurn(role="user", text=text.strip())
            self.turns.append(turn)

            for callback in self._turn_callbacks:
                try:
                    await callback(turn)
                except Exception as e:
                    logger.error(f"Turn callback error: {e}")

            logger.info(f"User: {text[:50]}...")

        self._current_user_text = ""
        await self.set_state(ConversationState.PROCESSING)

    async def start_assistant_turn(self) -> None:
        """Signal that the assistant has started speaking."""
        await self.set_state(ConversationState.SPEAKING)
        self._current_assistant_text = ""

    async def update_assistant_text(self, text: str) -> None:
        """
        Append to the current assistant text (streaming).

        Args:
            text: Text chunk from LLM
        """
        self._current_assistant_text += text

    async def end_assistant_turn(self, was_interrupted: bool = False) -> None:
        """
        End the assistant turn and add it to history.

        Args:
            was_interrupted: Whether the turn was interrupted by the user
        """
        text = self._current_assistant_text.strip()
        if text:
            turn = ConversationTurn(
                role="assistant",
                text=text,
                was_interrupted=was_interrupted
            )
            self.turns.append(turn)

            for callback in self._turn_callbacks:
                try:
                    await callback(turn)
                except Exception as e:
                    logger.error(f"Turn callback error: {e}")

            status = " (interrupted)" if was_interrupted else ""
            logger.info(f"Assistant{status}: {text[:50]}...")

        self._current_assistant_text = ""

        if was_interrupted:
            await self.set_state(ConversationState.INTERRUPTED)
        else:
            await self.set_state(ConversationState.IDLE)

    async def interrupt(self) -> None:
        """Handle interruption (barge-in)."""
        if self.state == ConversationState.SPEAKING:
            await self.end_assistant_turn(was_interrupted=True)

    def reset(self) -> None:
        """Reset conversation history."""
        self.turns = []
        self._current_user_text = ""
        self._current_assistant_text = ""
        self.state = ConversationState.IDLE
        logger.info("Conversation reset")

    @property
    def turn_count(self) -> int:
        """Get the number of turns in the conversation."""
        return len(self.turns)

    @property
    def last_user_text(self) -> Optional[str]:
        """Get the most recent user text."""
        for turn in reversed(self.turns):
            if turn.role == "user":
                return turn.text
        return None

    @property
    def last_assistant_text(self) -> Optional[str]:
        """Get the most recent assistant text."""
        for turn in reversed(self.turns):
            if turn.role == "assistant":
                return turn.text
        return None

    def get_context_summary(self) -> Dict[str, Any]:
        """Get a summary of conversation context."""
        return {
            "state": self.state.value,
            "turn_count": self.turn_count,
            "last_user": self.last_user_text,
            "last_assistant": self.last_assistant_text,
            "current_user": self._current_user_text or None,
            "current_assistant": self._current_assistant_text or None
        }
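The ConversationManager above can be exercised by hand. Below is a minimal usage sketch, not part of the commit; it assumes the package layout in this repo and a running event loop:

# Hypothetical driver for ConversationManager (illustration only).
import asyncio
from core.conversation import ConversationManager, ConversationTurn

async def demo() -> None:
    convo = ConversationManager(system_prompt="You are a terse assistant.")

    async def log_turn(turn: ConversationTurn) -> None:
        print(f"[{turn.role}] {turn.text!r} interrupted={turn.was_interrupted}")

    convo.on_turn_complete(log_turn)

    await convo.start_user_turn()                     # IDLE -> LISTENING
    await convo.end_user_turn("What's the weather?")  # -> PROCESSING, callback fires
    await convo.start_assistant_turn()                # -> SPEAKING
    await convo.update_assistant_text("Sunny and ")   # streamed chunks accumulate
    await convo.update_assistant_text("mild today.")
    await convo.end_assistant_turn()                  # -> IDLE, callback fires
    print(convo.get_messages())                       # system prompt + both turns

asyncio.run(demo())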
core/duplex_pipeline.py (new file, 509 lines)
@@ -0,0 +1,509 @@
"""Full duplex audio pipeline for AI voice conversation.

This module implements the core duplex pipeline that orchestrates:
- VAD (Voice Activity Detection)
- EOU (End of Utterance) Detection
- ASR (Automatic Speech Recognition) - optional
- LLM (Language Model)
- TTS (Text-to-Speech)

Inspired by pipecat's frame-based architecture and active-call's
event-driven design.
"""

import asyncio
from typing import Optional, Callable, Awaitable
from loguru import logger

from core.transports import BaseTransport
from core.conversation import ConversationManager, ConversationState
from core.events import get_event_bus
from processors.vad import VADProcessor, SileroVAD
from processors.eou import EouDetector
from services.base import BaseLLMService, BaseTTSService, BaseASRService
from services.llm import OpenAILLMService, MockLLMService
from services.tts import EdgeTTSService, MockTTSService
from services.asr import BufferedASRService
from services.siliconflow_tts import SiliconFlowTTSService
from app.config import settings


class DuplexPipeline:
    """
    Full duplex audio pipeline for AI voice conversation.

    Handles bidirectional audio flow with:
    - User speech detection and transcription
    - AI response generation
    - Text-to-speech synthesis
    - Barge-in (interruption) support

    Architecture (inspired by pipecat):

        User Audio → VAD → EOU → [ASR] → LLM → TTS → Audio Out
                      ↓
            Barge-in Detection → Interrupt
    """

    def __init__(
        self,
        transport: BaseTransport,
        session_id: str,
        llm_service: Optional[BaseLLMService] = None,
        tts_service: Optional[BaseTTSService] = None,
        asr_service: Optional[BaseASRService] = None,
        system_prompt: Optional[str] = None,
        greeting: Optional[str] = None
    ):
        """
        Initialize duplex pipeline.

        Args:
            transport: Transport for sending audio/events
            session_id: Session identifier
            llm_service: LLM service (defaults to OpenAI)
            tts_service: TTS service (defaults to EdgeTTS)
            asr_service: ASR service (optional)
            system_prompt: System prompt for LLM
            greeting: Optional greeting to speak on start
        """
        self.transport = transport
        self.session_id = session_id
        self.event_bus = get_event_bus()

        # Initialize VAD
        self.vad_model = SileroVAD(
            model_path=settings.vad_model_path,
            sample_rate=settings.sample_rate
        )
        self.vad_processor = VADProcessor(
            vad_model=self.vad_model,
            threshold=settings.vad_threshold
        )

        # Initialize EOU detector
        self.eou_detector = EouDetector(
            silence_threshold_ms=600,
            min_speech_duration_ms=200
        )

        # Initialize services
        self.llm_service = llm_service
        self.tts_service = tts_service
        self.asr_service = asr_service or BufferedASRService()

        # Conversation manager
        self.conversation = ConversationManager(
            system_prompt=system_prompt,
            greeting=greeting
        )

        # State
        self._running = True
        self._is_bot_speaking = False
        self._current_turn_task: Optional[asyncio.Task] = None
        self._audio_buffer: bytes = b""
        self._last_vad_status: str = "Silence"

        # Interruption handling
        self._interrupt_event = asyncio.Event()

        logger.info(f"DuplexPipeline initialized for session {session_id}")

    async def start(self) -> None:
        """Start the pipeline and connect services."""
        try:
            # Connect LLM service
            if not self.llm_service:
                if settings.openai_api_key:
                    self.llm_service = OpenAILLMService(
                        api_key=settings.openai_api_key,
                        base_url=settings.openai_api_url,
                        model=settings.llm_model
                    )
                else:
                    logger.warning("No OpenAI API key - using mock LLM")
                    self.llm_service = MockLLMService()

            await self.llm_service.connect()

            # Connect TTS service
            if not self.tts_service:
                if settings.tts_provider == "siliconflow" and settings.siliconflow_api_key:
                    self.tts_service = SiliconFlowTTSService(
                        api_key=settings.siliconflow_api_key,
                        voice=settings.tts_voice,
                        model=settings.siliconflow_tts_model,
                        sample_rate=settings.sample_rate,
                        speed=settings.tts_speed
                    )
                    logger.info("Using SiliconFlow TTS service")
                else:
                    self.tts_service = EdgeTTSService(
                        voice=settings.tts_voice,
                        sample_rate=settings.sample_rate
                    )
                    logger.info("Using Edge TTS service")

            await self.tts_service.connect()

            # Connect ASR service
            await self.asr_service.connect()

            logger.info("DuplexPipeline services connected")

            # Speak greeting if configured
            if self.conversation.greeting:
                await self._speak(self.conversation.greeting)

        except Exception as e:
            logger.error(f"Failed to start pipeline: {e}")
            raise

    async def process_audio(self, pcm_bytes: bytes) -> None:
        """
        Process an incoming audio chunk.

        This is the main entry point for audio from the user.

        Args:
            pcm_bytes: PCM audio data (16-bit, mono, 16kHz)
        """
        if not self._running:
            return

        try:
            # 1. Process through VAD
            vad_result = self.vad_processor.process(pcm_bytes, settings.chunk_size_ms)

            vad_status = "Silence"
            if vad_result:
                event_type, probability = vad_result
                vad_status = "Speech" if event_type == "speaking" else "Silence"

                # Emit VAD event
                await self.event_bus.publish(event_type, {
                    "trackId": self.session_id,
                    "probability": probability
                })
            else:
                # No state change - keep previous status
                vad_status = self._last_vad_status

            # Update state based on VAD
            if vad_status == "Speech" and self._last_vad_status != "Speech":
                await self._on_speech_start()

            self._last_vad_status = vad_status

            # 2. Check for barge-in (user speaking while bot speaking)
            if self._is_bot_speaking and vad_status == "Speech":
                await self._handle_barge_in()

            # 3. Buffer audio for ASR
            if vad_status == "Speech" or self.conversation.state == ConversationState.LISTENING:
                self._audio_buffer += pcm_bytes
                await self.asr_service.send_audio(pcm_bytes)

            # 4. Check for End of Utterance
            if self.eou_detector.process(vad_status):
                await self._on_end_of_utterance()

        except Exception as e:
            logger.error(f"Pipeline audio processing error: {e}", exc_info=True)

    async def process_text(self, text: str) -> None:
        """
        Process text input (chat command).

        Allows direct text input that bypasses ASR.

        Args:
            text: User text input
        """
        if not self._running:
            return

        logger.info(f"Processing text input: {text[:50]}...")

        # Cancel any current speaking
        await self._stop_current_speech()

        # Start new turn
        await self.conversation.end_user_turn(text)
        self._current_turn_task = asyncio.create_task(self._handle_turn(text))

    async def interrupt(self) -> None:
        """Interrupt current bot speech (manual interrupt command)."""
        await self._handle_barge_in()

    async def _on_speech_start(self) -> None:
        """Handle the user starting to speak."""
        if self.conversation.state == ConversationState.IDLE:
            await self.conversation.start_user_turn()
            self._audio_buffer = b""
            self.eou_detector.reset()
            logger.debug("User speech started")

    async def _on_end_of_utterance(self) -> None:
        """Handle the end of a user utterance."""
        if self.conversation.state != ConversationState.LISTENING:
            return

        # Get transcribed text (if using an ASR service that provides it)
        user_text = ""
        if hasattr(self.asr_service, 'get_and_clear_text'):
            user_text = self.asr_service.get_and_clear_text()

        # If there is no ASR text, we could send the audio buffer to an
        # external ASR; for now, fall back to a placeholder.
        if not user_text:
            # In a real implementation, you'd send audio_buffer to ASR here.
            # For demo purposes, use mock text.
            user_text = "[User speech detected]"
            logger.warning("No ASR text available - using placeholder")

        logger.info(f"EOU detected - user said: {user_text[:50]}...")

        # Clear buffers
        self._audio_buffer = b""

        # Process the turn
        await self.conversation.end_user_turn(user_text)
        self._current_turn_task = asyncio.create_task(self._handle_turn(user_text))

    async def _handle_turn(self, user_text: str) -> None:
        """
        Handle a complete conversation turn.

        Uses sentence-by-sentence streaming TTS for lower latency.

        Args:
            user_text: User's transcribed text
        """
        try:
            # Get AI response (streaming)
            messages = self.conversation.get_messages()
            full_response = ""

            await self.conversation.start_assistant_turn()
            self._is_bot_speaking = True
            self._interrupt_event.clear()

            # Sentence buffer for streaming TTS
            sentence_buffer = ""
            sentence_ends = {'.', '!', '?', '。', '!', '?', ';', '\n'}
            first_audio_sent = False

            # Stream LLM response and run TTS sentence by sentence
            async for text_chunk in self.llm_service.generate_stream(messages):
                if self._interrupt_event.is_set():
                    break

                full_response += text_chunk
                sentence_buffer += text_chunk
                await self.conversation.update_assistant_text(text_chunk)

                # Check for sentence completion - synthesize immediately for low latency
                while any(end in sentence_buffer for end in sentence_ends):
                    # Find the first sentence end
                    min_idx = len(sentence_buffer)
                    for end in sentence_ends:
                        idx = sentence_buffer.find(end)
                        if idx != -1 and idx < min_idx:
                            min_idx = idx

                    if min_idx < len(sentence_buffer):
                        sentence = sentence_buffer[:min_idx + 1].strip()
                        sentence_buffer = sentence_buffer[min_idx + 1:]

                        if sentence and not self._interrupt_event.is_set():
                            # Send track start on first audio
                            if not first_audio_sent:
                                await self.transport.send_event({
                                    "event": "trackStart",
                                    "trackId": self.session_id,
                                    "timestamp": self._get_timestamp_ms()
                                })
                                first_audio_sent = True

                            # Synthesize and send this sentence immediately
                            await self._speak_sentence(sentence)
                    else:
                        break

            # Speak any remaining text
            if sentence_buffer.strip() and not self._interrupt_event.is_set():
                if not first_audio_sent:
                    await self.transport.send_event({
                        "event": "trackStart",
                        "trackId": self.session_id,
                        "timestamp": self._get_timestamp_ms()
                    })
                    first_audio_sent = True
                await self._speak_sentence(sentence_buffer.strip())

            # Send track end
            if first_audio_sent:
                await self.transport.send_event({
                    "event": "trackEnd",
                    "trackId": self.session_id,
                    "timestamp": self._get_timestamp_ms()
                })

            # End assistant turn
            await self.conversation.end_assistant_turn(
                was_interrupted=self._interrupt_event.is_set()
            )

        except asyncio.CancelledError:
            logger.info("Turn handling cancelled")
            await self.conversation.end_assistant_turn(was_interrupted=True)
        except Exception as e:
            logger.error(f"Turn handling error: {e}", exc_info=True)
            await self.conversation.end_assistant_turn(was_interrupted=True)
        finally:
            self._is_bot_speaking = False

    async def _speak_sentence(self, text: str) -> None:
        """
        Synthesize and send a single sentence.

        Args:
            text: Sentence to speak
        """
        if not text.strip() or self._interrupt_event.is_set():
            return

        try:
            async for chunk in self.tts_service.synthesize_stream(text):
                if self._interrupt_event.is_set():
                    break
                await self.transport.send_audio(chunk.audio)
                await asyncio.sleep(0.005)  # Small delay to prevent flooding
        except Exception as e:
            logger.error(f"TTS sentence error: {e}")

    async def _speak(self, text: str) -> None:
        """
        Synthesize and send speech.

        Args:
            text: Text to speak
        """
        if not text.strip():
            return

        try:
            # Send track start event
            await self.transport.send_event({
                "event": "trackStart",
                "trackId": self.session_id,
                "timestamp": self._get_timestamp_ms()
            })

            self._is_bot_speaking = True

            # Stream TTS audio
            async for chunk in self.tts_service.synthesize_stream(text):
                if self._interrupt_event.is_set():
                    logger.info("TTS interrupted by barge-in")
                    break

                # Send audio to client
                await self.transport.send_audio(chunk.audio)

                # Small delay to prevent flooding
                await asyncio.sleep(0.01)

            # Send track end event
            await self.transport.send_event({
                "event": "trackEnd",
                "trackId": self.session_id,
                "timestamp": self._get_timestamp_ms()
            })

        except asyncio.CancelledError:
            logger.info("TTS cancelled")
            raise
        except Exception as e:
            logger.error(f"TTS error: {e}")
        finally:
            self._is_bot_speaking = False

    async def _handle_barge_in(self) -> None:
        """Handle user barge-in (interruption)."""
        if not self._is_bot_speaking:
            return

        logger.info("Barge-in detected - interrupting bot speech")

        # Signal interruption
        self._interrupt_event.set()

        # Cancel TTS
        if self.tts_service:
            await self.tts_service.cancel()

        # Cancel LLM
        if self.llm_service and hasattr(self.llm_service, 'cancel'):
            self.llm_service.cancel()

        # Interrupt conversation
        await self.conversation.interrupt()

        # Send interrupt event to client
        await self.transport.send_event({
            "event": "interrupt",
            "trackId": self.session_id,
            "timestamp": self._get_timestamp_ms()
        })

        # Reset for a new user turn
        self._is_bot_speaking = False
        await self.conversation.start_user_turn()
        self._audio_buffer = b""
        self.eou_detector.reset()

    async def _stop_current_speech(self) -> None:
        """Stop any current speech task."""
        if self._current_turn_task and not self._current_turn_task.done():
            self._interrupt_event.set()
            self._current_turn_task.cancel()
            try:
                await self._current_turn_task
            except asyncio.CancelledError:
                pass

        self._is_bot_speaking = False
        self._interrupt_event.clear()

    async def cleanup(self) -> None:
        """Clean up pipeline resources."""
        logger.info(f"Cleaning up DuplexPipeline for session {self.session_id}")

        self._running = False
        await self._stop_current_speech()

        # Disconnect services
        if self.llm_service:
            await self.llm_service.disconnect()
        if self.tts_service:
            await self.tts_service.disconnect()
        if self.asr_service:
            await self.asr_service.disconnect()

    def _get_timestamp_ms(self) -> int:
        """Get the current timestamp in milliseconds."""
        import time
        return int(time.time() * 1000)

    @property
    def is_speaking(self) -> bool:
        """Check if the bot is currently speaking."""
        return self._is_bot_speaking

    @property
    def state(self) -> ConversationState:
        """Get the current conversation state."""
        return self.conversation.state
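A rough smoke-test sketch for the pipeline, not part of the commit. It assumes settings (VAD model path, sample rate, chunk size) are configured, and it leans on the MockLLMService and MockTTSService imports above so no external API keys are needed; transport stands in for any BaseTransport implementation:

# Hypothetical smoke test for DuplexPipeline (illustration only).
import asyncio
from core.duplex_pipeline import DuplexPipeline
from services.llm import MockLLMService
from services.tts import MockTTSService

async def smoke_test(transport) -> None:
    pipeline = DuplexPipeline(
        transport=transport,              # any BaseTransport implementation
        session_id="demo-session",
        llm_service=MockLLMService(),     # no OpenAI key required
        tts_service=MockTTSService(),     # no real TTS backend required
        greeting="Hi! How can I help?",
    )
    await pipeline.start()                # connects services, speaks the greeting
    await pipeline.process_text("Tell me a joke.")  # text path bypasses ASR
    await asyncio.sleep(1.0)              # let the turn task stream TTS audio out
    await pipeline.interrupt()            # simulate a barge-in mid-response
    await pipeline.cleanup()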
core/session.py
@@ -8,6 +8,7 @@ from loguru import logger
 from core.transports import BaseTransport
 from core.pipeline import AudioPipeline
 from models.commands import parse_command, TTSCommand, ChatCommand, InterruptCommand, HangupCommand
+from app.config import settings
 
 
 class Session:
@@ -15,28 +16,44 @@
     Manages a single call session.
 
     Handles command routing, audio processing, and session lifecycle.
+    Supports both the basic audio pipeline and full duplex voice conversation.
     """
 
-    def __init__(self, session_id: str, transport: BaseTransport):
+    def __init__(self, session_id: str, transport: BaseTransport, use_duplex: Optional[bool] = None):
         """
         Initialize session.
 
         Args:
             session_id: Unique session identifier
             transport: Transport instance for communication
+            use_duplex: Whether to use the duplex pipeline (defaults to settings.duplex_enabled)
         """
         self.id = session_id
         self.transport = transport
-        self.pipeline = AudioPipeline(transport, session_id)
+
+        # Determine pipeline mode
+        self.use_duplex = use_duplex if use_duplex is not None else settings.duplex_enabled
+
+        if self.use_duplex:
+            from core.duplex_pipeline import DuplexPipeline
+            self.pipeline = DuplexPipeline(
+                transport=transport,
+                session_id=session_id,
+                system_prompt=settings.duplex_system_prompt,
+                greeting=settings.duplex_greeting
+            )
+        else:
+            self.pipeline = AudioPipeline(transport, session_id)
 
         # Session state
         self.created_at = None
         self.state = "created"  # created, invited, accepted, ringing, hungup
+        self._pipeline_started = False
 
         # Track IDs
         self.current_track_id: Optional[str] = str(uuid.uuid4())
 
-        logger.info(f"Session {self.id} created")
+        logger.info(f"Session {self.id} created (duplex={self.use_duplex})")
 
     async def handle_text(self, text_data: str) -> None:
         """
@@ -112,7 +129,10 @@ class Session:
             audio_bytes: PCM audio data
         """
         try:
-            await self.pipeline.process_input(audio_bytes)
+            if self.use_duplex:
+                await self.pipeline.process_audio(audio_bytes)
+            else:
+                await self.pipeline.process_input(audio_bytes)
         except Exception as e:
             logger.error(f"Session {self.id} handle_audio error: {e}", exc_info=True)
 
@@ -128,6 +148,15 @@
             "timestamp": self._get_timestamp_ms()
         })
 
+        # Start duplex pipeline if enabled
+        if self.use_duplex and not self._pipeline_started:
+            try:
+                await self.pipeline.start()
+                self._pipeline_started = True
+                logger.info(f"Session {self.id} duplex pipeline started")
+            except Exception as e:
+                logger.error(f"Failed to start duplex pipeline: {e}")
+
         logger.info(f"Session {self.id} invited with codec: {option.get('codec', 'pcm')}")
 
     async def _handle_accept(self, data: Dict[str, Any]) -> None:
@@ -199,7 +228,10 @@
             logger.info(f"Session {self.id} graceful interrupt")
         else:
             logger.info(f"Session {self.id} immediate interrupt")
-            await self.pipeline.interrupt()
+            if self.use_duplex:
+                await self.pipeline.interrupt()
+            else:
+                await self.pipeline.interrupt()
 
     async def _handle_pause(self) -> None:
         """Handle pause command."""
@@ -236,7 +268,10 @@
         """Handle chat command."""
         logger.info(f"Session {self.id} chat: {command.text[:50]}...")
         # Process text input through pipeline
-        await self.pipeline.process_text_input(command.text)
+        if self.use_duplex:
+            await self.pipeline.process_text(command.text)
+        else:
+            await self.pipeline.process_text_input(command.text)
 
     async def _send_error(self, sender: str, error_message: str) -> None:
         """
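Finally, a hedged sketch of the caller side, not part of the commit: the invite command starts the duplex pipeline (see the hunk above), after which incoming PCM frames flow through DuplexPipeline.process_audio():

# Hypothetical caller-side wiring (illustration only).
from core.session import Session

async def on_new_connection(transport, pcm_frames) -> None:
    # use_duplex defaults to settings.duplex_enabled; force it on here.
    session = Session(session_id="demo-call", transport=transport, use_duplex=True)

    # ... invite handling via session.handle_text(...) starts the pipeline ...

    for frame in pcm_frames:              # 16-bit mono PCM chunks
        await session.handle_audio(frame)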