Fix ASR boundary bug

This commit is contained in:
Xin Wang
2026-02-09 18:28:43 +08:00
parent a140e3a599
commit 0135f718f3
2 changed files with 39 additions and 2 deletions

View File

@@ -119,6 +119,15 @@ class DuplexPipeline:
self._audio_buffer: bytes = b""
max_buffer_seconds = settings.max_audio_buffer_seconds if hasattr(settings, "max_audio_buffer_seconds") else 30
self._max_audio_buffer_bytes = int(settings.sample_rate * 2 * max_buffer_seconds)
# Keep a short rolling pre-speech window so VAD transition latency
# does not clip the first phoneme/character sent to ASR.
pre_speech_ms = settings.asr_pre_speech_ms if hasattr(settings, "asr_pre_speech_ms") else 240
self._asr_pre_speech_bytes = int(settings.sample_rate * 2 * (pre_speech_ms / 1000.0))
self._pre_speech_buffer: bytes = b""
# Add a tiny trailing silence tail before final ASR to avoid
# clipping the last phoneme at utterance boundaries.
asr_final_tail_ms = settings.asr_final_tail_ms if hasattr(settings, "asr_final_tail_ms") else 120
self._asr_final_tail_bytes = int(settings.sample_rate * 2 * (asr_final_tail_ms / 1000.0))
self._last_vad_status: str = "Silence"
self._process_lock = asyncio.Lock()
@@ -285,6 +294,11 @@ class DuplexPipeline:
try:
async with self._process_lock:
if pcm_bytes:
self._pre_speech_buffer += pcm_bytes
if len(self._pre_speech_buffer) > self._asr_pre_speech_bytes:
self._pre_speech_buffer = self._pre_speech_buffer[-self._asr_pre_speech_bytes:]
# 1. Process through VAD
vad_result = self.vad_processor.process(pcm_bytes, settings.chunk_size_ms)
@@ -311,7 +325,7 @@ class DuplexPipeline:
# Update state based on VAD
if vad_status == "Speech" and self._last_vad_status != "Speech":
await self._on_speech_start()
await self._on_speech_start(current_chunk=pcm_bytes)
self._last_vad_status = vad_status
@@ -418,7 +432,7 @@ class DuplexPipeline:
logger.info(f"ASR interim: {text[:100]}")
logger.debug(f"Sent transcript ({'final' if is_final else 'interim'}): {text[:50]}...")
async def _on_speech_start(self) -> None:
async def _on_speech_start(self, current_chunk: bytes = b"") -> None:
"""Handle user starting to speak."""
if self.conversation.state in (ConversationState.IDLE, ConversationState.INTERRUPTED):
await self.conversation.start_user_turn()
@@ -431,6 +445,16 @@ class DuplexPipeline:
self.asr_service.clear_buffer()
if hasattr(self.asr_service, 'start_interim_transcription'):
await self.asr_service.start_interim_transcription()
# Prime ASR with a short pre-speech context window so the utterance
# start isn't lost while waiting for VAD to transition to Speech.
pre_roll = self._pre_speech_buffer
if current_chunk and len(pre_roll) > len(current_chunk):
pre_roll = pre_roll[:-len(current_chunk)]
elif current_chunk:
pre_roll = b""
if pre_roll:
await self.asr_service.send_audio(pre_roll)
self._audio_buffer = pre_roll
logger.debug("User speech started")
@@ -439,6 +463,11 @@ class DuplexPipeline:
if self.conversation.state not in (ConversationState.LISTENING, ConversationState.INTERRUPTED):
return
# Add a tiny trailing silence tail to stabilize final-token decoding.
if self._asr_final_tail_bytes > 0:
final_tail = b"\x00" * self._asr_final_tail_bytes
await self.asr_service.send_audio(final_tail)
# Stop interim transcriptions
if hasattr(self.asr_service, 'stop_interim_transcription'):
await self.asr_service.stop_interim_transcription()