Organize config
@@ -310,7 +310,12 @@ class DuplexPipeline:
 
     def resolved_runtime_config(self) -> Dict[str, Any]:
         """Return current effective runtime configuration without secrets."""
-        llm_provider = str(self._runtime_llm.get("provider") or "openai").lower()
+        llm_provider = str(self._runtime_llm.get("provider") or settings.llm_provider).lower()
+        llm_base_url = (
+            self._runtime_llm.get("baseUrl")
+            or settings.llm_api_url
+            or self._default_llm_base_url(llm_provider)
+        )
         tts_provider = str(self._runtime_tts.get("provider") or settings.tts_provider).lower()
         asr_provider = str(self._runtime_asr.get("provider") or settings.asr_provider).lower()
         output_mode = str(self._runtime_output.get("mode") or "").strip().lower()
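The effective LLM base URL now resolves in three steps: the per-session runtime override, then the generic settings.llm_api_url, then a provider-specific default. A minimal sketch of that precedence, assuming only the names visible in the diff (the standalone helpers below are illustrative, not the repository's code):

from typing import Any, Optional

def default_llm_base_url(provider: Any) -> Optional[str]:
    # Stand-in for DuplexPipeline._default_llm_base_url: only SiliconFlow has a built-in endpoint.
    if str(provider or "").strip().lower() == "siliconflow":
        return "https://api.siliconflow.cn/v1"
    return None

def resolve_llm_base_url(runtime_llm: dict, settings_llm_api_url: Optional[str], provider: str) -> Optional[str]:
    # Runtime override wins, then the generic setting, then the provider default.
    return (
        runtime_llm.get("baseUrl")
        or settings_llm_api_url
        or default_llm_base_url(provider)
    )

assert resolve_llm_base_url({}, None, "siliconflow") == "https://api.siliconflow.cn/v1"
assert resolve_llm_base_url({"baseUrl": "http://localhost:8000/v1"}, None, "openai") == "http://localhost:8000/v1"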
@@ -323,18 +328,18 @@ class DuplexPipeline:
             "llm": {
                 "provider": llm_provider,
                 "model": str(self._runtime_llm.get("model") or settings.llm_model),
-                "baseUrl": self._runtime_llm.get("baseUrl") or settings.openai_api_url,
+                "baseUrl": llm_base_url,
             },
             "asr": {
                 "provider": asr_provider,
-                "model": str(self._runtime_asr.get("model") or settings.siliconflow_asr_model),
+                "model": str(self._runtime_asr.get("model") or settings.asr_model or ""),
                 "interimIntervalMs": int(self._runtime_asr.get("interimIntervalMs") or settings.asr_interim_interval_ms),
                 "minAudioMs": int(self._runtime_asr.get("minAudioMs") or settings.asr_min_audio_ms),
             },
             "tts": {
                 "enabled": self._tts_output_enabled(),
                 "provider": tts_provider,
-                "model": str(self._runtime_tts.get("model") or settings.siliconflow_tts_model),
+                "model": str(self._runtime_tts.get("model") or settings.tts_model or ""),
                 "voice": str(self._runtime_tts.get("voice") or settings.tts_voice),
                 "speed": float(self._runtime_tts.get("speed") or settings.tts_speed),
             },
@@ -452,6 +457,18 @@ class DuplexPipeline:
         normalized = str(provider or "").strip().lower()
         return normalized in {"openai_compatible", "openai-compatible", "siliconflow"}
+
+    @staticmethod
+    def _is_llm_provider_supported(provider: Any) -> bool:
+        normalized = str(provider or "").strip().lower()
+        return normalized in {"openai", "openai_compatible", "openai-compatible", "siliconflow"}
+
+    @staticmethod
+    def _default_llm_base_url(provider: Any) -> Optional[str]:
+        normalized = str(provider or "").strip().lower()
+        if normalized == "siliconflow":
+            return "https://api.siliconflow.cn/v1"
+        return None
 
     def _tts_output_enabled(self) -> bool:
         enabled = self._coerce_bool(self._runtime_tts.get("enabled"))
         if enabled is not None:
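_is_llm_provider_supported is what gates the real LLM client further down; anything outside the set falls back to the mock service. Restated as a standalone sketch with a few expected values (the set literal is copied from the method above; the free function is illustrative):

def is_llm_provider_supported(provider) -> bool:
    # Same normalization and membership check as the static method above.
    normalized = str(provider or "").strip().lower()
    return normalized in {"openai", "openai_compatible", "openai-compatible", "siliconflow"}

assert is_llm_provider_supported(" OpenAI ")            # case and stray whitespace are normalized away
assert is_llm_provider_supported("openai-compatible")   # both spellings of the compatible provider pass
assert not is_llm_provider_supported(None)              # missing provider counts as unsupported
assert not is_llm_provider_supported("anthropic")       # unknown providers count as unsupported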
@@ -527,12 +544,16 @@ class DuplexPipeline:
         try:
             # Connect LLM service
            if not self.llm_service:
-                llm_api_key = self._runtime_llm.get("apiKey") or settings.openai_api_key
-                llm_base_url = self._runtime_llm.get("baseUrl") or settings.openai_api_url
+                llm_provider = (self._runtime_llm.get("provider") or settings.llm_provider).lower()
+                llm_api_key = self._runtime_llm.get("apiKey") or settings.llm_api_key
+                llm_base_url = (
+                    self._runtime_llm.get("baseUrl")
+                    or settings.llm_api_url
+                    or self._default_llm_base_url(llm_provider)
+                )
                 llm_model = self._runtime_llm.get("model") or settings.llm_model
-                llm_provider = (self._runtime_llm.get("provider") or "openai").lower()
 
-                if llm_provider == "openai" and llm_api_key:
+                if self._is_llm_provider_supported(llm_provider) and llm_api_key:
                     self.llm_service = OpenAILLMService(
                         api_key=llm_api_key,
                         base_url=llm_base_url,
@@ -540,7 +561,7 @@ class DuplexPipeline:
                         knowledge_config=self._resolved_knowledge_config(),
                     )
                 else:
-                    logger.warning("No OpenAI API key - using mock LLM")
+                    logger.warning("LLM provider unsupported or API key missing - using mock LLM")
                     self.llm_service = MockLLMService()
 
             if hasattr(self.llm_service, "set_knowledge_config"):
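Taken together, the LLM branch now builds a real client for any supported provider that has an API key, and drops to the mock only when the provider is unknown or the key is missing. A hedged sketch of that decision with placeholder classes (the real OpenAILLMService and MockLLMService live in the repository; their constructor signatures here are assumed):

from typing import Optional

class PlaceholderOpenAILLMService:
    # Placeholder that only records the arguments the diff passes.
    def __init__(self, api_key: str, base_url: Optional[str], model: str):
        self.api_key, self.base_url, self.model = api_key, base_url, model

class PlaceholderMockLLMService:
    pass

def build_llm(provider: str, api_key: Optional[str], base_url: Optional[str], model: str):
    supported = {"openai", "openai_compatible", "openai-compatible", "siliconflow"}
    if provider.strip().lower() in supported and api_key:
        return PlaceholderOpenAILLMService(api_key=api_key, base_url=base_url, model=model)
    return PlaceholderMockLLMService()  # unsupported provider or missing key -> mock

assert isinstance(build_llm("siliconflow", "sk-test", "https://api.siliconflow.cn/v1", "some-model"),
                  PlaceholderOpenAILLMService)
assert isinstance(build_llm("anthropic", "sk-test", None, "some-model"), PlaceholderMockLLMService)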
@@ -556,20 +577,22 @@ class DuplexPipeline:
             if tts_output_enabled:
                 if not self.tts_service:
                     tts_provider = (self._runtime_tts.get("provider") or settings.tts_provider).lower()
-                    tts_api_key = self._runtime_tts.get("apiKey") or settings.siliconflow_api_key
+                    tts_api_key = self._runtime_tts.get("apiKey") or settings.tts_api_key
+                    tts_api_url = self._runtime_tts.get("baseUrl") or settings.tts_api_url
                     tts_voice = self._runtime_tts.get("voice") or settings.tts_voice
-                    tts_model = self._runtime_tts.get("model") or settings.siliconflow_tts_model
+                    tts_model = self._runtime_tts.get("model") or settings.tts_model
                     tts_speed = float(self._runtime_tts.get("speed") or settings.tts_speed)
 
                     if self._is_openai_compatible_provider(tts_provider) and tts_api_key:
                         self.tts_service = OpenAICompatibleTTSService(
                             api_key=tts_api_key,
+                            api_url=tts_api_url,
                             voice=tts_voice,
-                            model=tts_model,
+                            model=tts_model or "FunAudioLLM/CosyVoice2-0.5B",
                             sample_rate=settings.sample_rate,
                             speed=tts_speed
                         )
-                        logger.info("Using OpenAI-compatible TTS service (SiliconFlow implementation)")
+                        logger.info(f"Using OpenAI-compatible TTS service (provider={tts_provider})")
                     else:
                         self.tts_service = EdgeTTSService(
                             voice=tts_voice,
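One asymmetry worth noting: resolved_runtime_config reports the TTS model as an empty string when nothing is configured, while the service call above still falls back to a hard-coded default. A tiny sketch of that model pick (the default string is the literal from the diff; the helper is illustrative):

def pick_tts_model(runtime_model, settings_model) -> str:
    # Session override first, then settings.tts_model, then the hard-coded default.
    return runtime_model or settings_model or "FunAudioLLM/CosyVoice2-0.5B"

assert pick_tts_model(None, None) == "FunAudioLLM/CosyVoice2-0.5B"
assert pick_tts_model(None, "my-tts-model") == "my-tts-model"
assert pick_tts_model("session-tts", "my-tts-model") == "session-tts"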
@@ -592,21 +615,23 @@ class DuplexPipeline:
             # Connect ASR service
             if not self.asr_service:
                 asr_provider = (self._runtime_asr.get("provider") or settings.asr_provider).lower()
-                asr_api_key = self._runtime_asr.get("apiKey") or settings.siliconflow_api_key
-                asr_model = self._runtime_asr.get("model") or settings.siliconflow_asr_model
+                asr_api_key = self._runtime_asr.get("apiKey") or settings.asr_api_key
+                asr_api_url = self._runtime_asr.get("baseUrl") or settings.asr_api_url
+                asr_model = self._runtime_asr.get("model") or settings.asr_model
                 asr_interim_interval = int(self._runtime_asr.get("interimIntervalMs") or settings.asr_interim_interval_ms)
                 asr_min_audio_ms = int(self._runtime_asr.get("minAudioMs") or settings.asr_min_audio_ms)
 
                 if self._is_openai_compatible_provider(asr_provider) and asr_api_key:
                     self.asr_service = OpenAICompatibleASRService(
                         api_key=asr_api_key,
-                        model=asr_model,
+                        api_url=asr_api_url,
+                        model=asr_model or "FunAudioLLM/SenseVoiceSmall",
                         sample_rate=settings.sample_rate,
                         interim_interval_ms=asr_interim_interval,
                         min_audio_for_interim_ms=asr_min_audio_ms,
                         on_transcript=self._on_transcript_callback
                     )
-                    logger.info("Using OpenAI-compatible ASR service (SiliconFlow implementation)")
+                    logger.info(f"Using OpenAI-compatible ASR service (provider={asr_provider})")
                 else:
                     self.asr_service = BufferedASRService(
                         sample_rate=settings.sample_rate
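The diff assumes the settings object now exposes provider-agnostic fields instead of the old openai_*/siliconflow_* ones. The field names below are exactly the settings.* attributes read above; the dataclass wrapper and every default value are only an illustrative sketch, not the repository's actual configuration:

from dataclasses import dataclass
from typing import Optional

@dataclass
class SettingsSketch:
    # LLM
    llm_provider: str = "openai"             # illustrative default
    llm_api_key: Optional[str] = None
    llm_api_url: Optional[str] = None
    llm_model: Optional[str] = None
    # ASR
    asr_provider: str = "openai_compatible"  # illustrative default
    asr_api_key: Optional[str] = None
    asr_api_url: Optional[str] = None
    asr_model: Optional[str] = None
    asr_interim_interval_ms: int = 300       # illustrative default
    asr_min_audio_ms: int = 400              # illustrative default
    # TTS
    tts_provider: str = "edge"               # illustrative default
    tts_api_key: Optional[str] = None
    tts_api_url: Optional[str] = None
    tts_model: Optional[str] = None
    tts_voice: Optional[str] = None
    tts_speed: float = 1.0                   # illustrative default
    # Shared audio
    sample_rate: int = 16000                 # illustrative default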