Add opener audio functionality to Assistant model and related schemas, enabling audio generation and playback features. Update API routes and frontend components to support opener audio management, including status retrieval and generation controls.

This commit is contained in:
Xin Wang
2026-02-26 14:31:50 +08:00
parent 833cb0d4c4
commit fb95e2abe2
9 changed files with 551 additions and 4 deletions

View File

@@ -36,6 +36,10 @@ const mapAssistant = (raw: AnyRecord): Assistant => ({
firstTurnMode: readField(raw, ['firstTurnMode', 'first_turn_mode'], 'bot_first') as 'bot_first' | 'user_first',
opener: readField(raw, ['opener'], ''),
generatedOpenerEnabled: Boolean(readField(raw, ['generatedOpenerEnabled', 'generated_opener_enabled'], false)),
openerAudioEnabled: Boolean(readField(raw, ['openerAudioEnabled', 'opener_audio_enabled'], false)),
openerAudioReady: Boolean(readField(raw, ['openerAudioReady', 'opener_audio_ready'], false)),
openerAudioDurationMs: Number(readField(raw, ['openerAudioDurationMs', 'opener_audio_duration_ms'], 0)),
openerAudioUpdatedAt: readField(raw, ['openerAudioUpdatedAt', 'opener_audio_updated_at'], ''),
prompt: readField(raw, ['prompt'], ''),
knowledgeBaseId: readField(raw, ['knowledgeBaseId', 'knowledge_base_id'], ''),
language: readField(raw, ['language'], 'zh') as 'zh' | 'en',
@@ -228,6 +232,7 @@ export const createAssistant = async (data: Partial<Assistant>): Promise<Assista
firstTurnMode: data.firstTurnMode || 'bot_first',
opener: data.opener || '',
generatedOpenerEnabled: data.generatedOpenerEnabled ?? false,
openerAudioEnabled: data.openerAudioEnabled ?? false,
prompt: data.prompt || '',
knowledgeBaseId: data.knowledgeBaseId || '',
language: data.language || 'zh',
@@ -256,6 +261,7 @@ export const updateAssistant = async (id: string, data: Partial<Assistant>): Pro
firstTurnMode: data.firstTurnMode,
opener: data.opener,
generatedOpenerEnabled: data.generatedOpenerEnabled,
openerAudioEnabled: data.openerAudioEnabled,
prompt: data.prompt,
knowledgeBaseId: data.knowledgeBaseId,
language: data.language,
@@ -295,10 +301,36 @@ export interface AssistantRuntimeConfigResponse {
warnings?: string[];
}
/**
 * Status of an assistant's pre-generated opener audio, as returned by the
 * `/assistants/:id/opener-audio` endpoints (see fetchAssistantOpenerAudioStatus
 * and generateAssistantOpenerAudio below).
 *
 * NOTE(review): field names are snake_case, unlike the camelCase used elsewhere
 * in this module — presumably this mirrors the backend JSON verbatim without a
 * mapping step; confirm before renaming.
 */
export interface AssistantOpenerAudioStatus {
  /** Whether opener audio is enabled for this assistant. */
  enabled: boolean;
  /** Whether a generated audio clip exists and is available for playback. */
  ready: boolean;
  /** Audio encoding identifier reported by the backend. */
  encoding: string;
  /** Sample rate in Hz (per the field name). */
  sample_rate_hz: number;
  /** Number of audio channels. */
  channels: number;
  /** Clip duration in milliseconds (per the field name). */
  duration_ms: number;
  /** Timestamp of the last generation; absent/null when never generated — TODO confirm format. */
  updated_at?: string | null;
  /** Hash of the opener text — presumably used to detect stale audio; verify against backend. */
  text_hash?: string | null;
  /** Fingerprint of the TTS configuration — presumably for staleness detection; verify against backend. */
  tts_fingerprint?: string | null;
}
/**
 * Fetch the resolved runtime configuration for one assistant.
 *
 * @param assistantId - Assistant identifier. URL-encoded before interpolation
 *   so IDs containing reserved characters (`/`, `?`, `#`, spaces) cannot
 *   corrupt the request path.
 * @returns The assistant's runtime configuration payload.
 */
export const fetchAssistantRuntimeConfig = async (assistantId: string): Promise<AssistantRuntimeConfigResponse> => {
  return apiRequest<AssistantRuntimeConfigResponse>(`/assistants/${encodeURIComponent(assistantId)}/config`);
};
/**
 * Retrieve the opener-audio status for one assistant.
 *
 * @param assistantId - Assistant identifier. URL-encoded before interpolation
 *   so IDs containing reserved characters cannot corrupt the request path.
 * @returns Current opener-audio status (enabled/ready flags plus clip metadata).
 */
export const fetchAssistantOpenerAudioStatus = async (assistantId: string): Promise<AssistantOpenerAudioStatus> => {
  return apiRequest<AssistantOpenerAudioStatus>(`/assistants/${encodeURIComponent(assistantId)}/opener-audio`);
};
/**
 * Trigger (re)generation of an assistant's opener audio.
 *
 * @param assistantId - Assistant identifier. URL-encoded before interpolation
 *   so IDs containing reserved characters cannot corrupt the request path.
 * @param payload - Optional override: `text` replaces the stored opener text
 *   for this generation — TODO confirm semantics against the backend route.
 * @returns The opener-audio status after the generation request.
 */
export const generateAssistantOpenerAudio = async (
  assistantId: string,
  payload?: { text?: string }
): Promise<AssistantOpenerAudioStatus> => {
  return apiRequest<AssistantOpenerAudioStatus>(`/assistants/${encodeURIComponent(assistantId)}/opener-audio/generate`, {
    method: 'POST',
    // `??` rather than `||`: default only when payload is nullish (identical
    // here since payload is an object or undefined, but states intent precisely).
    body: payload ?? {},
  });
};
export const fetchVoices = async (): Promise<Voice[]> => {
const response = await apiRequest<{ list?: AnyRecord[] } | AnyRecord[]>(withLimit('/voices'));
const list = Array.isArray(response) ? response : (response.list || []);