# AI-VideoAssistant-Engine-V2/.env.example
# -----------------------------------------------------------------------------
# Engine .env example (safe template)
# Notes:
# - Never commit real API keys.
# - Start with defaults below, then tune from logs.
# -----------------------------------------------------------------------------
# Server
HOST=0.0.0.0
PORT=8000
# EXTERNAL_IP=1.2.3.4
# Backend bridge (optional)
# BACKEND_MODE — allowed values: auto | http | disabled
BACKEND_MODE=auto
BACKEND_URL=http://127.0.0.1:8100
BACKEND_TIMEOUT_SEC=10
HISTORY_ENABLED=true
HISTORY_DEFAULT_USER_ID=1
HISTORY_QUEUE_MAX_SIZE=256
HISTORY_RETRY_MAX_ATTEMPTS=2
HISTORY_RETRY_BACKOFF_SEC=0.2
HISTORY_FINALIZE_DRAIN_TIMEOUT_SEC=1.5
# Audio
SAMPLE_RATE=16000
# 20ms is recommended for VAD stability and latency.
# 100ms works but usually worsens start-of-speech accuracy.
CHUNK_SIZE_MS=20
DEFAULT_CODEC=pcm
MAX_AUDIO_BUFFER_SECONDS=30
# Agent profile selection (optional fallback when CLI args are not used)
# Prefer CLI:
# python -m app.main --agent-config config/agents/default.yaml
# python -m app.main --agent-profile default
# AGENT_CONFIG_PATH=config/agents/default.yaml
# AGENT_PROFILE=default
AGENT_CONFIG_DIR=config/agents
# Optional: provider credentials referenced from YAML, e.g. ${LLM_API_KEY}
# LLM_API_KEY=your_llm_api_key_here
# LLM_API_URL=https://api.openai.com/v1
# TTS_API_KEY=your_tts_api_key_here
# TTS_API_URL=https://api.example.com/v1/audio/speech
# ASR_API_KEY=your_asr_api_key_here
# ASR_API_URL=https://api.example.com/v1/audio/transcriptions
# Logging
LOG_LEVEL=INFO
# json is better for production/observability; text is easier locally.
LOG_FORMAT=json
# WebSocket behavior
INACTIVITY_TIMEOUT_SEC=60
HEARTBEAT_INTERVAL_SEC=50
WS_PROTOCOL_VERSION=v1
# Shared secret for WebSocket auth; presumably required when WS_REQUIRE_AUTH=true — verify against server code.
# WS_API_KEY=replace_with_shared_secret
WS_REQUIRE_AUTH=false
# CORS / ICE (JSON strings)
CORS_ORIGINS=["http://localhost:3000","http://localhost:8080"]
ICE_SERVERS=[{"urls":"stun:stun.l.google.com:19302"}]