Add backend api and engine
This commit is contained in:
66
api/.gitignore
vendored
Normal file
66
api/.gitignore
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
# Python
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
*.so
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
|
||||
# Virtual environments
|
||||
venv/
|
||||
ENV/
|
||||
env/
|
||||
.venv
|
||||
|
||||
# Environment variables
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
|
||||
# Database
|
||||
*.db
|
||||
*.sqlite
|
||||
*.sqlite3
|
||||
|
||||
# Vector store data
|
||||
data/vector_store/
|
||||
!data/vector_store/.gitkeep
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Docker
|
||||
.docker/
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
|
||||
# Pytest
|
||||
.pytest_cache/
|
||||
.coverage
|
||||
htmlcov/
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
17
api/Dockerfile
Normal file
17
api/Dockerfile
Normal file
@@ -0,0 +1,17 @@
|
||||
FROM python:3.11-slim

WORKDIR /app

# Install dependencies first so the layer is cached across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Create the data directory (SQLite database lives under /app/data)
RUN mkdir -p /app/data

EXPOSE 8000

# NOTE(review): --reload is a development flag (file-watching hot reload);
# drop it for production images.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
|
||||
104
api/README.md
104
api/README.md
@@ -1 +1,103 @@
|
||||
# Backend Service
|
||||
# AI VideoAssistant Backend
|
||||
|
||||
Python 后端 API,配合前端 `ai-videoassistant-frontend` 使用。
|
||||
|
||||
## 快速开始
|
||||
|
||||
### 1. 安装依赖
|
||||
|
||||
```bash
|
||||
cd ~/Code/ai-videoassistant-backend
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### 2. 初始化数据库
|
||||
|
||||
```bash
|
||||
python init_db.py
|
||||
```
|
||||
|
||||
这会:
|
||||
- 创建 `data/app.db` SQLite 数据库
|
||||
- 初始化默认声音数据
|
||||
|
||||
### 3. 启动服务
|
||||
|
||||
```bash
|
||||
# 开发模式 (热重载)
|
||||
python -m uvicorn main:app --reload --host 0.0.0.0 --port 8000
|
||||
```
|
||||
|
||||
### 4. 测试 API
|
||||
|
||||
```bash
|
||||
# 健康检查
|
||||
curl http://localhost:8000/health
|
||||
|
||||
# 获取助手列表
|
||||
curl http://localhost:8000/api/assistants
|
||||
|
||||
# 获取声音列表
|
||||
curl http://localhost:8000/api/voices
|
||||
|
||||
# 获取通话历史
|
||||
curl http://localhost:8000/api/history
|
||||
```
|
||||
|
||||
## API 文档
|
||||
|
||||
| 端点 | 方法 | 说明 |
|
||||
|------|------|------|
|
||||
| `/api/assistants` | GET | 助手列表 |
|
||||
| `/api/assistants` | POST | 创建助手 |
|
||||
| `/api/assistants/{id}` | GET | 助手详情 |
|
||||
| `/api/assistants/{id}` | PUT | 更新助手 |
|
||||
| `/api/assistants/{id}` | DELETE | 删除助手 |
|
||||
| `/api/voices` | GET | 声音库列表 |
|
||||
| `/api/history` | GET | 通话历史列表 |
|
||||
| `/api/history/{id}` | GET | 通话详情 |
|
||||
| `/api/history/{id}/transcripts` | POST | 添加转写 |
|
||||
| `/api/history/{id}/audio/{turn}` | GET | 获取音频 |
|
||||
|
||||
## 使用 Docker 启动
|
||||
|
||||
```bash
|
||||
cd ~/Code/ai-videoassistant-backend
|
||||
|
||||
# 启动所有服务
|
||||
docker-compose up -d
|
||||
|
||||
# 查看日志
|
||||
docker-compose logs -f backend
|
||||
```
|
||||
|
||||
## 目录结构
|
||||
|
||||
```
|
||||
backend/
|
||||
├── app/
|
||||
│ ├── __init__.py
|
||||
│ ├── main.py # FastAPI 入口
|
||||
│ ├── db.py # SQLite 连接
|
||||
│ ├── models.py # 数据模型
|
||||
│ ├── schemas.py # Pydantic 模型
|
||||
│ ├── storage.py # MinIO 存储
|
||||
│ └── routers/
|
||||
│ ├── __init__.py
|
||||
│ ├── assistants.py # 助手 API
|
||||
│ └── history.py # 通话记录 API
|
||||
├── data/ # 数据库文件
|
||||
├── requirements.txt
|
||||
├── .env
|
||||
└── docker-compose.yml
|
||||
```
|
||||
|
||||
## 环境变量
|
||||
|
||||
| 变量 | 默认值 | 说明 |
|
||||
|------|--------|------|
|
||||
| `DATABASE_URL` | `sqlite:///./data/app.db` | 数据库连接 |
|
||||
| `MINIO_ENDPOINT` | `localhost:9000` | MinIO 地址 |
|
||||
| `MINIO_ACCESS_KEY` | `admin` | MinIO 密钥 |
|
||||
| `MINIO_SECRET_KEY` | `password123` | MinIO 密码 |
|
||||
| `MINIO_BUCKET` | `ai-audio` | 存储桶名称 |
|
||||
|
||||
19
api/app/db.py
Normal file
19
api/app/db.py
Normal file
@@ -0,0 +1,19 @@
|
||||
import os

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, DeclarativeBase

# Connection string; overridable via the DATABASE_URL env var, which the
# project README documents but the previous code ignored (it was hard-coded).
DATABASE_URL = os.environ.get("DATABASE_URL", "sqlite:///./data/app.db")

# check_same_thread=False is a SQLite-only argument: FastAPI may use the
# session from a different thread than the one that opened the connection.
_connect_args = {"check_same_thread": False} if DATABASE_URL.startswith("sqlite") else {}

engine = create_engine(DATABASE_URL, connect_args=_connect_args)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


class Base(DeclarativeBase):
    """Declarative base shared by all ORM models."""
    pass


def get_db():
    """FastAPI dependency yielding a request-scoped session, always closed."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
|
||||
72
api/app/main.py
Normal file
72
api/app/main.py
Normal file
@@ -0,0 +1,72 @@
|
||||
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager
import os

from .db import Base, engine
from .routers import assistants, history


def init_default_data():
    """Seed the voice catalog once, when the `voices` table is empty."""
    from .db import SessionLocal
    from .models import Voice

    db = SessionLocal()
    try:
        if db.query(Voice).count() == 0:
            voices = [
                Voice(id="v1", name="Xiaoyun", vendor="Ali", gender="Female", language="zh", description="Gentle and professional."),
                Voice(id="v2", name="Kevin", vendor="Volcano", gender="Male", language="en", description="Deep and authoritative."),
                Voice(id="v3", name="Abby", vendor="Minimax", gender="Female", language="en", description="Cheerful and lively."),
                Voice(id="v4", name="Guang", vendor="Ali", gender="Male", language="zh", description="Standard newscast style."),
                Voice(id="v5", name="Doubao", vendor="Volcano", gender="Female", language="zh", description="Cute and young."),
            ]
            db.add_all(voices)
            db.commit()
            print("✅ 默认声音数据已初始化")
    finally:
        db.close()


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: create tables, then seed defaults. Consolidated here because
    # the previous @app.on_event("startup") hook is deprecated and is removed
    # in newer FastAPI releases; mixing it with `lifespan` is unsupported.
    Base.metadata.create_all(bind=engine)
    init_default_data()
    yield


app = FastAPI(
    title="AI VideoAssistant API",
    description="Backend API for AI VideoAssistant",
    version="1.0.0",
    lifespan=lifespan
)

# CORS: wide open for development; tighten allow_origins before production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Routers. NOTE(review): app/routers/__init__.py builds an aggregate router
# that additionally mounts `knowledge`, but it is not used here — confirm
# whether the knowledge routes are meant to be exposed.
app.include_router(assistants.router, prefix="/api")
app.include_router(history.router, prefix="/api")


@app.get("/")
def root():
    return {"message": "AI VideoAssistant API", "version": "1.0.0"}


@app.get("/health")
def health():
    return {"status": "ok"}
|
||||
165
api/app/models.py
Normal file
165
api/app/models.py
Normal file
@@ -0,0 +1,165 @@
|
||||
from datetime import datetime
|
||||
from typing import List, Optional
|
||||
from sqlalchemy import String, Integer, DateTime, Text, Float, ForeignKey, JSON
|
||||
from sqlalchemy.orm import Mapped, mapped_column, relationship
|
||||
|
||||
from .db import Base
|
||||
|
||||
|
||||
class User(Base):
    """Account record; referenced by assistants, KBs, workflows, and calls."""

    __tablename__ = "users"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
    email: Mapped[str] = mapped_column(String(255), unique=True, index=True, nullable=False)
    password_hash: Mapped[str] = mapped_column(String(255), nullable=False)
    # NOTE(review): datetime.utcnow yields naive timestamps and is deprecated
    # in Python 3.12 — consider a timezone-aware default.
    created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)
|
||||
|
||||
|
||||
class Voice(Base):
    """A TTS voice available in the voice library."""

    __tablename__ = "voices"

    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    name: Mapped[str] = mapped_column(String(128), nullable=False)
    vendor: Mapped[str] = mapped_column(String(64), nullable=False)  # e.g. Ali / Volcano / Minimax (see main.py seeds)
    gender: Mapped[str] = mapped_column(String(32), nullable=False)
    language: Mapped[str] = mapped_column(String(16), nullable=False)
    description: Mapped[str] = mapped_column(String(255), nullable=False)
    # Free-form JSON object of vendor-specific synthesis parameters.
    voice_params: Mapped[dict] = mapped_column(JSON, default=dict)
|
||||
|
||||
|
||||
class Assistant(Base):
    """A configured voice assistant owned by a user."""

    __tablename__ = "assistants"

    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), index=True)
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    call_count: Mapped[int] = mapped_column(Integer, default=0)
    opener: Mapped[str] = mapped_column(Text, default="")  # opening line
    prompt: Mapped[str] = mapped_column(Text, default="")  # system prompt
    knowledge_base_id: Mapped[Optional[str]] = mapped_column(String(64), nullable=True)
    language: Mapped[str] = mapped_column(String(16), default="zh")
    voice: Mapped[Optional[str]] = mapped_column(String(64), nullable=True)
    speed: Mapped[float] = mapped_column(Float, default=1.0)
    # JSON arrays — annotation corrected from Mapped[dict] to Mapped[list]
    # to match default=list and the list payloads routers assign.
    hotwords: Mapped[list] = mapped_column(JSON, default=list)
    tools: Mapped[list] = mapped_column(JSON, default=list)
    # Presumably a barge-in threshold; unit unclear from here — TODO confirm.
    interruption_sensitivity: Mapped[int] = mapped_column(Integer, default=500)
    # "platform" by default; presumably a custom mode uses api_url/api_key — verify.
    config_mode: Mapped[str] = mapped_column(String(32), default="platform")
    api_url: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
    api_key: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
    created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)
    updated_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)

    user = relationship("User")
    call_records = relationship("CallRecord", back_populates="assistant")
|
||||
|
||||
|
||||
class KnowledgeBase(Base):
    """A RAG knowledge base; counters are denormalized for listing views."""

    __tablename__ = "knowledge_bases"

    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), index=True)
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    description: Mapped[str] = mapped_column(Text, default="")
    embedding_model: Mapped[str] = mapped_column(String(64), default="text-embedding-3-small")
    chunk_size: Mapped[int] = mapped_column(Integer, default=500)
    chunk_overlap: Mapped[int] = mapped_column(Integer, default=50)
    doc_count: Mapped[int] = mapped_column(Integer, default=0)
    chunk_count: Mapped[int] = mapped_column(Integer, default=0)
    status: Mapped[str] = mapped_column(String(32), default="active")
    created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)
    updated_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)

    user = relationship("User")
    documents = relationship("KnowledgeDocument", back_populates="kb")
|
||||
|
||||
|
||||
class KnowledgeDocument(Base):
    """One uploaded document inside a knowledge base."""

    __tablename__ = "knowledge_documents"

    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    kb_id: Mapped[str] = mapped_column(String(64), ForeignKey("knowledge_bases.id"), index=True)
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    # Human-readable size string as supplied by the uploader, not bytes.
    size: Mapped[str] = mapped_column(String(64), nullable=False)
    file_type: Mapped[str] = mapped_column(String(32), default="txt")
    storage_url: Mapped[Optional[str]] = mapped_column(String(512), nullable=True)
    status: Mapped[str] = mapped_column(String(32), default="pending")  # pending/processing/completed/failed
    chunk_count: Mapped[int] = mapped_column(Integer, default=0)
    error_message: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    # Stored as an ISO string (see knowledge.upload_document), not a DateTime.
    upload_date: Mapped[str] = mapped_column(String(32), nullable=False)
    created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)
    processed_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True)

    kb = relationship("KnowledgeBase", back_populates="documents")
|
||||
|
||||
|
||||
class Workflow(Base):
    """A conversation workflow graph (nodes + edges stored as JSON)."""

    __tablename__ = "workflows"

    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), index=True)
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    node_count: Mapped[int] = mapped_column(Integer, default=0)
    # NOTE(review): timestamps are plain strings here (routers write ISO
    # strings), unlike the DateTime columns on other models — intentional?
    created_at: Mapped[str] = mapped_column(String(32), default="")
    updated_at: Mapped[str] = mapped_column(String(32), default="")
    global_prompt: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    # JSON arrays — annotation corrected from Mapped[dict] to Mapped[list]
    # to match default=list.
    nodes: Mapped[list] = mapped_column(JSON, default=list)
    edges: Mapped[list] = mapped_column(JSON, default=list)

    user = relationship("User")
|
||||
|
||||
|
||||
class CallRecord(Base):
    """One call session; parent of transcript turns and audio segments."""

    __tablename__ = "call_records"

    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), index=True)
    assistant_id: Mapped[Optional[str]] = mapped_column(String(64), ForeignKey("assistants.id"), index=True)
    source: Mapped[str] = mapped_column(String(32), default="debug")
    status: Mapped[str] = mapped_column(String(32), default="connected")
    # Start/end kept as ISO strings (routers write datetime.utcnow().isoformat()).
    started_at: Mapped[str] = mapped_column(String(32), nullable=False)
    ended_at: Mapped[Optional[str]] = mapped_column(String(32), nullable=True)
    duration_seconds: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
    summary: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    cost: Mapped[float] = mapped_column(Float, default=0.0)
    call_metadata: Mapped[dict] = mapped_column(JSON, default=dict)
    created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)

    user = relationship("User")
    assistant = relationship("Assistant", back_populates="call_records")
    transcripts = relationship("CallTranscript", back_populates="call_record")
    audio_segments = relationship("CallAudioSegment", back_populates="call_record")
|
||||
|
||||
|
||||
class CallTranscript(Base):
    """One transcript turn within a call, ordered by turn_index.

    Note: this model has no audio_url column; audio is resolved separately
    (see CallAudioSegment and storage.get_audio_url).
    """

    __tablename__ = "call_transcripts"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
    call_id: Mapped[str] = mapped_column(String(64), ForeignKey("call_records.id"), index=True)
    turn_index: Mapped[int] = mapped_column(Integer, nullable=False)
    speaker: Mapped[str] = mapped_column(String(16), nullable=False)  # human/ai
    content: Mapped[str] = mapped_column(Text, nullable=False)
    confidence: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
    # Offsets in milliseconds relative to the call start — presumably; confirm.
    start_ms: Mapped[int] = mapped_column(Integer, nullable=False)
    end_ms: Mapped[int] = mapped_column(Integer, nullable=False)
    duration_ms: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
    emotion: Mapped[Optional[str]] = mapped_column(String(32), nullable=True)

    call_record = relationship("CallRecord", back_populates="transcripts")
|
||||
|
||||
|
||||
class CallAudioSegment(Base):
    """A stored audio clip for a call, optionally tied to a transcript turn."""

    __tablename__ = "call_audio_segments"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
    call_id: Mapped[str] = mapped_column(String(64), ForeignKey("call_records.id"), index=True)
    transcript_id: Mapped[Optional[int]] = mapped_column(Integer, ForeignKey("call_transcripts.id"), nullable=True)
    turn_index: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
    audio_url: Mapped[str] = mapped_column(String(512), nullable=False)
    audio_format: Mapped[str] = mapped_column(String(16), default="mp3")
    file_size_bytes: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
    start_ms: Mapped[int] = mapped_column(Integer, nullable=False)
    end_ms: Mapped[int] = mapped_column(Integer, nullable=False)
    duration_ms: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
    created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)

    call_record = relationship("CallRecord", back_populates="audio_segments")
|
||||
11
api/app/routers/__init__.py
Normal file
11
api/app/routers/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
||||
from fastapi import APIRouter

from . import assistants
from . import history
from . import knowledge

# Aggregate router that mounts every sub-router under a single include point.
# NOTE(review): app/main.py currently includes assistants.router and
# history.router directly and never mounts this aggregate, so the knowledge
# routes appear unreachable — confirm which include path is intended.
router = APIRouter()

router.include_router(assistants.router)
router.include_router(history.router)
router.include_router(knowledge.router)
|
||||
157
api/app/routers/assistants.py
Normal file
157
api/app/routers/assistants.py
Normal file
@@ -0,0 +1,157 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import List
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
|
||||
from ..db import get_db
|
||||
from ..models import Assistant, Voice, Workflow
|
||||
from ..schemas import (
|
||||
AssistantCreate, AssistantUpdate, AssistantOut,
|
||||
VoiceOut,
|
||||
WorkflowCreate, WorkflowUpdate, WorkflowOut
|
||||
)
|
||||
|
||||
router = APIRouter()


# ============ Voices ============
@router.get("/voices", response_model=List[VoiceOut])
def list_voices(db: Session = Depends(get_db)):
    """Return every voice in the voice library."""
    return db.query(Voice).all()
|
||||
|
||||
|
||||
# ============ Assistants ============
@router.get("/assistants")
def list_assistants(
    page: int = 1,
    limit: int = 50,
    db: Session = Depends(get_db)
):
    """Paginated assistant list, newest first."""
    query = db.query(Assistant)
    total = query.count()
    offset = (page - 1) * limit
    items = (
        query.order_by(Assistant.created_at.desc())
        .offset(offset)
        .limit(limit)
        .all()
    )
    return {"total": total, "page": page, "limit": limit, "list": items}
|
||||
|
||||
|
||||
@router.get("/assistants/{id}", response_model=AssistantOut)
def get_assistant(id: str, db: Session = Depends(get_db)):
    """Fetch one assistant by id; 404 when absent."""
    found = db.query(Assistant).filter(Assistant.id == id).first()
    if found is None:
        raise HTTPException(status_code=404, detail="Assistant not found")
    return found
|
||||
|
||||
|
||||
@router.post("/assistants", response_model=AssistantOut)
def create_assistant(data: AssistantCreate, db: Session = Depends(get_db)):
    """Create a new assistant from the camelCase request payload."""
    new_assistant = Assistant(
        id=str(uuid.uuid4())[:8],
        user_id=1,  # default user until authentication is added
        name=data.name,
        opener=data.opener,
        prompt=data.prompt,
        knowledge_base_id=data.knowledgeBaseId,
        language=data.language,
        voice=data.voice,
        speed=data.speed,
        hotwords=data.hotwords,
        tools=data.tools,
        interruption_sensitivity=data.interruptionSensitivity,
        config_mode=data.configMode,
        api_url=data.apiUrl,
        api_key=data.apiKey,
    )
    db.add(new_assistant)
    db.commit()
    db.refresh(new_assistant)
    return new_assistant
|
||||
|
||||
|
||||
@router.put("/assistants/{id}")
def update_assistant(id: str, data: AssistantUpdate, db: Session = Depends(get_db)):
    """Apply the fields present in the payload to an existing assistant."""
    target = db.query(Assistant).filter(Assistant.id == id).first()
    if target is None:
        raise HTTPException(status_code=404, detail="Assistant not found")

    for field, value in data.model_dump(exclude_unset=True).items():
        setattr(target, field, value)

    target.updated_at = datetime.utcnow()
    db.commit()
    db.refresh(target)
    return target
|
||||
|
||||
|
||||
@router.delete("/assistants/{id}")
def delete_assistant(id: str, db: Session = Depends(get_db)):
    """Remove an assistant; 404 when the id is unknown."""
    target = db.query(Assistant).filter(Assistant.id == id).first()
    if target is None:
        raise HTTPException(status_code=404, detail="Assistant not found")
    db.delete(target)
    db.commit()
    return {"message": "Deleted successfully"}
|
||||
|
||||
|
||||
# ============ Workflows ============
@router.get("/workflows", response_model=List[WorkflowOut])
def list_workflows(db: Session = Depends(get_db)):
    """Return every stored workflow."""
    return db.query(Workflow).all()
|
||||
|
||||
|
||||
@router.post("/workflows", response_model=WorkflowOut)
def create_workflow(data: WorkflowCreate, db: Session = Depends(get_db)):
    """Persist a new workflow graph from the camelCase payload."""
    wf = Workflow(
        id=str(uuid.uuid4())[:8],
        user_id=1,
        name=data.name,
        node_count=data.nodeCount,
        # Fall back to "now" when the client omits createdAt.
        created_at=data.createdAt or datetime.utcnow().isoformat(),
        updated_at=data.updatedAt or "",
        global_prompt=data.globalPrompt,
        nodes=data.nodes,
        edges=data.edges,
    )
    db.add(wf)
    db.commit()
    db.refresh(wf)
    return wf
|
||||
|
||||
|
||||
@router.put("/workflows/{id}", response_model=WorkflowOut)
def update_workflow(id: str, data: WorkflowUpdate, db: Session = Depends(get_db)):
    """Patch an existing workflow and stamp updated_at."""
    wf = db.query(Workflow).filter(Workflow.id == id).first()
    if wf is None:
        raise HTTPException(status_code=404, detail="Workflow not found")

    for field, value in data.model_dump(exclude_unset=True).items():
        setattr(wf, field, value)

    wf.updated_at = datetime.utcnow().isoformat()
    db.commit()
    db.refresh(wf)
    return wf
|
||||
|
||||
|
||||
@router.delete("/workflows/{id}")
def delete_workflow(id: str, db: Session = Depends(get_db)):
    """Remove a workflow; 404 when the id is unknown."""
    wf = db.query(Workflow).filter(Workflow.id == id).first()
    if wf is None:
        raise HTTPException(status_code=404, detail="Workflow not found")
    db.delete(wf)
    db.commit()
    return {"message": "Deleted successfully"}
|
||||
188
api/app/routers/history.py
Normal file
188
api/app/routers/history.py
Normal file
@@ -0,0 +1,188 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import Optional, List
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
|
||||
from ..db import get_db
|
||||
from ..models import CallRecord, CallTranscript, CallAudioSegment
|
||||
from ..storage import get_audio_url
|
||||
|
||||
router = APIRouter(prefix="/history", tags=["history"])


@router.get("")
def list_history(
    assistant_id: Optional[str] = None,
    status: Optional[str] = None,
    page: int = 1,
    limit: int = 20,
    db: Session = Depends(get_db)
):
    """Paginated call records, newest first, optionally filtered."""
    q = db.query(CallRecord)
    if assistant_id:
        q = q.filter(CallRecord.assistant_id == assistant_id)
    if status:
        q = q.filter(CallRecord.status == status)

    total = q.count()
    rows = (
        q.order_by(CallRecord.started_at.desc())
        .offset((page - 1) * limit)
        .limit(limit)
        .all()
    )
    return {"total": total, "page": page, "limit": limit, "list": rows}
|
||||
|
||||
|
||||
@router.get("/{call_id}")
def get_history_detail(call_id: str, db: Session = Depends(get_db)):
    """Return one call record plus its ordered transcript turns.

    Transcript entries use camelCase keys for the frontend. Raises 404 when
    the call id is unknown.
    """
    record = db.query(CallRecord).filter(CallRecord.id == call_id).first()
    if not record:
        raise HTTPException(status_code=404, detail="Call record not found")

    transcripts = (
        db.query(CallTranscript)
        .filter(CallTranscript.call_id == call_id)
        .order_by(CallTranscript.turn_index)
        .all()
    )

    transcript_list = []
    for t in transcripts:
        # Bug fix: CallTranscript defines no `audio_url` column, so the former
        # `t.audio_url` raised AttributeError on any call with transcripts.
        # Resolve the clip URL from storage by (call_id, turn_index) instead;
        # getattr keeps this tolerant if the column is added later.
        audio_url = getattr(t, "audio_url", None) or get_audio_url(call_id, t.turn_index)
        transcript_list.append({
            "turnIndex": t.turn_index,
            "speaker": t.speaker,
            "content": t.content,
            "confidence": t.confidence,
            "startMs": t.start_ms,
            "endMs": t.end_ms,
            "durationMs": t.duration_ms,
            "audioUrl": audio_url,
        })

    return {
        "id": record.id,
        "user_id": record.user_id,
        "assistant_id": record.assistant_id,
        "source": record.source,
        "status": record.status,
        "started_at": record.started_at,
        "ended_at": record.ended_at,
        "duration_seconds": record.duration_seconds,
        "summary": record.summary,
        "transcripts": transcript_list,
    }
|
||||
|
||||
|
||||
@router.post("")
def create_call_record(
    user_id: int,
    assistant_id: Optional[str] = None,
    source: str = "debug",
    db: Session = Depends(get_db)
):
    """Open a new call record in `connected` state (engine callback hook)."""
    new_record = CallRecord(
        id=str(uuid.uuid4())[:8],
        user_id=user_id,
        assistant_id=assistant_id,
        source=source,
        status="connected",
        started_at=datetime.utcnow().isoformat(),
    )
    db.add(new_record)
    db.commit()
    db.refresh(new_record)
    return new_record
|
||||
|
||||
|
||||
@router.put("/{call_id}")
def update_call_record(
    call_id: str,
    status: Optional[str] = None,
    summary: Optional[str] = None,
    duration_seconds: Optional[int] = None,
    db: Session = Depends(get_db)
):
    """Patch status/summary/duration on a call record; 404 when unknown.

    Bug fix: the previous truthiness checks silently dropped legitimate falsy
    values — e.g. duration_seconds == 0 for an instantly-ended call or an
    explicitly empty summary — so compare against None instead.
    """
    record = db.query(CallRecord).filter(CallRecord.id == call_id).first()
    if not record:
        raise HTTPException(status_code=404, detail="Call record not found")

    if status is not None:
        record.status = status
    if summary is not None:
        record.summary = summary
    if duration_seconds is not None:
        record.duration_seconds = duration_seconds
        # Providing a duration marks the call as finished.
        record.ended_at = datetime.utcnow().isoformat()

    db.commit()
    return {"message": "Updated successfully"}
|
||||
|
||||
|
||||
@router.post("/{call_id}/transcripts")
def add_transcript(
    call_id: str,
    turn_index: int,
    speaker: str,
    content: str,
    start_ms: int,
    end_ms: int,
    confidence: Optional[float] = None,
    duration_ms: Optional[int] = None,
    emotion: Optional[str] = None,
    db: Session = Depends(get_db)
):
    """Store one transcript turn and echo it back with its audio URL."""
    row = CallTranscript(
        call_id=call_id,
        turn_index=turn_index,
        speaker=speaker,
        content=content,
        confidence=confidence,
        start_ms=start_ms,
        end_ms=end_ms,
        duration_ms=duration_ms,
        emotion=emotion,
    )
    db.add(row)
    db.commit()
    db.refresh(row)

    return {
        "id": row.id,
        "turn_index": turn_index,
        "speaker": speaker,
        "content": content,
        "confidence": confidence,
        "start_ms": start_ms,
        "end_ms": end_ms,
        "duration_ms": duration_ms,
        # Resolve the clip location from storage for the caller's convenience.
        "audio_url": get_audio_url(call_id, turn_index),
    }
|
||||
|
||||
|
||||
@router.get("/{call_id}/audio/{turn_index}")
def get_audio(call_id: str, turn_index: int):
    """Redirect to the stored audio clip for one turn; 404 when missing."""
    url = get_audio_url(call_id, turn_index)
    if not url:
        raise HTTPException(status_code=404, detail="Audio not found")
    from fastapi.responses import RedirectResponse
    return RedirectResponse(url)
|
||||
|
||||
|
||||
@router.delete("/{call_id}")
def delete_call_record(call_id: str, db: Session = Depends(get_db)):
    """Delete a call record together with its transcript and audio rows.

    Bug fix: the models configure no ON DELETE cascade, so the previous
    version orphaned call_transcripts / call_audio_segments rows (and could
    hit FK constraint errors where enforcement is on). Remove children first.
    """
    record = db.query(CallRecord).filter(CallRecord.id == call_id).first()
    if not record:
        raise HTTPException(status_code=404, detail="Call record not found")

    db.query(CallTranscript).filter(CallTranscript.call_id == call_id).delete()
    db.query(CallAudioSegment).filter(CallAudioSegment.call_id == call_id).delete()
    db.delete(record)
    db.commit()
    return {"message": "Deleted successfully"}
|
||||
234
api/app/routers/knowledge.py
Normal file
234
api/app/routers/knowledge.py
Normal file
@@ -0,0 +1,234 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import Optional
|
||||
import uuid
|
||||
import os
|
||||
from datetime import datetime
|
||||
|
||||
from ..db import get_db
|
||||
from ..models import KnowledgeBase, KnowledgeDocument
|
||||
from ..schemas import (
|
||||
KnowledgeBaseCreate, KnowledgeBaseUpdate, KnowledgeBaseOut,
|
||||
KnowledgeSearchQuery, KnowledgeSearchResult, KnowledgeStats,
|
||||
DocumentIndexRequest,
|
||||
)
|
||||
from ..vector_store import (
|
||||
vector_store, search_knowledge, index_document, delete_document_from_vector
|
||||
)
|
||||
|
||||
router = APIRouter(prefix="/knowledge", tags=["knowledge"])


def kb_to_dict(kb: KnowledgeBase) -> dict:
    """Serialize a KnowledgeBase row into a JSON-ready dict."""
    created = kb.created_at.isoformat() if kb.created_at else None
    updated = kb.updated_at.isoformat() if kb.updated_at else None
    return {
        "id": kb.id,
        "user_id": kb.user_id,
        "name": kb.name,
        "description": kb.description,
        "embedding_model": kb.embedding_model,
        "chunk_size": kb.chunk_size,
        "chunk_overlap": kb.chunk_overlap,
        "doc_count": kb.doc_count,
        "chunk_count": kb.chunk_count,
        "status": kb.status,
        "created_at": created,
        "updated_at": updated,
    }
|
||||
|
||||
|
||||
def doc_to_dict(d: KnowledgeDocument) -> dict:
    """Serialize a KnowledgeDocument row into a JSON-ready dict."""
    created = d.created_at.isoformat() if d.created_at else None
    processed = d.processed_at.isoformat() if d.processed_at else None
    return {
        "id": d.id,
        "kb_id": d.kb_id,
        "name": d.name,
        "size": d.size,
        "file_type": d.file_type,
        "storage_url": d.storage_url,
        "status": d.status,
        "chunk_count": d.chunk_count,
        "error_message": d.error_message,
        "upload_date": d.upload_date,
        "created_at": created,
        "processed_at": processed,
    }
|
||||
|
||||
|
||||
# ============ Knowledge Bases ============
@router.get("/bases")
def list_knowledge_bases(user_id: int = 1, db: Session = Depends(get_db)):
    """List a user's knowledge bases, each with its documents inlined."""
    result = []
    for kb in db.query(KnowledgeBase).filter(KnowledgeBase.user_id == user_id).all():
        entry = kb_to_dict(kb)
        entry["documents"] = [
            doc_to_dict(d)
            for d in db.query(KnowledgeDocument).filter(KnowledgeDocument.kb_id == kb.id).all()
        ]
        result.append(entry)
    return {"total": len(result), "list": result}
|
||||
|
||||
|
||||
@router.get("/bases/{kb_id}")
def get_knowledge_base(kb_id: str, db: Session = Depends(get_db)):
    """Fetch one knowledge base with its documents; 404 when absent."""
    kb = db.query(KnowledgeBase).filter(KnowledgeBase.id == kb_id).first()
    if kb is None:
        raise HTTPException(status_code=404, detail="Knowledge base not found")
    payload = kb_to_dict(kb)
    payload["documents"] = [
        doc_to_dict(d)
        for d in db.query(KnowledgeDocument).filter(KnowledgeDocument.kb_id == kb_id).all()
    ]
    return payload
|
||||
|
||||
|
||||
@router.post("/bases")
def create_knowledge_base(data: KnowledgeBaseCreate, user_id: int = 1, db: Session = Depends(get_db)):
    """Create a KB row, then provision its backing vector-store collection."""
    kb = KnowledgeBase(
        id=str(uuid.uuid4())[:8],
        user_id=user_id,
        name=data.name,
        description=data.description,
        embedding_model=data.embeddingModel,
        chunk_size=data.chunkSize,
        chunk_overlap=data.chunkOverlap,
    )
    db.add(kb)
    db.commit()
    db.refresh(kb)
    # Create the vector collection only after the row is durably stored.
    vector_store.create_collection(kb.id, data.embeddingModel)
    return kb_to_dict(kb)
|
||||
|
||||
|
||||
@router.put("/bases/{kb_id}")
def update_knowledge_base(kb_id: str, data: KnowledgeBaseUpdate, db: Session = Depends(get_db)):
    """Patch KB settings and stamp updated_at; 404 when absent."""
    kb = db.query(KnowledgeBase).filter(KnowledgeBase.id == kb_id).first()
    if kb is None:
        raise HTTPException(status_code=404, detail="Knowledge base not found")

    for field, value in data.model_dump(exclude_unset=True).items():
        setattr(kb, field, value)

    kb.updated_at = datetime.utcnow()
    db.commit()
    db.refresh(kb)
    return kb_to_dict(kb)
|
||||
|
||||
|
||||
@router.delete("/bases/{kb_id}")
def delete_knowledge_base(kb_id: str, db: Session = Depends(get_db)):
    """Delete a knowledge base, its document rows, and its vector collection.

    Raises 404 when the knowledge base does not exist.
    """
    kb = db.query(KnowledgeBase).filter(KnowledgeBase.id == kb_id).first()
    if not kb:
        raise HTTPException(status_code=404, detail="Knowledge base not found")
    # Vector data first; a missing collection is reported as False, not raised.
    vector_store.delete_collection(kb_id)
    # Bulk-delete document rows in one statement instead of loading each ORM
    # object and deleting it individually.
    db.query(KnowledgeDocument).filter(KnowledgeDocument.kb_id == kb_id).delete(
        synchronize_session=False
    )
    db.delete(kb)
    db.commit()
    return {"message": "Deleted successfully"}
|
||||
|
||||
|
||||
# ============ Documents ============
|
||||
@router.post("/bases/{kb_id}/documents")
def upload_document(
    kb_id: str,
    name: str = Query(...),
    size: str = Query(...),
    file_type: str = Query("txt"),
    storage_url: Optional[str] = Query(None),
    db: Session = Depends(get_db)
):
    """Register a document record (status "pending") under a knowledge base.

    The actual content is indexed later through the /index endpoint.
    """
    if db.query(KnowledgeBase).filter(KnowledgeBase.id == kb_id).first() is None:
        raise HTTPException(status_code=404, detail="Knowledge base not found")

    record = KnowledgeDocument(
        id=str(uuid.uuid4())[:8],
        kb_id=kb_id,
        name=name,
        size=size,
        file_type=file_type,
        storage_url=storage_url,
        status="pending",
        upload_date=datetime.utcnow().isoformat()
    )
    db.add(record)
    db.commit()
    db.refresh(record)
    return {"id": record.id, "name": record.name, "status": record.status, "message": "Document created"}
|
||||
|
||||
|
||||
@router.post("/bases/{kb_id}/documents/{doc_id}/index")
def index_document_content(kb_id: str, doc_id: str, request: DocumentIndexRequest, db: Session = Depends(get_db)):
    """Chunk, embed, and index *request.content* for a document.

    Creates the document row on the fly when it does not exist, updates the
    KB's doc/chunk counters, and records failures on the document row.
    """
    doc = db.query(KnowledgeDocument).filter(
        KnowledgeDocument.id == doc_id,
        KnowledgeDocument.kb_id == kb_id
    ).first()

    previous_chunks = 0
    if not doc:
        doc = KnowledgeDocument(
            id=doc_id,
            kb_id=kb_id,
            name=f"doc-{doc_id}.txt",
            size=str(len(request.content)),
            file_type="txt",
            status="pending",
            upload_date=datetime.utcnow().isoformat()
        )
        db.add(doc)
        db.commit()
        db.refresh(doc)
    else:
        # Re-index: remember the old chunk count so the KB total stays correct.
        previous_chunks = doc.chunk_count or 0
        doc.size = str(len(request.content))
        doc.status = "pending"
        db.commit()

    try:
        chunk_count = index_document(kb_id, doc_id, request.content)
        doc.status = "completed"
        doc.chunk_count = chunk_count
        doc.processed_at = datetime.utcnow()
        kb = db.query(KnowledgeBase).filter(KnowledgeBase.id == kb_id).first()
        # Guard: a missing KB previously raised AttributeError here, which the
        # except below mis-reported as an indexing failure.
        if kb is not None:
            kb.doc_count = db.query(KnowledgeDocument).filter(
                KnowledgeDocument.kb_id == kb_id,
                KnowledgeDocument.status == "completed"
            ).count()
            # BUG FIX: re-indexing used to add the new chunk count on top of
            # the stale one; subtract the previous count first.
            kb.chunk_count = (kb.chunk_count or 0) - previous_chunks + chunk_count
        db.commit()
        return {"message": "Document indexed", "chunkCount": chunk_count}
    except Exception as e:
        doc.status = "failed"
        doc.error_message = str(e)
        db.commit()
        raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.delete("/bases/{kb_id}/documents/{doc_id}")
def delete_document(kb_id: str, doc_id: str, db: Session = Depends(get_db)):
    """Delete a document row and its vector chunks, keeping KB counters sane."""
    doc = db.query(KnowledgeDocument).filter(
        KnowledgeDocument.id == doc_id,
        KnowledgeDocument.kb_id == kb_id
    ).first()
    if not doc:
        raise HTTPException(status_code=404, detail="Document not found")
    try:
        # Best effort: vector data may already be gone.
        delete_document_from_vector(kb_id, doc_id)
    except Exception:
        pass
    kb = db.query(KnowledgeBase).filter(KnowledgeBase.id == kb_id).first()
    # Guard: kb could be absent; previously that raised AttributeError, and
    # stale counters could go negative — clamp at zero.
    if kb is not None:
        kb.chunk_count = max(0, (kb.chunk_count or 0) - (doc.chunk_count or 0))
        kb.doc_count = max(0, (kb.doc_count or 0) - 1)
    db.delete(doc)
    db.commit()
    return {"message": "Deleted successfully"}
|
||||
|
||||
|
||||
# ============ Search ============
|
||||
@router.post("/search")
def search_knowledge_base(query: KnowledgeSearchQuery):
    """Run a semantic search against a single knowledge base."""
    params = {"kb_id": query.kb_id, "query": query.query, "n_results": query.nResults}
    return search_knowledge(**params)
|
||||
|
||||
|
||||
# ============ Stats ============
|
||||
@router.get("/bases/{kb_id}/stats")
def get_knowledge_stats(kb_id: str, db: Session = Depends(get_db)):
    """Return document/chunk counters for one knowledge base; 404 when unknown."""
    base = db.query(KnowledgeBase).filter(KnowledgeBase.id == kb_id).first()
    if base is None:
        raise HTTPException(status_code=404, detail="Knowledge base not found")
    return {"kb_id": kb_id, "docCount": base.doc_count, "chunkCount": base.chunk_count}
|
||||
271
api/app/schemas.py
Normal file
271
api/app/schemas.py
Normal file
@@ -0,0 +1,271 @@
|
||||
from datetime import datetime
|
||||
from typing import List, Optional
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
# ============ Voice ============
|
||||
class VoiceBase(BaseModel):
    """Shared attributes of a TTS voice preset."""
    name: str
    vendor: str
    gender: str
    language: str
    description: str


class VoiceOut(VoiceBase):
    """Voice as returned by the API; adds the database id."""
    id: str

    class Config:
        # Allow construction directly from SQLAlchemy ORM objects.
        from_attributes = True


# ============ Assistant ============
class AssistantBase(BaseModel):
    """Editable assistant configuration (camelCase mirrors the frontend payloads)."""
    name: str
    opener: str = ""
    prompt: str = ""
    knowledgeBaseId: Optional[str] = None
    language: str = "zh"
    voice: Optional[str] = None
    speed: float = 1.0
    hotwords: List[str] = []
    tools: List[str] = []
    interruptionSensitivity: int = 500
    configMode: str = "platform"
    apiUrl: Optional[str] = None
    apiKey: Optional[str] = None


class AssistantCreate(AssistantBase):
    """Payload for creating an assistant."""
    pass


class AssistantUpdate(AssistantBase):
    """Partial-update payload; name becomes optional."""
    name: Optional[str] = None


class AssistantOut(AssistantBase):
    """Assistant as returned by the API."""
    id: str
    callCount: int = 0
    created_at: Optional[datetime] = None

    class Config:
        from_attributes = True


# ============ Knowledge Base ============
class KnowledgeDocument(BaseModel):
    """Document entry embedded in knowledge-base responses.

    NOTE(review): this schema shares its name with the SQLAlchemy model
    `KnowledgeDocument` used by the routers — consider renaming one of them.
    """
    id: str
    name: str
    size: str
    fileType: str = "txt"
    storageUrl: Optional[str] = None
    status: str = "pending"
    chunkCount: int = 0
    uploadDate: str


class KnowledgeDocumentCreate(BaseModel):
    """Payload for registering a new document."""
    name: str
    size: str
    fileType: str = "txt"
    storageUrl: Optional[str] = None


class KnowledgeDocumentUpdate(BaseModel):
    """Partial-update payload for a document's processing state."""
    status: Optional[str] = None
    chunkCount: Optional[int] = None
    errorMessage: Optional[str] = None


class KnowledgeBaseBase(BaseModel):
    """Shared knowledge-base configuration fields."""
    name: str
    description: str = ""
    embeddingModel: str = "text-embedding-3-small"
    chunkSize: int = 500
    chunkOverlap: int = 50


class KnowledgeBaseCreate(KnowledgeBaseBase):
    """Payload for creating a knowledge base."""
    pass


class KnowledgeBaseUpdate(BaseModel):
    """Partial-update payload; every field optional."""
    name: Optional[str] = None
    description: Optional[str] = None
    embeddingModel: Optional[str] = None
    chunkSize: Optional[int] = None
    chunkOverlap: Optional[int] = None
    status: Optional[str] = None


class KnowledgeBaseOut(KnowledgeBaseBase):
    """Knowledge base as returned by the API, with counters and documents."""
    id: str
    docCount: int = 0
    chunkCount: int = 0
    status: str = "active"
    createdAt: Optional[datetime] = None
    updatedAt: Optional[datetime] = None
    documents: List[KnowledgeDocument] = []

    class Config:
        from_attributes = True
|
||||
|
||||
|
||||
# ============ Knowledge Search ============
|
||||
class KnowledgeSearchQuery(BaseModel):
    """Semantic-search request against one knowledge base."""
    query: str
    kb_id: str
    nResults: int = 5


class KnowledgeSearchResult(BaseModel):
    """Search response: the query plus ranked result dicts."""
    query: str
    results: List[dict]


class DocumentIndexRequest(BaseModel):
    """Indexing request carrying the raw document text.

    NOTE(review): this class is redefined later in this module (without
    `document_id`); the later definition shadows this one — consolidate.
    """
    document_id: str
    content: str


class KnowledgeStats(BaseModel):
    """Document/chunk counters for one knowledge base."""
    kb_id: str
    docCount: int
    chunkCount: int


# ============ Workflow ============
class WorkflowNode(BaseModel):
    """One node of a conversation workflow graph."""
    name: str
    type: str
    isStart: Optional[bool] = None
    metadata: dict
    prompt: Optional[str] = None
    messagePlan: Optional[dict] = None
    variableExtractionPlan: Optional[dict] = None
    tool: Optional[dict] = None
    globalNodePlan: Optional[dict] = None


class WorkflowEdge(BaseModel):
    """Directed edge between workflow nodes.

    NOTE(review): `from_` has no alias="from" declared, so despite
    populate_by_name a client sending the key "from" will not populate it —
    confirm the wire format.
    """
    from_: str
    to: str
    label: Optional[str] = None

    class Config:
        populate_by_name = True


class WorkflowBase(BaseModel):
    """Shared workflow fields (graph plus bookkeeping)."""
    name: str
    nodeCount: int = 0
    createdAt: str = ""
    updatedAt: str = ""
    globalPrompt: Optional[str] = None
    nodes: List[dict] = []
    edges: List[dict] = []


class WorkflowCreate(WorkflowBase):
    """Payload for creating a workflow."""
    pass


class WorkflowUpdate(BaseModel):
    """Partial-update payload for a workflow."""
    name: Optional[str] = None
    nodeCount: Optional[int] = None
    nodes: Optional[List[dict]] = None
    edges: Optional[List[dict]] = None
    globalPrompt: Optional[str] = None


class WorkflowOut(WorkflowBase):
    """Workflow as returned by the API."""
    id: str

    class Config:
        from_attributes = True
|
||||
|
||||
|
||||
# ============ Call Record ============
|
||||
class TranscriptSegment(BaseModel):
    """One conversational turn inside a call-record response."""
    turnIndex: int
    speaker: str  # human/ai
    content: str
    confidence: Optional[float] = None
    startMs: int
    endMs: int
    durationMs: Optional[int] = None
    audioUrl: Optional[str] = None


class CallRecordCreate(BaseModel):
    """Payload for opening a new call record."""
    user_id: int
    assistant_id: Optional[str] = None
    source: str = "debug"


class CallRecordUpdate(BaseModel):
    """Partial-update payload for closing/annotating a call."""
    status: Optional[str] = None
    summary: Optional[str] = None
    duration_seconds: Optional[int] = None


class CallRecordOut(BaseModel):
    """Call record as returned by the API, with its transcript turns."""
    id: str
    user_id: int
    assistant_id: Optional[str] = None
    source: str
    status: str
    started_at: str
    ended_at: Optional[str] = None
    duration_seconds: Optional[int] = None
    summary: Optional[str] = None
    transcripts: List[TranscriptSegment] = []

    class Config:
        from_attributes = True


# ============ Call Transcript ============
class TranscriptCreate(BaseModel):
    """Payload for appending one transcript turn to a call."""
    turn_index: int
    speaker: str
    content: str
    confidence: Optional[float] = None
    start_ms: int
    end_ms: int
    duration_ms: Optional[int] = None
    emotion: Optional[str] = None


class TranscriptOut(TranscriptCreate):
    """Transcript turn as stored, with its row id and optional audio URL."""
    id: int
    audio_url: Optional[str] = None

    class Config:
        from_attributes = True


# ============ Dashboard ============
class DashboardStats(BaseModel):
    """Aggregate call statistics for the dashboard view."""
    totalCalls: int
    answerRate: int
    avgDuration: str
    humanTransferCount: int
    trend: List[dict]


# ============ API Response ============
class Message(BaseModel):
    """Generic message-only response body."""
    message: str


class DocumentIndexRequest(BaseModel):
    """Indexing request carrying the raw document text.

    NOTE(review): this REDEFINES DocumentIndexRequest from earlier in this
    module and drops its `document_id` field; the routers resolve to this
    version. Remove one of the two definitions.
    """
    content: str


class ListResponse(BaseModel):
    """Generic paginated list envelope."""
    total: int
    page: int
    limit: int
    list: List
|
||||
56
api/app/storage.py
Normal file
56
api/app/storage.py
Normal file
@@ -0,0 +1,56 @@
|
||||
import os
from datetime import datetime
from minio import Minio
import uuid

# MinIO configuration (overridable via environment variables).
MINIO_ENDPOINT = os.getenv("MINIO_ENDPOINT", "localhost:9000")
MINIO_ACCESS_KEY = os.getenv("MINIO_ACCESS_KEY", "admin")
MINIO_SECRET_KEY = os.getenv("MINIO_SECRET_KEY", "password123")
MINIO_BUCKET = os.getenv("MINIO_BUCKET", "ai-audio")

# Module-level client shared by every helper below.
minio_client = Minio(
    MINIO_ENDPOINT,
    access_key=MINIO_ACCESS_KEY,
    secret_key=MINIO_SECRET_KEY,
    secure=False  # plain HTTP; presumably a local/dev deployment — confirm for prod
)
|
||||
|
||||
|
||||
def ensure_bucket():
    """Create MINIO_BUCKET if it does not exist; failures are logged, not raised."""
    try:
        if not minio_client.bucket_exists(MINIO_BUCKET):
            minio_client.make_bucket(MINIO_BUCKET)
    except Exception as e:
        # Best effort: callers fall back to empty URLs when MinIO is unreachable.
        print(f"Warning: MinIO bucket check failed: {e}")
|
||||
|
||||
|
||||
def upload_audio(file_path: str, call_id: str, turn_index: int) -> str:
    """Upload one audio segment and return a presigned (7-day) access URL.

    Returns "" when the upload or URL signing fails (best-effort semantics).
    """
    from datetime import timedelta  # minio 7.x expects a timedelta for expires

    ensure_bucket()

    ext = os.path.splitext(file_path)[1] or ".mp3"
    object_name = f"{call_id}/{call_id}-{turn_index:03d}{ext}"

    try:
        minio_client.fput_object(MINIO_BUCKET, object_name, file_path)
        # BUG FIX: expires was passed as an int (604800); minio-py 7.x raises
        # for non-timedelta values, so this function always returned "".
        return minio_client.presigned_get_object(
            MINIO_BUCKET, object_name, expires=timedelta(days=7)
        )
    except Exception as e:
        print(f"Warning: MinIO upload failed: {e}")
        return ""
|
||||
|
||||
|
||||
def get_audio_url(call_id: str, turn_index: int) -> str:
    """Return a presigned (7-day) URL for a previously uploaded audio segment.

    Returns "" when signing fails. Assumes the .mp3 naming convention used by
    upload_audio's default extension — confirm for non-mp3 uploads.
    """
    from datetime import timedelta  # minio 7.x expects a timedelta for expires

    object_name = f"{call_id}/{call_id}-{turn_index:03d}.mp3"
    try:
        # BUG FIX: expires=604800 (int) made minio-py 7.x raise, so "" was
        # always returned; pass a timedelta instead.
        return minio_client.presigned_get_object(
            MINIO_BUCKET, object_name, expires=timedelta(days=7)
        )
    except Exception:
        return ""
|
||||
|
||||
|
||||
def generate_local_url(call_id: str, turn_index: int) -> str:
    """Build the API-relative audio URL used when MinIO is not configured.

    NOTE(review): assumes a matching /api/history/{id}/audio/{turn} route
    exists in the history router — confirm.
    """
    return f"/api/history/{call_id}/audio/{turn_index}"
|
||||
311
api/app/vector_store.py
Normal file
311
api/app/vector_store.py
Normal file
@@ -0,0 +1,311 @@
|
||||
"""Vector database service (ChromaDB)."""
import os
from typing import List, Dict, Optional
import chromadb
from chromadb.config import Settings

# Configuration: on-disk store location and per-KB collection naming.
VECTOR_STORE_PATH = os.getenv("VECTOR_STORE_PATH", "./data/vector_store")
COLLECTION_NAME_PREFIX = "kb_"
||||
|
||||
|
||||
class VectorStore:
    """Manages one persistent ChromaDB collection per knowledge base."""

    def __init__(self):
        os.makedirs(VECTOR_STORE_PATH, exist_ok=True)
        self.client = chromadb.PersistentClient(
            path=VECTOR_STORE_PATH,
            settings=Settings(anonymized_telemetry=False)
        )

    def _name(self, kb_id: str) -> str:
        """Collection name for a knowledge-base id."""
        return f"{COLLECTION_NAME_PREFIX}{kb_id}"

    def _require_collection(self, kb_id: str):
        """Return the collection for *kb_id* or raise a clear error.

        BUG FIX: a missing collection previously surfaced to callers as an
        opaque AttributeError on None; now it is an actionable ValueError.
        """
        collection = self.get_collection(kb_id)
        if collection is None:
            raise ValueError(f"Vector collection for knowledge base '{kb_id}' does not exist")
        return collection

    def get_collection(self, kb_id: str):
        """Return the collection for *kb_id*, or None if it does not exist."""
        try:
            return self.client.get_collection(name=self._name(kb_id))
        except (ValueError, chromadb.errors.NotFoundError):
            return None

    def create_collection(self, kb_id: str, embedding_model: str = "text-embedding-3-small"):
        """Idempotently create the collection for *kb_id*; return its name."""
        collection_name = self._name(kb_id)
        try:
            self.client.get_collection(name=collection_name)
        except (ValueError, chromadb.errors.NotFoundError):
            self.client.create_collection(
                name=collection_name,
                metadata={"kb_id": kb_id, "embedding_model": embedding_model}
            )
        return collection_name

    def delete_collection(self, kb_id: str):
        """Delete the collection for *kb_id*; False when it did not exist."""
        try:
            self.client.delete_collection(name=self._name(kb_id))
            return True
        except (ValueError, chromadb.errors.NotFoundError):
            return False

    def add_documents(
        self,
        kb_id: str,
        documents: List[str],
        embeddings: Optional[List[List[float]]] = None,
        ids: Optional[List[str]] = None,
        metadatas: Optional[List[Dict]] = None
    ):
        """Add chunk texts (with optional precomputed embeddings) to a KB.

        Returns the number of documents added.
        """
        collection = self._require_collection(kb_id)

        if ids is None:
            ids = [f"chunk-{i}" for i in range(len(documents))]

        if embeddings is not None:
            collection.add(
                documents=documents,
                embeddings=embeddings,
                ids=ids,
                metadatas=metadatas
            )
        else:
            # Let ChromaDB compute embeddings with its default function.
            collection.add(
                documents=documents,
                ids=ids,
                metadatas=metadatas
            )

        return len(documents)

    def search(
        self,
        kb_id: str,
        query: str,
        n_results: int = 5,
        where: Optional[Dict] = None
    ) -> Dict:
        """Embed *query* and return ChromaDB's raw nearest-neighbour result."""
        collection = self._require_collection(kb_id)

        # embedding_service is the module-level singleton defined below.
        query_embedding = embedding_service.embed_query(query)

        return collection.query(
            query_embeddings=[query_embedding],
            n_results=n_results,
            where=where
        )

    def get_stats(self, kb_id: str) -> Dict:
        """Return the stored chunk count for a KB's collection."""
        collection = self._require_collection(kb_id)
        return {
            "count": collection.count(),
            "kb_id": kb_id
        }

    def delete_documents(self, kb_id: str, ids: List[str]):
        """Delete specific chunk ids from a KB's collection."""
        self._require_collection(kb_id).delete(ids=ids)

    def delete_by_metadata(self, kb_id: str, document_id: str):
        """Delete every chunk belonging to *document_id*."""
        collection = self._require_collection(kb_id)
        matches = collection.get(where={"document_id": document_id})
        if matches["ids"]:
            collection.delete(ids=matches["ids"])
|
||||
|
||||
|
||||
class EmbeddingService:
    """Text-embedding helper with an OpenAI backend and an offline fallback."""

    def __init__(self, model: str = "text-embedding-3-small"):
        self.model = model
        self._client = None  # lazily constructed OpenAI client

    def _get_client(self):
        """Build the OpenAI client once; None when the SDK or API key is missing."""
        if self._client is None:
            try:
                from openai import OpenAI
                api_key = os.getenv("OPENAI_API_KEY")
                if api_key:
                    self._client = OpenAI(api_key=api_key)
            except ImportError:
                pass
        return self._client

    def embed(self, texts: List[str]) -> List[List[float]]:
        """Return one embedding vector per input text.

        Without an OpenAI client this returns RANDOM vectors of the model's
        dimensionality — usable only for local testing, never for real search.
        (Also dropped the previously unused `import math`.)
        """
        client = self._get_client()

        if client is None:
            import random
            # 1536 dims matches text-embedding-3-small; other models assumed 1024.
            dim = 1536 if "3-small" in self.model else 1024
            return [[random.uniform(-1, 1) for _ in range(dim)] for _ in texts]

        response = client.embeddings.create(
            model=self.model,
            input=texts
        )
        return [data.embedding for data in response.data]

    def embed_query(self, query: str) -> List[float]:
        """Embed a single query string."""
        return self.embed([query])[0]
|
||||
|
||||
|
||||
class DocumentProcessor:
    """Splits raw text into (optionally overlapping) chunks ready for embedding."""

    def __init__(self, chunk_size: int = 500, chunk_overlap: int = 50):
        self.chunk_size = chunk_size        # soft max characters per chunk
        self.chunk_overlap = chunk_overlap  # trailing chars carried into the next chunk

    def chunk_text(self, text: str, document_id: str = "") -> List[Dict]:
        """Split *text* on sentence boundaries into chunks of ~chunk_size chars.

        Returns a list of dicts with content/document_id/chunk_index/metadata
        keys; empty or whitespace-only input yields [].
        """
        import re

        def make_chunk(content: str) -> Dict:
            # One chunk record; chunk_index is the position at flush time.
            return {
                "content": content,
                "document_id": document_id,
                "chunk_index": len(chunks),
                "metadata": {
                    "source": "text"
                }
            }

        # Split on CJK sentence terminators and newlines.
        sentences = re.split(r'[。!?\n]', text)

        chunks: List[Dict] = []
        current_chunk = ""
        current_size = 0

        for sentence in sentences:  # (unused enumerate index removed)
            sentence = sentence.strip()
            if not sentence:
                continue

            sentence_len = len(sentence)

            if current_size + sentence_len > self.chunk_size and current_chunk:
                # Current chunk is full: flush it.
                chunks.append(make_chunk(current_chunk.strip()))

                if self.chunk_overlap > 0:
                    # Carry the tail of the flushed chunk into the next one so
                    # context spans chunk boundaries.
                    overlap_chars = current_chunk[-self.chunk_overlap:]
                    current_chunk = overlap_chars + " " + sentence
                    current_size = len(overlap_chars) + sentence_len + 1
                else:
                    current_chunk = sentence
                    current_size = sentence_len
            else:
                if current_chunk:
                    current_chunk += " "
                current_chunk += sentence
                current_size += sentence_len + 1

        # Flush the trailing partial chunk.
        if current_chunk.strip():
            chunks.append(make_chunk(current_chunk.strip()))

        return chunks

    def process_document(self, text: str, document_id: str = "") -> List[Dict]:
        """Full document-processing pipeline (currently just chunking)."""
        return self.chunk_text(text, document_id)
|
||||
|
||||
|
||||
# Module-level singletons shared by the routers.
vector_store = VectorStore()
embedding_service = EmbeddingService()
|
||||
|
||||
|
||||
def search_knowledge(kb_id: str, query: str, n_results: int = 5) -> Dict:
    """Search one knowledge base; returns {"query", "results": [{content, metadata, distance}]}.

    BUG FIX: the query was embedded twice — once here (result discarded) and
    once inside VectorStore.search — doubling embedding cost and latency per
    request. The redundant call is removed.
    """
    results = vector_store.search(
        kb_id=kb_id,
        query=query,
        n_results=n_results
    )

    # ChromaDB returns per-query lists-of-lists; we issued a single query,
    # so flatten index 0 of each field (missing/None fields become empty).
    documents = results.get("documents") or [[]]
    metadatas = results.get("metadatas") or [[]]
    distances = results.get("distances") or [[]]

    return {
        "query": query,
        "results": [
            {
                "content": doc,
                "metadata": meta,
                "distance": dist
            }
            for doc, meta, dist in zip(documents[0], metadatas[0], distances[0])
        ]
    }
|
||||
|
||||
|
||||
def index_document(kb_id: str, document_id: str, text: str) -> int:
    """Chunk *text*, embed the chunks, and store them for *document_id*.

    Returns the number of chunks written (0 for empty/whitespace input).
    """
    chunks = DocumentProcessor().process_document(text, document_id)
    if not chunks:
        return 0

    contents = [chunk["content"] for chunk in chunks]

    vector_store.add_documents(
        kb_id=kb_id,
        documents=contents,
        embeddings=embedding_service.embed(contents),
        ids=[f"{document_id}-{chunk['chunk_index']}" for chunk in chunks],
        metadatas=[
            {
                "document_id": chunk["document_id"],
                "chunk_index": chunk["chunk_index"],
                "kb_id": kb_id
            }
            for chunk in chunks
        ]
    )

    return len(chunks)
|
||||
|
||||
|
||||
def delete_document_from_vector(kb_id: str, document_id: str):
    """Remove every vector chunk belonging to *document_id* from the KB's collection."""
    vector_store.delete_by_metadata(kb_id, document_id)
|
||||
52
api/init_db.py
Normal file
52
api/init_db.py
Normal file
@@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env python3
|
||||
"""初始化数据库"""
|
||||
import os
|
||||
import sys
|
||||
|
||||
# 添加路径
|
||||
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
from app.db import Base, engine
|
||||
from app.models import Voice
|
||||
|
||||
|
||||
def init_db():
    """Drop and recreate every table (DESTRUCTIVE: wipes all existing data)."""
    print("📦 创建数据库表...")
    Base.metadata.drop_all(bind=engine)  # WARNING: removes all existing rows
    Base.metadata.create_all(bind=engine)
    print("✅ 数据库表创建完成")
|
||||
|
||||
|
||||
def init_default_voices():
    """Seed the voices table with the built-in presets; no-op if already seeded."""
    from app.db import SessionLocal

    session = SessionLocal()
    try:
        if session.query(Voice).count() > 0:
            print("ℹ️ 声音数据已存在,跳过初始化")
            return
        session.add_all([
            Voice(id="v1", name="Xiaoyun", vendor="Ali", gender="Female", language="zh", description="Gentle and professional."),
            Voice(id="v2", name="Kevin", vendor="Volcano", gender="Male", language="en", description="Deep and authoritative."),
            Voice(id="v3", name="Abby", vendor="Minimax", gender="Female", language="en", description="Cheerful and lively."),
            Voice(id="v4", name="Guang", vendor="Ali", gender="Male", language="zh", description="Standard newscast style."),
            Voice(id="v5", name="Doubao", vendor="Volcano", gender="Female", language="zh", description="Cute and young."),
        ])
        session.commit()
        print("✅ 默认声音数据已初始化")
    finally:
        session.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Ensure the shared data directory exists before touching the database.
    data_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data")
    os.makedirs(data_dir, exist_ok=True)

    init_db()
    init_default_voices()
    print("🎉 数据库初始化完成!")
|
||||
73
api/main.py
Normal file
73
api/main.py
Normal file
@@ -0,0 +1,73 @@
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from contextlib import asynccontextmanager
|
||||
import os
|
||||
|
||||
from app.db import Base, engine
|
||||
from app.routers import assistants, history, knowledge
|
||||
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Create tables and seed default data before serving requests.

    BUG FIX: default-voice seeding previously hung off @app.on_event("startup"),
    which FastAPI/Starlette ignores when a lifespan handler is passed — so the
    seed never ran. It is now called from the lifespan itself.
    """
    Base.metadata.create_all(bind=engine)
    init_default_data()
    yield


app = FastAPI(
    title="AI VideoAssistant API",
    description="Backend API for AI VideoAssistant",
    version="1.0.0",
    lifespan=lifespan
)

# CORS: wide open; tighten allow_origins for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# API routers
app.include_router(assistants.router, prefix="/api")
app.include_router(history.router, prefix="/api")
app.include_router(knowledge.router, prefix="/api")


@app.get("/")
def root():
    """Service banner."""
    return {"message": "AI VideoAssistant API", "version": "1.0.0"}


@app.get("/health")
def health():
    """Liveness probe."""
    return {"status": "ok"}


def init_default_data():
    """Insert the built-in voice presets once, if the table is empty."""
    from app.db import SessionLocal
    from app.models import Voice

    db = SessionLocal()
    try:
        if db.query(Voice).count() == 0:
            voices = [
                Voice(id="v1", name="Xiaoyun", vendor="Ali", gender="Female", language="zh", description="Gentle and professional."),
                Voice(id="v2", name="Kevin", vendor="Volcano", gender="Male", language="en", description="Deep and authoritative."),
                Voice(id="v3", name="Abby", vendor="Minimax", gender="Female", language="en", description="Cheerful and lively."),
                Voice(id="v4", name="Guang", vendor="Ali", gender="Male", language="zh", description="Standard newscast style."),
                Voice(id="v5", name="Doubao", vendor="Volcano", gender="Female", language="zh", description="Cute and young."),
            ]
            for v in voices:
                db.add(v)
            db.commit()
            print("✅ 默认声音数据已初始化")
    finally:
        db.close()
||||
11
api/requirements.txt
Normal file
11
api/requirements.txt
Normal file
@@ -0,0 +1,11 @@
|
||||
aiosqlite==0.19.0
|
||||
fastapi==0.109.0
|
||||
uvicorn==0.27.0
|
||||
python-multipart==0.0.6
|
||||
python-dotenv==1.0.0
|
||||
pydantic==2.5.3
|
||||
sqlalchemy==2.0.25
|
||||
minio==7.2.0
|
||||
httpx==0.26.0
|
||||
chromadb==0.4.22
|
||||
openai==1.12.0
|
||||
Reference in New Issue
Block a user