Update llm library web interface

This commit is contained in:
Xin Wang
2026-02-08 23:55:40 +08:00
parent 6462c4f432
commit be68e335f1
6 changed files with 614 additions and 187 deletions

View File

@@ -10,7 +10,7 @@ from ..db import get_db
from ..models import LLMModel
from ..schemas import (
LLMModelCreate, LLMModelUpdate, LLMModelOut,
LLMModelTestResponse
LLMModelTestResponse, LLMPreviewRequest, LLMPreviewResponse
)
router = APIRouter(prefix="/llm", tags=["LLM Models"])
@@ -204,3 +204,66 @@ def chat_with_llm(
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/{id}/preview", response_model=LLMPreviewResponse)
def preview_llm_model(
    id: str,
    request: LLMPreviewRequest,
    db: Session = Depends(get_db)
):
    """Preview an LLM's output via an OpenAI-compatible /chat/completions call.

    Looks up the stored model config by id, builds a chat payload from the
    request (falling back to stored defaults), makes a synchronous HTTP call,
    and returns the first choice's content plus usage and latency metadata.

    Raises:
        HTTPException 404: model id not found.
        HTTPException 400: empty preview message, or no API key available.
        HTTPException 502: transport failure or non-200 vendor response.
    """
    model = db.query(LLMModel).filter(LLMModel.id == id).first()
    if not model:
        raise HTTPException(status_code=404, detail="LLM Model not found")

    user_message = (request.message or "").strip()
    if not user_message:
        raise HTTPException(status_code=400, detail="Preview message cannot be empty")

    # Request-level api_key overrides the stored one. Reject early with a 400
    # if neither is set, instead of crashing on None.strip() (which would
    # surface as an unexplained 500 to the client).
    api_key = ((request.api_key or model.api_key) or "").strip()
    if not api_key:
        raise HTTPException(status_code=400, detail="API key is required for preview")

    messages = []
    if request.system_prompt and request.system_prompt.strip():
        messages.append({"role": "system", "content": request.system_prompt.strip()})
    messages.append({"role": "user", "content": user_message})

    payload = {
        "model": model.model_name or "gpt-3.5-turbo",
        "messages": messages,
        "max_tokens": request.max_tokens or 512,
        # Request temperature wins; 0.0 is a valid value, hence the explicit
        # None check rather than a plain `or`.
        "temperature": request.temperature if request.temperature is not None else (model.temperature or 0.7),
    }
    headers = {"Authorization": f"Bearer {api_key}"}

    start_time = time.time()
    try:
        with httpx.Client(timeout=60.0) as client:
            response = client.post(
                f"{model.base_url.rstrip('/')}/chat/completions",
                json=payload,
                headers=headers
            )
    except Exception as exc:
        raise HTTPException(status_code=502, detail=f"LLM request failed: {exc}") from exc

    if response.status_code != 200:
        # Prefer the vendor's structured error message; fall back to raw text.
        detail = response.text
        try:
            detail_json = response.json()
            detail = detail_json.get("error", {}).get("message") or detail_json.get("detail") or detail
        except Exception:
            pass
        raise HTTPException(status_code=502, detail=f"LLM vendor error: {detail}")

    result = response.json()
    reply = ""
    choices = result.get("choices", [])
    if choices:
        reply = choices[0].get("message", {}).get("content", "") or ""

    return LLMPreviewResponse(
        success=bool(reply),
        reply=reply,
        usage=result.get("usage"),
        latency_ms=int((time.time() - start_time) * 1000),
        error=None if reply else "No response content",
    )

View File

@@ -153,6 +153,22 @@ class LLMModelTestResponse(BaseModel):
message: Optional[str] = None
class LLMPreviewRequest(BaseModel):
    """Request body for POST /llm/{id}/preview."""
    message: str  # user message to send; must be non-empty after stripping
    system_prompt: Optional[str] = None  # optional system prompt prepended to messages
    max_tokens: Optional[int] = None  # completion token cap; server falls back to 512
    temperature: Optional[float] = None  # overrides the stored model temperature when set
    api_key: Optional[str] = None  # overrides the stored model API key when set


class LLMPreviewResponse(BaseModel):
    """Response body for POST /llm/{id}/preview."""
    success: bool  # True when the vendor returned non-empty content
    reply: Optional[str] = None  # first choice's message content, if any
    usage: Optional[dict] = None  # vendor-reported token usage, passed through verbatim
    latency_ms: Optional[int] = None  # round-trip time measured server-side
    error: Optional[str] = None  # populated when success is False
# ============ ASR Model ============
class ASRModelBase(BaseModel):
name: str

View File

@@ -244,3 +244,55 @@ class TestLLMModelAPI:
response = client.post("/api/llm", json=data)
assert response.status_code == 200
assert response.json()["type"] == "embedding"
def test_preview_llm_model_success(self, client, sample_llm_model_data, monkeypatch):
    """Test LLM preview endpoint returns model reply."""
    from app.routers import llm as llm_router

    created = client.post("/api/llm", json=sample_llm_model_data)
    model_id = created.json()["id"]

    class FakeVendorResponse:
        # Mimics the subset of httpx.Response the router reads.
        status_code = 200

        def json(self):
            return {
                "choices": [{"message": {"content": "Preview OK"}}],
                "usage": {"prompt_tokens": 10, "completion_tokens": 2, "total_tokens": 12}
            }

        @property
        def text(self):
            return '{"ok":true}'

    class FakeHttpClient:
        # Stand-in for httpx.Client; the router uses it as a context manager.
        def __init__(self, *args, **kwargs):
            pass

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return False

        def post(self, url, json=None, headers=None):
            # The router must hit the chat-completions endpoint with the stored
            # key and the user message as the first message.
            assert url.endswith("/chat/completions")
            assert headers["Authorization"] == f"Bearer {sample_llm_model_data['api_key']}"
            assert json["messages"][0]["role"] == "user"
            return FakeVendorResponse()

    monkeypatch.setattr(llm_router.httpx, "Client", FakeHttpClient)

    response = client.post(f"/api/llm/{model_id}/preview", json={"message": "hello"})
    assert response.status_code == 200
    body = response.json()
    assert body["success"] is True
    assert body["reply"] == "Preview OK"
def test_preview_llm_model_reject_empty_message(self, client, sample_llm_model_data):
    """Test LLM preview endpoint validates message."""
    model_id = client.post("/api/llm", json=sample_llm_model_data).json()["id"]
    # A whitespace-only message must be rejected before any vendor call is made.
    preview = client.post(f"/api/llm/{model_id}/preview", json={"message": " "})
    assert preview.status_code == 400

View File

@@ -1,58 +1,71 @@
import React, { useState } from 'react';
import { Search, Filter, Plus, BrainCircuit, Trash2, Key, Settings2, Server, Thermometer } from 'lucide-react';
import React, { useEffect, useState } from 'react';
import { Search, Filter, Plus, BrainCircuit, Trash2, Key, Settings2, Server, Thermometer, Pencil, Play } from 'lucide-react';
import { Button, Input, TableHeader, TableRow, TableHead, TableCell, Dialog, Badge } from '../components/UI';
import { mockLLMModels } from '../services/mockData';
import { LLMModel } from '../types';
import { createLLMModel, deleteLLMModel, fetchLLMModels, previewLLMModel, updateLLMModel } from '../services/backendApi';
/**
 * Render an API key for display: first 3 and last 4 characters around a fixed
 * mask. Keys that are missing, empty, or shorter than 8 characters are fully
 * masked.
 */
const maskApiKey = (key?: string): string =>
  key != null && key.length >= 8
    ? `${key.slice(0, 3)}****${key.slice(-4)}`
    : '********';
export const LLMLibraryPage: React.FC = () => {
const [models, setModels] = useState<LLMModel[]>(mockLLMModels);
const [models, setModels] = useState<LLMModel[]>([]);
const [searchTerm, setSearchTerm] = useState('');
const [vendorFilter, setVendorFilter] = useState<string>('all');
const [typeFilter, setTypeFilter] = useState<string>('all');
const [isAddModalOpen, setIsAddModalOpen] = useState(false);
const [editingModel, setEditingModel] = useState<LLMModel | null>(null);
const [previewingModel, setPreviewingModel] = useState<LLMModel | null>(null);
const [isLoading, setIsLoading] = useState(true);
// Form State
const [newModel, setNewModel] = useState<Partial<LLMModel>>({
vendor: 'OpenAI Compatible',
type: 'text',
temperature: 0.7
});
useEffect(() => {
const load = async () => {
setIsLoading(true);
try {
setModels(await fetchLLMModels());
} catch (error) {
console.error(error);
setModels([]);
} finally {
setIsLoading(false);
}
};
load();
}, []);
const filteredModels = models.filter(m => {
const matchesSearch = m.name.toLowerCase().includes(searchTerm.toLowerCase());
const filteredModels = models.filter((m) => {
const q = searchTerm.toLowerCase();
const matchesSearch =
m.name.toLowerCase().includes(q) ||
(m.modelName || '').toLowerCase().includes(q) ||
(m.baseUrl || '').toLowerCase().includes(q);
const matchesVendor = vendorFilter === 'all' || m.vendor === vendorFilter;
const matchesType = typeFilter === 'all' || m.type === typeFilter;
return matchesSearch && matchesVendor && matchesType;
});
const handleAddModel = () => {
if (!newModel.name || !newModel.baseUrl || !newModel.apiKey) {
alert("请填写完整信息");
return;
}
const model: LLMModel = {
id: `m_${Date.now()}`,
name: newModel.name,
vendor: newModel.vendor as string,
type: newModel.type as 'text' | 'embedding' | 'rerank',
baseUrl: newModel.baseUrl,
apiKey: newModel.apiKey,
temperature: newModel.type === 'text' ? newModel.temperature : undefined
};
setModels([model, ...models]);
const handleCreate = async (data: Partial<LLMModel>) => {
const created = await createLLMModel(data);
setModels((prev) => [created, ...prev]);
setIsAddModalOpen(false);
setNewModel({ vendor: 'OpenAI Compatible', type: 'text', temperature: 0.7, name: '', baseUrl: '', apiKey: '' });
};
const handleDeleteModel = (id: string) => {
if (confirm('确认删除该模型配置吗?')) {
setModels(prev => prev.filter(m => m.id !== id));
}
const handleUpdate = async (id: string, data: Partial<LLMModel>) => {
const updated = await updateLLMModel(id, data);
setModels((prev) => prev.map((item) => (item.id === id ? updated : item)));
setEditingModel(null);
};
const handleDelete = async (id: string) => {
if (!confirm('确认删除该模型配置吗?')) return;
await deleteLLMModel(id);
setModels((prev) => prev.filter((item) => item.id !== id));
};
const vendorOptions = Array.from(new Set(models.map((m) => m.vendor).filter(Boolean)));
return (
<div className="space-y-6 animate-in fade-in py-4 pb-10">
<div className="flex items-center justify-between">
@@ -66,10 +79,10 @@ export const LLMLibraryPage: React.FC = () => {
<div className="relative col-span-1 md:col-span-2">
<Search className="absolute left-2.5 top-2.5 h-4 w-4 text-muted-foreground" />
<Input
placeholder="搜索模型名称..."
placeholder="搜索模型名称/Model Name..."
className="pl-9 border-0 bg-white/5"
value={searchTerm}
onChange={e => setSearchTerm(e.target.value)}
onChange={(e) => setSearchTerm(e.target.value)}
/>
</div>
<div className="flex items-center space-x-2">
@@ -80,7 +93,9 @@ export const LLMLibraryPage: React.FC = () => {
onChange={(e) => setVendorFilter(e.target.value)}
>
<option value="all"></option>
<option value="OpenAI Compatible">OpenAI Compatible</option>
{vendorOptions.map((vendor) => (
<option key={vendor} value={vendor}>{vendor}</option>
))}
</select>
</div>
<div className="flex items-center space-x-2">
@@ -104,12 +119,14 @@ export const LLMLibraryPage: React.FC = () => {
<TableHead></TableHead>
<TableHead></TableHead>
<TableHead></TableHead>
<TableHead></TableHead>
<TableHead>Base URL</TableHead>
<TableHead>API Key</TableHead>
<TableHead className="text-right"></TableHead>
</TableRow>
</TableHeader>
<tbody>
{filteredModels.map(model => (
{!isLoading && filteredModels.map((model) => (
<TableRow key={model.id}>
<TableCell className="font-medium text-white flex items-center">
<BrainCircuit className="w-4 h-4 mr-2 text-primary" />
@@ -123,61 +140,157 @@ export const LLMLibraryPage: React.FC = () => {
{model.type.toUpperCase()}
</Badge>
</TableCell>
<TableCell className="font-mono text-xs text-muted-foreground">
{model.baseUrl}
</TableCell>
<TableCell className="font-mono text-xs text-muted-foreground">{model.modelName || '-'}</TableCell>
<TableCell className="font-mono text-xs text-muted-foreground max-w-[240px] truncate">{model.baseUrl}</TableCell>
<TableCell className="font-mono text-xs text-muted-foreground">{maskApiKey(model.apiKey)}</TableCell>
<TableCell className="text-right">
<Button
variant="ghost"
size="icon"
onClick={() => handleDeleteModel(model.id)}
className="text-muted-foreground hover:text-destructive transition-colors"
>
<Button variant="ghost" size="icon" onClick={() => setPreviewingModel(model)} disabled={model.type !== 'text'} title={model.type !== 'text' ? '仅 text 模型可预览' : '预览模型'}>
<Play className="h-4 w-4" />
</Button>
<Button variant="ghost" size="icon" onClick={() => setEditingModel(model)}>
<Pencil className="h-4 w-4" />
</Button>
<Button variant="ghost" size="icon" onClick={() => handleDelete(model.id)} className="text-muted-foreground hover:text-destructive transition-colors">
<Trash2 className="h-4 w-4" />
</Button>
</TableCell>
</TableRow>
))}
{filteredModels.length === 0 && (
{!isLoading && filteredModels.length === 0 && (
<TableRow>
<TableCell colSpan={5} className="text-center py-8 text-muted-foreground"></TableCell>
<TableCell colSpan={7} className="text-center py-8 text-muted-foreground"></TableCell>
</TableRow>
)}
{isLoading && (
<TableRow>
<TableCell colSpan={7} className="text-center py-8 text-muted-foreground">...</TableCell>
</TableRow>
)}
</tbody>
</table>
</div>
<LLMModelModal isOpen={isAddModalOpen} onClose={() => setIsAddModalOpen(false)} onSubmit={handleCreate} />
<LLMModelModal
isOpen={!!editingModel}
onClose={() => setEditingModel(null)}
onSubmit={(data) => handleUpdate(editingModel!.id, data)}
initialModel={editingModel || undefined}
/>
<LLMPreviewModal
isOpen={!!previewingModel}
onClose={() => setPreviewingModel(null)}
model={previewingModel}
/>
</div>
);
};
const LLMModelModal: React.FC<{
isOpen: boolean;
onClose: () => void;
onSubmit: (model: Partial<LLMModel>) => Promise<void>;
initialModel?: LLMModel;
}> = ({ isOpen, onClose, onSubmit, initialModel }) => {
const [name, setName] = useState('');
const [vendor, setVendor] = useState('OpenAI Compatible');
const [type, setType] = useState<'text' | 'embedding' | 'rerank'>('text');
const [modelName, setModelName] = useState('');
const [baseUrl, setBaseUrl] = useState('');
const [apiKey, setApiKey] = useState('');
const [temperature, setTemperature] = useState(0.7);
const [contextLength, setContextLength] = useState(8192);
const [enabled, setEnabled] = useState(true);
const [saving, setSaving] = useState(false);
useEffect(() => {
if (!isOpen) return;
if (initialModel) {
setName(initialModel.name || '');
setVendor(initialModel.vendor || 'OpenAI Compatible');
setType(initialModel.type || 'text');
setModelName(initialModel.modelName || '');
setBaseUrl(initialModel.baseUrl || '');
setApiKey(initialModel.apiKey || '');
setTemperature(initialModel.temperature ?? 0.7);
setContextLength(initialModel.contextLength ?? 8192);
setEnabled(initialModel.enabled ?? true);
return;
}
setName('');
setVendor('OpenAI Compatible');
setType('text');
setModelName('');
setBaseUrl('');
setApiKey('');
setTemperature(0.7);
setContextLength(8192);
setEnabled(true);
}, [initialModel, isOpen]);
const handleSubmit = async () => {
if (!name.trim() || !baseUrl.trim() || !apiKey.trim()) {
alert('请填写完整信息');
return;
}
try {
setSaving(true);
await onSubmit({
name: name.trim(),
vendor: vendor.trim(),
type,
modelName: modelName.trim() || undefined,
baseUrl: baseUrl.trim(),
apiKey: apiKey.trim(),
temperature: type === 'text' ? temperature : undefined,
contextLength: contextLength > 0 ? contextLength : undefined,
enabled,
});
} catch (error: any) {
alert(error?.message || '保存失败');
} finally {
setSaving(false);
}
};
return (
<Dialog
isOpen={isAddModalOpen}
onClose={() => setIsAddModalOpen(false)}
title="添加大模型"
isOpen={isOpen}
onClose={onClose}
title={initialModel ? '编辑大模型' : '添加大模型'}
footer={
<>
<Button variant="ghost" onClick={() => setIsAddModalOpen(false)}></Button>
<Button onClick={handleAddModel}></Button>
<Button variant="ghost" onClick={onClose}></Button>
<Button onClick={handleSubmit} disabled={saving}>{saving ? '保存中...' : (initialModel ? '保存修改' : '确认添加')}</Button>
</>
}
>
<div className="space-y-4">
<div className="space-y-4 max-h-[75vh] overflow-y-auto px-1 custom-scrollbar">
<div className="space-y-1.5">
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block"> (Vendor)</label>
<select
className="flex h-10 w-full rounded-md border border-white/10 bg-white/5 px-3 py-1 text-sm shadow-sm transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-primary/50 text-foreground appearance-none cursor-pointer [&>option]:bg-card"
value={newModel.vendor}
onChange={e => setNewModel({...newModel, vendor: e.target.value})}
value={vendor}
onChange={(e) => setVendor(e.target.value)}
>
<option value="OpenAI Compatible">OpenAI Compatible</option>
<option value="OpenAI">OpenAI</option>
<option value="SiliconFlow">SiliconFlow</option>
</select>
</div>
<div className="space-y-1.5">
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block"> (Type)</label>
<div className="flex bg-white/5 p-1 rounded-lg border border-white/10">
{(['text', 'embedding', 'rerank'] as const).map(t => (
{(['text', 'embedding', 'rerank'] as const).map((t) => (
<button
key={t}
onClick={() => setNewModel({...newModel, type: t})}
className={`flex-1 flex items-center justify-center py-1.5 text-xs font-bold rounded-md transition-all ${newModel.type === t ? 'bg-primary text-primary-foreground shadow-lg' : 'text-muted-foreground hover:text-foreground'}`}
onClick={() => setType(t)}
className={`flex-1 flex items-center justify-center py-1.5 text-xs font-bold rounded-md transition-all ${type === t ? 'bg-primary text-primary-foreground shadow-lg' : 'text-muted-foreground hover:text-foreground'}`}
>
{t === 'text' && <Settings2 className="w-3 h-3 mr-1.5" />}
{t === 'embedding' && <BrainCircuit className="w-3 h-3 mr-1.5" />}
@@ -189,60 +302,166 @@ export const LLMLibraryPage: React.FC = () => {
</div>
<div className="space-y-1.5">
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block"> (Model Name)</label>
<Input
value={newModel.name}
onChange={e => setNewModel({...newModel, name: e.target.value})}
placeholder="例如: gpt-4o, deepseek-chat"
/>
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block"> (Display Name)</label>
<Input value={name} onChange={(e) => setName(e.target.value)} placeholder="例如: GPT4o-Prod" />
</div>
<div className="space-y-1.5">
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block flex items-center">
<Server className="w-3 h-3 mr-1.5" /> Base URL
</label>
<Input
value={newModel.baseUrl}
onChange={e => setNewModel({...newModel, baseUrl: e.target.value})}
placeholder="https://api.openai.com/v1"
className="font-mono text-xs"
/>
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block"> (model_name)</label>
<Input value={modelName} onChange={(e) => setModelName(e.target.value)} placeholder="例如: gpt-4o-mini" />
</div>
<div className="space-y-1.5">
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block flex items-center">
<Key className="w-3 h-3 mr-1.5" /> API Key
</label>
<Input
type="password"
value={newModel.apiKey}
onChange={e => setNewModel({...newModel, apiKey: e.target.value})}
placeholder="sk-..."
className="font-mono text-xs"
/>
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block flex items-center"><Server className="w-3 h-3 mr-1.5" /> Base URL</label>
<Input value={baseUrl} onChange={(e) => setBaseUrl(e.target.value)} placeholder="https://api.openai.com/v1" className="font-mono text-xs" />
</div>
{newModel.type === 'text' && (
<div className="space-y-1.5">
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block flex items-center"><Key className="w-3 h-3 mr-1.5" /> API Key</label>
<Input type="password" value={apiKey} onChange={(e) => setApiKey(e.target.value)} placeholder="sk-..." className="font-mono text-xs" />
</div>
{type === 'text' && (
<div className="space-y-3 pt-2">
<div className="flex justify-between items-center mb-1">
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block flex items-center">
<Thermometer className="w-3 h-3 mr-1.5" /> (Temperature)
</label>
<span className="text-[10px] font-mono text-primary bg-primary/10 px-1.5 py-0.5 rounded">{newModel.temperature}</span>
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block flex items-center"><Thermometer className="w-3 h-3 mr-1.5" /> (Temperature)</label>
<span className="text-[10px] font-mono text-primary bg-primary/10 px-1.5 py-0.5 rounded">{temperature.toFixed(1)}</span>
</div>
<input
type="range"
min="0"
max="2"
step="0.1"
value={newModel.temperature}
onChange={(e) => setNewModel({...newModel, temperature: parseFloat(e.target.value)})}
value={temperature}
onChange={(e) => setTemperature(parseFloat(e.target.value))}
className="w-full h-1.5 bg-secondary rounded-lg appearance-none cursor-pointer accent-primary"
/>
</div>
)}
<div className="space-y-1.5">
<label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block"> (context_length)</label>
<Input type="number" min={1} value={contextLength} onChange={(e) => setContextLength(parseInt(e.target.value || '0', 10))} />
</div>
<label className="flex items-center space-x-2 text-xs text-muted-foreground">
<input type="checkbox" checked={enabled} onChange={(e) => setEnabled(e.target.checked)} />
<span></span>
</label>
</div>
</Dialog>
);
};
// Modal for live-previewing a text model through the backend
// /llm/{id}/preview endpoint: editable prompt/message/sampling fields and a
// read-only reply pane with latency/usage metadata.
// Fix: removed a stray trailing </div> (merge/diff artifact) that unbalanced
// the JSX — the component returns a single <Dialog> root.
const LLMPreviewModal: React.FC<{
  isOpen: boolean;
  onClose: () => void;
  model: LLMModel | null;
}> = ({ isOpen, onClose, model }) => {
  // Editable request fields, seeded with sensible defaults.
  const [systemPrompt, setSystemPrompt] = useState('You are a concise helpful assistant.');
  const [message, setMessage] = useState('Hello, please introduce yourself in one sentence.');
  const [temperature, setTemperature] = useState(0.7);
  const [maxTokens, setMaxTokens] = useState(256);
  // Result state from the last preview run.
  const [reply, setReply] = useState('');
  const [latency, setLatency] = useState<number | null>(null);
  const [usage, setUsage] = useState<Record<string, number> | null>(null);
  const [isRunning, setIsRunning] = useState(false);

  // Clear previous results and pick up the model's stored temperature every
  // time the dialog opens.
  useEffect(() => {
    if (!isOpen) return;
    setReply('');
    setLatency(null);
    setUsage(null);
    setTemperature(model?.temperature ?? 0.7);
  }, [isOpen, model]);

  const runPreview = async () => {
    if (!model?.id) return;
    if (!message.trim()) {
      alert('请输入测试消息');
      return;
    }
    try {
      setIsRunning(true);
      const result = await previewLLMModel(model.id, {
        message,
        system_prompt: systemPrompt || undefined,
        max_tokens: maxTokens,
        temperature,
      });
      // Show reply if present, otherwise surface the backend's error text.
      setReply(result.reply || result.error || '无返回内容');
      setLatency(result.latency_ms ?? null);
      setUsage(result.usage || null);
    } catch (error: any) {
      alert(error?.message || '预览失败');
    } finally {
      setIsRunning(false);
    }
  };

  return (
    <Dialog
      isOpen={isOpen}
      onClose={onClose}
      title={`预览模型: ${model?.name || ''}`}
      footer={
        <>
          <Button variant="ghost" onClick={onClose}></Button>
          <Button onClick={runPreview} disabled={isRunning}>{isRunning ? '请求中...' : '开始预览'}</Button>
        </>
      }
    >
      <div className="space-y-4">
        <div className="space-y-1.5">
          <label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block">System Prompt</label>
          <textarea
            value={systemPrompt}
            onChange={(e) => setSystemPrompt(e.target.value)}
            className="flex min-h-[70px] w-full rounded-md border-0 bg-white/5 px-3 py-2 text-sm shadow-sm placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-primary/50 text-white"
            placeholder="可选系统提示词"
          />
        </div>
        <div className="space-y-1.5">
          <label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block">User Message</label>
          <textarea
            value={message}
            onChange={(e) => setMessage(e.target.value)}
            className="flex min-h-[90px] w-full rounded-md border-0 bg-white/5 px-3 py-2 text-sm shadow-sm placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-primary/50 text-white"
            placeholder="输入用户消息"
          />
        </div>
        <div className="grid grid-cols-2 gap-4">
          <div className="space-y-1.5">
            <label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block">Temperature</label>
            <Input type="number" min={0} max={2} step={0.1} value={temperature} onChange={(e) => setTemperature(parseFloat(e.target.value || '0'))} />
          </div>
          <div className="space-y-1.5">
            <label className="text-[10px] font-black text-muted-foreground uppercase tracking-widest block">Max Tokens</label>
            <Input type="number" min={1} value={maxTokens} onChange={(e) => setMaxTokens(parseInt(e.target.value || '1', 10))} />
          </div>
        </div>
        <div className="rounded-lg border border-primary/20 bg-primary/5 p-3 space-y-2">
          <div className="flex items-center justify-between text-xs text-primary">
            <span></span>
            <span>{latency !== null ? `Latency: ${latency}ms` : ''}</span>
          </div>
          <textarea
            readOnly
            value={reply}
            className="flex min-h-[140px] w-full rounded-md border-0 bg-black/20 px-3 py-2 text-sm shadow-sm text-white"
            placeholder="回复会显示在这里"
          />
          {usage && (
            <div className="text-xs text-muted-foreground font-mono">
              prompt: {usage.prompt_tokens ?? '-'} | completion: {usage.completion_tokens ?? '-'} | total: {usage.total_tokens ?? '-'}
            </div>
          )}
        </div>
      </div>
    </Dialog>
  );
};

View File

@@ -1,4 +1,4 @@
import { ASRModel, Assistant, CallLog, InteractionDetail, KnowledgeBase, KnowledgeDocument, Voice, Workflow, WorkflowEdge, WorkflowNode } from '../types';
import { ASRModel, Assistant, CallLog, InteractionDetail, KnowledgeBase, KnowledgeDocument, LLMModel, Voice, Workflow, WorkflowEdge, WorkflowNode } from '../types';
import { apiRequest } from './apiClient';
type AnyRecord = Record<string, any>;
@@ -78,6 +78,19 @@ const mapASRModel = (raw: AnyRecord): ASRModel => ({
enabled: Boolean(readField(raw, ['enabled'], true)),
});
/** Normalize a backend LLM record (snake_case or camelCase keys) into the UI type. */
const mapLLMModel = (raw: AnyRecord): LLMModel => {
  const pick = (keys: string[], fallback: unknown) => readField(raw, keys, fallback);
  return {
    id: String(pick(['id'], '')),
    name: pick(['name'], ''),
    vendor: pick(['vendor'], 'OpenAI Compatible'),
    type: pick(['type'], 'text'),
    baseUrl: pick(['baseUrl', 'base_url'], ''),
    apiKey: pick(['apiKey', 'api_key'], ''),
    modelName: pick(['modelName', 'model_name'], ''),
    temperature: Number(pick(['temperature'], 0.7)),
    contextLength: Number(pick(['contextLength', 'context_length'], 0)),
    enabled: Boolean(pick(['enabled'], true)),
  };
};
const mapWorkflowNode = (raw: AnyRecord): WorkflowNode => ({
name: readField(raw, ['name'], ''),
type: readField(raw, ['type'], 'conversation') as 'conversation' | 'tool' | 'human' | 'end',
@@ -351,6 +364,67 @@ export const previewASRModel = async (
return data || { success: false, error: 'Invalid preview response' };
};
/** Fetch all LLM model configs; tolerates both bare-array and `{ list }` payloads. */
export const fetchLLMModels = async (): Promise<LLMModel[]> => {
  const payload = await apiRequest<{ list?: AnyRecord[] } | AnyRecord[]>('/llm');
  const rows = Array.isArray(payload) ? payload : payload.list || [];
  return rows.map((row) => mapLLMModel(row));
};
/** Create an LLM model config; maps camelCase fields to the backend's snake_case. */
export const createLLMModel = async (data: Partial<LLMModel>): Promise<LLMModel> => {
  const body = {
    id: data.id || undefined,
    name: data.name || 'New LLM Model',
    vendor: data.vendor || 'OpenAI Compatible',
    type: data.type || 'text',
    base_url: data.baseUrl || '',
    api_key: data.apiKey || '',
    model_name: data.modelName || undefined,
    temperature: data.temperature,
    context_length: data.contextLength,
    enabled: data.enabled ?? true,
  };
  const raw = await apiRequest<AnyRecord>('/llm', { method: 'POST', body });
  return mapLLMModel(raw);
};
/** Update an LLM model config; undefined fields are dropped from the JSON body. */
export const updateLLMModel = async (id: string, data: Partial<LLMModel>): Promise<LLMModel> => {
  const { name, vendor, type, baseUrl, apiKey, modelName, temperature, contextLength, enabled } = data;
  const body = {
    name,
    vendor,
    type,
    base_url: baseUrl,
    api_key: apiKey,
    model_name: modelName,
    temperature,
    context_length: contextLength,
    enabled,
  };
  const raw = await apiRequest<AnyRecord>(`/llm/${id}`, { method: 'PUT', body });
  return mapLLMModel(raw);
};
/** Remove an LLM model config by id; resolves once the backend confirms deletion. */
export const deleteLLMModel = async (id: string): Promise<void> => {
  await apiRequest(`/llm/${id}`, { method: 'DELETE' });
};
/** Shape of the backend's POST /llm/{id}/preview response. */
export type LLMPreviewResult = {
  success: boolean; // true when the model returned non-empty content
  reply?: string; // model reply text, when available
  usage?: Record<string, number>; // vendor token usage, passed through verbatim
  latency_ms?: number; // server-measured round-trip latency
  error?: string; // error description when success is false
};
/** Run a one-shot chat preview against the model identified by `id`. */
export const previewLLMModel = async (
  id: string,
  payload: { message: string; system_prompt?: string; max_tokens?: number; temperature?: number; api_key?: string }
): Promise<LLMPreviewResult> =>
  apiRequest<LLMPreviewResult>(`/llm/${id}/preview`, { method: 'POST', body: payload });
export const fetchWorkflows = async (): Promise<Workflow[]> => {
const response = await apiRequest<{ list?: AnyRecord[] } | AnyRecord[]>('/workflows');
const list = Array.isArray(response) ? response : (response.list || []);

View File

@@ -170,7 +170,10 @@ export interface LLMModel {
type: 'text' | 'embedding' | 'rerank';
baseUrl: string;
apiKey: string;
modelName?: string;
temperature?: number;
contextLength?: number;
enabled?: boolean;
}
export interface ASRModel {