import React, { useEffect, useState } from 'react';
import {
  Search, Filter, Plus, BrainCircuit, Trash2, Key, Settings2, Server, Thermometer, Pencil, Play,
} from 'lucide-react';
import {
  Button, Input, TableHeader, TableRow, TableHead, TableCell, Dialog, Badge,
} from '../components/UI';
import { LLMModel } from '../types';
import {
  createLLMModel, deleteLLMModel, fetchLLMModels, previewLLMModel, updateLLMModel,
} from '../services/backendApi';

// Mask an API key for display: keep the first 3 and last 4 characters, hide the rest.
const maskApiKey = (key?: string) => {
  if (!key) return '********';
  if (key.length < 8) return '********';
  return `${key.slice(0, 3)}****${key.slice(-4)}`;
};

export const LLMLibraryPage: React.FC = () => {
  const [models, setModels] = useState<LLMModel[]>([]);
  const [searchTerm, setSearchTerm] = useState('');
  const [vendorFilter, setVendorFilter] = useState('all');
  const [typeFilter, setTypeFilter] = useState('all');
  const [isAddModalOpen, setIsAddModalOpen] = useState(false);
  const [editingModel, setEditingModel] = useState<LLMModel | null>(null);
  const [previewingModel, setPreviewingModel] = useState<LLMModel | null>(null);
  const [isLoading, setIsLoading] = useState(true);

  // Load the model list once on mount.
  useEffect(() => {
    const load = async () => {
      setIsLoading(true);
      try {
        setModels(await fetchLLMModels());
      } catch (error) {
        console.error(error);
        setModels([]);
      } finally {
        setIsLoading(false);
      }
    };
    load();
  }, []);

  // Client-side filtering by search term, vendor and model type.
  const filteredModels = models.filter((m) => {
    const q = searchTerm.toLowerCase();
    const matchesSearch =
      m.name.toLowerCase().includes(q) ||
      (m.modelName || '').toLowerCase().includes(q) ||
      (m.baseUrl || '').toLowerCase().includes(q);
    const matchesVendor = vendorFilter === 'all' || m.vendor === vendorFilter;
    const matchesType = typeFilter === 'all' || m.type === typeFilter;
    return matchesSearch && matchesVendor && matchesType;
  });

  const handleCreate = async (data: Partial<LLMModel>) => {
    const created = await createLLMModel(data);
    setModels((prev) => [created, ...prev]);
    setIsAddModalOpen(false);
  };

  const handleUpdate = async (id: string, data: Partial<LLMModel>) => {
    const updated = await updateLLMModel(id, data);
    setModels((prev) => prev.map((item) => (item.id === id ? updated : item)));
    setEditingModel(null);
  };

  const handleDelete = async (id: string) => {
    if (!confirm('确认删除该模型配置吗?')) return;
    await deleteLLMModel(id);
    setModels((prev) => prev.filter((item) => item.id !== id));
  };

  const vendorOptions = Array.from(new Set(models.map((m) => m.vendor).filter(Boolean)));

  return (

    <div className="space-y-4">
      {/* Wrapper markup, styling classes and button/filter labels below are assumptions; the data bindings and handlers come from the component logic above. */}
      <div className="flex items-center justify-between">
        <h1 className="text-xl font-semibold flex items-center gap-2">
          <BrainCircuit className="w-5 h-5" /> 模型接入
        </h1>
        <Button onClick={() => setIsAddModalOpen(true)}>
          <Plus className="w-4 h-4 mr-1" /> 新增模型
        </Button>
      </div>

      <div className="flex items-center gap-2">
        <Search className="w-4 h-4" />
        <Input value={searchTerm} onChange={(e) => setSearchTerm(e.target.value)} />
        <Filter className="w-4 h-4" />
        <select value={vendorFilter} onChange={(e) => setVendorFilter(e.target.value)}>
          <option value="all">全部厂商</option>
          {vendorOptions.map((v) => (
            <option key={v} value={v}>{v}</option>
          ))}
        </select>
        <select value={typeFilter} onChange={(e) => setTypeFilter(e.target.value)}>
          <option value="all">全部类型</option>
          <option value="text">TEXT</option>
          <option value="embedding">EMBEDDING</option>
          <option value="rerank">RERANK</option>
        </select>
      </div>

      <table className="w-full">
        <TableHeader>
          <TableRow>
            <TableHead>模型名称</TableHead>
            <TableHead>厂商</TableHead>
            <TableHead>类型</TableHead>
            <TableHead>模型标识</TableHead>
            <TableHead>Base URL</TableHead>
            <TableHead>API Key</TableHead>
            <TableHead>操作</TableHead>
          </TableRow>
        </TableHeader>
        <tbody>
          {!isLoading && filteredModels.map((model) => (
            <TableRow key={model.id}>
              <TableCell>{model.name}</TableCell>
              <TableCell>{model.vendor}</TableCell>
              <TableCell><Badge>{model.type.toUpperCase()}</Badge></TableCell>
              <TableCell>{model.modelName || '-'}</TableCell>
              <TableCell className="font-mono text-xs">{model.baseUrl}</TableCell>
              <TableCell className="font-mono text-xs">{maskApiKey(model.apiKey)}</TableCell>
              <TableCell>
                <Button onClick={() => setPreviewingModel(model)}><Play className="w-4 h-4" /></Button>
                <Button onClick={() => setEditingModel(model)}><Pencil className="w-4 h-4" /></Button>
                <Button onClick={() => handleDelete(model.id)}><Trash2 className="w-4 h-4" /></Button>
              </TableCell>
            </TableRow>
          ))}
          {!isLoading && filteredModels.length === 0 && (
            <TableRow><TableCell colSpan={7}>暂无模型数据</TableCell></TableRow>
          )}
          {isLoading && (
            <TableRow><TableCell colSpan={7}>加载中...</TableCell></TableRow>
          )}
        </tbody>
      </table>

      <LLMModelModal isOpen={isAddModalOpen} onClose={() => setIsAddModalOpen(false)} onSubmit={handleCreate} />
      <LLMModelModal
        isOpen={!!editingModel}
        onClose={() => setEditingModel(null)}
        onSubmit={(data) => handleUpdate(editingModel!.id, data)}
        initialModel={editingModel || undefined}
      />
      <LLMPreviewModal isOpen={!!previewingModel} onClose={() => setPreviewingModel(null)} model={previewingModel} />
    </div>
  );
};
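
// For reference while reading the modal components below: a rough sketch of the LLMModel
// shape as this page consumes it. Inferred from usage in this file only — the real
// definition lives in ../types and may differ; field optionality here is an assumption.
//
//   interface LLMModel {
//     id: string;
//     name: string;
//     vendor: string;
//     type: 'text' | 'embedding' | 'rerank';
//     modelName?: string;
//     baseUrl: string;
//     apiKey?: string;
//     temperature?: number;
//     contextLength?: number;
//     enabled?: boolean;
//   }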

const LLMModelModal: React.FC<{
  isOpen: boolean;
  onClose: () => void;
  onSubmit: (model: Partial<LLMModel>) => Promise<void>;
  initialModel?: LLMModel;
}> = ({ isOpen, onClose, onSubmit, initialModel }) => {
  const [name, setName] = useState('');
  const [vendor, setVendor] = useState('OpenAI Compatible');
  const [type, setType] = useState<'text' | 'embedding' | 'rerank'>('text');
  const [modelName, setModelName] = useState('');
  const [baseUrl, setBaseUrl] = useState('');
  const [apiKey, setApiKey] = useState('');
  const [temperature, setTemperature] = useState(0.7);
  const [contextLength, setContextLength] = useState(8192);
  const [enabled, setEnabled] = useState(true);
  const [saving, setSaving] = useState(false);

  // Reset or prefill the form every time the dialog opens (create vs. edit).
  useEffect(() => {
    if (!isOpen) return;
    if (initialModel) {
      setName(initialModel.name || '');
      setVendor(initialModel.vendor || 'OpenAI Compatible');
      setType(initialModel.type || 'text');
      setModelName(initialModel.modelName || '');
      setBaseUrl(initialModel.baseUrl || '');
      setApiKey(initialModel.apiKey || '');
      setTemperature(initialModel.temperature ?? 0.7);
      setContextLength(initialModel.contextLength ?? 8192);
      setEnabled(initialModel.enabled ?? true);
      return;
    }
    setName('');
    setVendor('OpenAI Compatible');
    setType('text');
    setModelName('');
    setBaseUrl('');
    setApiKey('');
    setTemperature(0.7);
    setContextLength(8192);
    setEnabled(true);
  }, [initialModel, isOpen]);

  const handleSubmit = async () => {
    if (!name.trim() || !baseUrl.trim() || !apiKey.trim()) {
      alert('请填写完整信息');
      return;
    }
    try {
      setSaving(true);
      await onSubmit({
        name: name.trim(),
        vendor: vendor.trim(),
        type,
        modelName: modelName.trim() || undefined,
        baseUrl: baseUrl.trim(),
        apiKey: apiKey.trim(),
        temperature: type === 'text' ? temperature : undefined,
        contextLength: contextLength > 0 ? contextLength : undefined,
        enabled,
      });
    } catch (error: any) {
      alert(error?.message || '保存失败');
    } finally {
      setSaving(false);
    }
  };

  return (
    // Dialog props, title and footer button are assumptions about the shared Dialog component's API.
    <Dialog
      isOpen={isOpen}
      onClose={onClose}
      title={initialModel ? '编辑模型' : '新增模型'}
      footer={
        <Button onClick={handleSubmit} disabled={saving}>
          {saving ? '保存中...' : '保存'}
        </Button>
      }
    >
      <div className="space-y-3">
        {/* Field labels, wrapper markup and the slider range are assumptions; bindings, placeholders and input classes come from the source. */}
        <div className="flex gap-2">
          {(['text', 'embedding', 'rerank'] as const).map((t) => (
            <Button key={t} onClick={() => setType(t)} className={type === t ? 'ring-2 ring-primary' : ''}>
              {t.toUpperCase()}
            </Button>
          ))}
        </div>
        <Input value={name} onChange={(e) => setName(e.target.value)} placeholder="例如: GPT4o-Prod" />
        <Input value={vendor} onChange={(e) => setVendor(e.target.value)} />
        <Input value={modelName} onChange={(e) => setModelName(e.target.value)} placeholder="例如: gpt-4o-mini" />
        <Input value={baseUrl} onChange={(e) => setBaseUrl(e.target.value)} placeholder="https://api.openai.com/v1" className="font-mono text-xs" />
        <Input value={apiKey} onChange={(e) => setApiKey(e.target.value)} placeholder="sk-..." className="font-mono text-xs" />
        {type === 'text' && (
          <div className="flex items-center gap-2">
            <Thermometer className="w-4 h-4" />
            <span className="text-xs font-mono">{temperature.toFixed(1)}</span>
            <input
              type="range"
              min={0}
              max={2}
              step={0.1}
              value={temperature}
              onChange={(e) => setTemperature(parseFloat(e.target.value))}
              className="w-full h-1.5 bg-secondary rounded-lg appearance-none cursor-pointer accent-primary"
            />
          </div>
        )}
        <Input type="number" value={contextLength} onChange={(e) => setContextLength(parseInt(e.target.value || '0', 10))} />
        <label className="flex items-center gap-2 text-sm">
          <input type="checkbox" checked={enabled} onChange={(e) => setEnabled(e.target.checked)} />
          启用
        </label>
      </div>
    </Dialog>
  );
};
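
// For reference: a rough sketch of the previewLLMModel contract as the preview modal
// below uses it. Inferred from the call site only; the real request/response types
// live in ../services/backendApi and may differ.
//
//   previewLLMModel(modelId: string, {
//     message: string,
//     system_prompt?: string,
//     max_tokens?: number,
//     temperature?: number,
//   }): Promise<{ reply?: string; error?: string; latency_ms?: number; usage?: Record<string, number> }>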

const LLMPreviewModal: React.FC<{
  isOpen: boolean;
  onClose: () => void;
  model: LLMModel | null;
}> = ({ isOpen, onClose, model }) => {
  const [systemPrompt, setSystemPrompt] = useState('You are a concise helpful assistant.');
  const [message, setMessage] = useState('Hello, please introduce yourself in one sentence.');
  const [temperature, setTemperature] = useState(0.7);
  const [maxTokens, setMaxTokens] = useState(256);
  const [reply, setReply] = useState('');
  const [latency, setLatency] = useState<number | null>(null);
  const [usage, setUsage] = useState<Record<string, number> | null>(null);
  const [isRunning, setIsRunning] = useState(false);

  // Clear previous results and sync temperature whenever the dialog opens.
  useEffect(() => {
    if (!isOpen) return;
    setReply('');
    setLatency(null);
    setUsage(null);
    setTemperature(model?.temperature ?? 0.7);
  }, [isOpen, model]);

  const runPreview = async () => {
    if (!model?.id) return;
    if (!message.trim()) {
      alert('请输入测试消息');
      return;
    }
    try {
      setIsRunning(true);
      const result = await previewLLMModel(model.id, {
        message,
        system_prompt: systemPrompt || undefined,
        max_tokens: maxTokens,
        temperature,
      });
      setReply(result.reply || result.error || '无返回内容');
      setLatency(result.latency_ms ?? null);
      setUsage(result.usage || null);
    } catch (error: any) {
      alert(error?.message || '预览失败');
    } finally {
      setIsRunning(false);
    }
  };

  return (
    // Dialog props and the footer run button are assumptions about the shared Dialog component's API.
    <Dialog
      isOpen={isOpen}
      onClose={onClose}
      title={model ? `预览: ${model.name}` : '模型预览'}
      footer={
        <Button onClick={runPreview} disabled={isRunning}>
          <Play className="w-4 h-4 mr-1" />
          {isRunning ? '请求中...' : '发送'}
        </Button>
      }
    >