add documents

This commit is contained in:
Xin Wang
2026-01-08 17:35:09 +08:00
parent 6a6d736991
commit f1bd12353a
21 changed files with 4268 additions and 0 deletions

93
docs/api/app_client.md Normal file
View File

@@ -0,0 +1,93 @@
# AppClient
The `AppClient` provides methods for application analytics and logs.
## Initialization
```python
from fastgpt_client import AppClient
client = AppClient(
api_key="fastgpt-xxxxx",
base_url="http://localhost:3000"
)
```
## Methods
### get_app_logs_chart
Get application analytics chart data.
```python
client.get_app_logs_chart(
appId: str,
dateStart: str,
dateEnd: str,
offset: int = 1,
source: list[str] | None = None,
userTimespan: str = "day",
chatTimespan: str = "day",
appTimespan: str = "day"
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `dateStart` | `str` | Yes | Start date (ISO 8601 format: `YYYY-MM-DD`) |
| `dateEnd` | `str` | Yes | End date (ISO 8601 format: `YYYY-MM-DD`) |
| `offset` | `int` | No | Offset value (default: `1`) |
| `source` | `list[str]` | No | List of sources (default: `["api"]`) |
| `userTimespan` | `str` | No | User data timespan: `day`, `week`, or `month` (default: `"day"`) |
| `chatTimespan` | `str` | No | Chat data timespan: `day`, `week`, or `month` (default: `"day"`) |
| `appTimespan` | `str` | No | App data timespan: `day`, `week`, or `month` (default: `"day"`) |
**Source Options:**
- `"api"` - API interactions
- `"online"` - Online usage
- `"share"` - Shared links
- `"test"` - Test interactions
**Example:**
```python
from datetime import datetime, timedelta
with AppClient(api_key="fastgpt-xxxxx") as client:
# Get analytics for the last 7 days
end_date = datetime.now()
start_date = end_date - timedelta(days=7)
response = client.get_app_logs_chart(
appId="your-app-id",
dateStart=start_date.strftime("%Y-%m-%d"),
dateEnd=end_date.strftime("%Y-%m-%d"),
source=["api", "online"],
userTimespan="day",
chatTimespan="day",
appTimespan="day"
)
data = response.json()
# Access analytics data
print(f"Users: {data['data'].get('users', {})}")
print(f"Chats: {data['data'].get('chats', {})}")
print(f"App metrics: {data['data'].get('app', {})}")
```
**Response Structure:**
The response contains analytics data with the following possible keys:
- `users` - User engagement metrics
- `chats` - Chat interaction metrics
- `app` - Application-level metrics
- `tokens` - Token usage statistics
- `prices` - Cost information
Each metric may be organized by the specified timespan (day/week/month).

201
docs/api/async_clients.md Normal file
View File

@@ -0,0 +1,201 @@
# Async Clients
The SDK provides full async/await support for high-performance applications. All synchronous clients have async equivalents.
## Async ChatClient
### Initialization
```python
from fastgpt_client import AsyncChatClient
client = AsyncChatClient(
api_key="fastgpt-xxxxx",
base_url="http://localhost:3000"
)
```
### Basic Usage
```python
import asyncio
from fastgpt_client import AsyncChatClient
async def main():
async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
response = await client.create_chat_completion(
messages=[{"role": "user", "content": "Hello!"}],
stream=False
)
response.raise_for_status()
result = response.json()
print(result['choices'][0]['message']['content'])
asyncio.run(main())
```
### Streaming with Async
```python
import asyncio
import json
from fastgpt_client import AsyncChatClient
async def stream_chat():
async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
response = await client.create_chat_completion(
messages=[{"role": "user", "content": "Tell me a story"}],
stream=True
)
async for line in response.aiter_lines():
if line.startswith("data:"):
data = line[5:].strip()
if data and data != "[DONE]":
chunk = json.loads(data)
if "choices" in chunk and chunk["choices"]:
delta = chunk["choices"][0].get("delta", {})
content = delta.get("content", "")
if content:
print(content, end="", flush=True)
asyncio.run(stream_chat())
```
### Multiple Concurrent Requests
One of the main benefits of async is handling multiple requests concurrently:
```python
import asyncio
from fastgpt_client import AsyncChatClient
async def fetch_multiple():
async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
# Create multiple chat completions concurrently
tasks = [
client.create_chat_completion(
messages=[{"role": "user", "content": f"What is {concept}?"}],
stream=False
)
for concept in ["AI", "Machine Learning", "Deep Learning"]
]
responses = await asyncio.gather(*tasks)
for i, response in enumerate(responses):
response.raise_for_status()
result = response.json()
concept = ["AI", "Machine Learning", "Deep Learning"][i]
print(f"\n{concept}:")
print(result['choices'][0]['message']['content'])
asyncio.run(fetch_multiple())
```
## Async AppClient
### Basic Usage
```python
import asyncio
from fastgpt_client import AsyncAppClient
async def get_analytics():
async with AsyncAppClient(api_key="fastgpt-xxxxx") as client:
response = await client.get_app_logs_chart(
appId="your-app-id",
dateStart="2024-01-01",
dateEnd="2024-12-31",
source=["api"]
)
response.raise_for_status()
data = response.json()
print(data)
asyncio.run(get_analytics())
```
## Complete Example: Async Chat Application
```python
import asyncio
from fastgpt_client import AsyncChatClient
class AsyncChatApp:
def __init__(self, api_key: str, base_url: str):
self.client = AsyncChatClient(api_key=api_key, base_url=base_url)
self.chat_id = None
async def start(self):
await self.client.__aenter__()
async def stop(self):
await self.client.__aexit__(None, None, None)
async def send_message(self, content: str) -> str:
response = await self.client.create_chat_completion(
messages=[{"role": "user", "content": content}],
chatId=self.chat_id,
stream=False
)
response.raise_for_status()
result = response.json()
# Update chat_id after first message
if not self.chat_id:
self.chat_id = result.get('chatId')
return result['choices'][0]['message']['content']
async def chat(self):
await self.start()
try:
while True:
user_input = input("\nYou: ")
if user_input.lower() in ['quit', 'exit']:
break
print("AI: ", end="", flush=True)
response = await self.send_message(user_input)
print(response)
finally:
await self.stop()
async def main():
app = AsyncChatApp(
api_key="fastgpt-xxxxx",
base_url="http://localhost:3000"
)
await app.chat()
asyncio.run(main())
```
## Key Differences from Sync Clients
| Aspect | Sync | Async |
|--------|------|-------|
| Context Manager | `with` | `async with` |
| Method Call | `client.method()` | `await client.method()` |
| Streaming | `for line in response.iter_lines()` | `async for line in response.aiter_lines()` |
| Close | `client.close()` | `await client.close()` |
## Best Practices
1. **Always use `async with`** for automatic resource cleanup
2. **Use `asyncio.gather()`** for concurrent requests
3. **Handle exceptions properly** with try/except blocks
4. **Close clients** when done (or use context managers)
5. **Avoid mixing sync and async** code in the same application
## When to Use Async
Use async clients when you need to:
- Handle many concurrent requests
- Integrate with other async libraries (FastAPI, aiohttp, etc.)
- Build real-time applications with streaming
- Maximize throughput in I/O-bound applications
For simple scripts or applications with low concurrency, sync clients are often simpler and equally effective.

315
docs/api/chat_client.md Normal file
View File

@@ -0,0 +1,315 @@
# ChatClient
The `ChatClient` provides methods for chat completions and conversation management.
## Initialization
```python
from fastgpt_client import ChatClient
client = ChatClient(
api_key="fastgpt-xxxxx",
base_url="http://localhost:3000"
)
```
## Methods
### create_chat_completion
Create a chat completion (synchronous or streaming).
```python
client.create_chat_completion(
messages: list[dict],
stream: bool = False,
chatId: str | None = None,
detail: bool = False,
variables: dict[str, Any] | None = None,
responseChatItemId: str | None = None
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `messages` | `list[dict]` | Yes | Array of message objects with `role` and `content` |
| `stream` | `bool` | No | Whether to stream the response (default: `False`) |
| `chatId` | `str` | No | Chat ID for conversation context |
| `detail` | `bool` | No | Whether to return detailed execution data (default: `False`) |
| `variables` | `dict` | No | Template variables for substitution |
| `responseChatItemId` | `str` | No | Custom ID for the response message |
**Example:**
```python
response = client.create_chat_completion(
messages=[
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi there!"},
{"role": "user", "content": "How are you?"}
],
stream=False
)
result = response.json()
print(result['choices'][0]['message']['content'])
```
---
### get_chat_histories
Get chat histories for an application.
```python
client.get_chat_histories(
appId: str,
offset: int = 0,
pageSize: int = 20,
source: Literal["api", "online", "share", "test"] = "api"
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `offset` | `int` | No | Offset for pagination (default: `0`) |
| `pageSize` | `int` | No | Number of records per page (default: `20`) |
| `source` | `str` | No | Source filter (default: `"api"`) |
**Example:**
```python
response = client.get_chat_histories(
appId="app-123",
offset=0,
pageSize=20,
source="api"
)
data = response.json()
for chat in data['data']['list']:
print(f"{chat['title']}: {chat['chatId']}")
```
---
### get_chat_init
Get chat initialization information.
```python
client.get_chat_init(
appId: str,
chatId: str
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `chatId` | `str` | Yes | Chat ID |
---
### get_chat_records
Get chat records for a specific chat.
```python
client.get_chat_records(
appId: str,
chatId: str,
offset: int = 0,
pageSize: int = 10,
loadCustomFeedbacks: bool = False
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `chatId` | `str` | Yes | Chat ID |
| `offset` | `int` | No | Offset for pagination (default: `0`) |
| `pageSize` | `int` | No | Number of records per page (default: `10`) |
| `loadCustomFeedbacks` | `bool` | No | Whether to load custom feedbacks (default: `False`) |
---
### get_record_detail
Get detailed execution data for a specific record.
```python
client.get_record_detail(
appId: str,
chatId: str,
dataId: str
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `chatId` | `str` | Yes | Chat ID |
| `dataId` | `str` | Yes | Record ID |
---
### update_chat_history
Update chat history (title or pin status).
```python
client.update_chat_history(
appId: str,
chatId: str,
customTitle: str | None = None,
top: bool | None = None
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `chatId` | `str` | Yes | Chat ID |
| `customTitle` | `str` | No | Custom title for the chat |
| `top` | `bool` | No | Whether to pin the chat |
---
### delete_chat_history
Delete a chat history.
```python
client.delete_chat_history(
appId: str,
chatId: str
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `chatId` | `str` | Yes | Chat ID |
---
### clear_chat_histories
Clear all chat histories for an application.
```python
client.clear_chat_histories(
appId: str
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
---
### delete_chat_record
Delete a single chat record.
```python
client.delete_chat_record(
appId: str,
chatId: str,
contentId: str
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `chatId` | `str` | Yes | Chat ID |
| `contentId` | `str` | Yes | Content ID of the record |
---
### send_feedback
Send feedback for a chat message (like/dislike).
```python
client.send_feedback(
appId: str,
chatId: str,
dataId: str,
userGoodFeedback: str | None = None,
userBadFeedback: str | None = None
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `chatId` | `str` | Yes | Chat ID |
| `dataId` | `str` | Yes | Message ID |
| `userGoodFeedback` | `str` | No | Positive feedback text |
| `userBadFeedback` | `str` | No | Negative feedback text |
**Example:**
```python
# Like a message
client.send_feedback(
appId="app-123",
chatId="chat-123",
dataId="msg-123",
userGoodFeedback="Great answer!"
)
# Dislike a message
client.send_feedback(
appId="app-123",
chatId="chat-123",
dataId="msg-123",
userBadFeedback="Not helpful"
)
```
---
### get_suggested_questions
Get suggested questions based on chat context.
```python
client.get_suggested_questions(
appId: str,
chatId: str,
questionGuide: dict[str, Any] | None = None
) -> httpx.Response
```
**Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `appId` | `str` | Yes | Application ID |
| `chatId` | `str` | Yes | Chat ID |
| `questionGuide` | `dict` | No | Custom configuration for question guide |

248
docs/api/exceptions.md Normal file
View File

@@ -0,0 +1,248 @@
# Exceptions
The SDK provides specific exceptions for different error types. All exceptions inherit from the base `FastGPTError`.
## Exception Hierarchy
```
FastGPTError
├── APIError
├── AuthenticationError
├── RateLimitError
├── ValidationError
└── StreamParseError
```
## Exception Types
### FastGPTError
Base exception class for all FastGPT SDK errors.
```python
from fastgpt_client.exceptions import FastGPTError
try:
# SDK operation
pass
except FastGPTError as e:
print(f"FastGPT error: {e}")
```
---
### APIError
General API error for 4xx and 5xx responses that don't have a specific exception type.
```python
from fastgpt_client.exceptions import APIError
try:
response = client.create_chat_completion(...)
except APIError as e:
print(f"API error: {e.message}")
print(f"Status code: {e.status_code}")
print(f"Response data: {e.response_data}")
```
**Attributes:**
| Attribute | Type | Description |
|-----------|------|-------------|
| `message` | `str` | Error message |
| `status_code` | `int` | HTTP status code |
| `response_data` | `dict \| None` | Full error response data |
---
### AuthenticationError
Raised when authentication fails (401 status code).
```python
from fastgpt_client.exceptions import AuthenticationError
try:
response = client.create_chat_completion(...)
except AuthenticationError as e:
print(f"Authentication failed: {e.message}")
print("Please check your API key")
```
**Common Causes:**
- Invalid API key
- Missing API key
- Expired API key
- API key format incorrect
---
### RateLimitError
Raised when rate limit is exceeded (429 status code).
```python
from fastgpt_client.exceptions import RateLimitError
try:
response = client.create_chat_completion(...)
except RateLimitError as e:
print(f"Rate limit exceeded: {e.message}")
if e.retry_after:
print(f"Retry after {e.retry_after} seconds")
```
**Attributes:**
| Attribute | Type | Description |
|-----------|------|-------------|
| `message` | `str` | Error message |
| `retry_after` | `str \| None` | Suggested retry delay (from `Retry-After` header) |
| `status_code` | `int` | HTTP status code (429) |
| `response_data` | `dict \| None` | Full error response data |
---
### ValidationError
Raised when request validation fails (422 status code).
```python
from fastgpt_client.exceptions import ValidationError
try:
response = client.create_chat_completion(
messages=[{"role": "user", "content": "Hello"}],
invalid_param="value"
)
except ValidationError as e:
print(f"Validation error: {e.message}")
print(f"Response data: {e.response_data}")
```
**Common Causes:**
- Invalid parameter values
- Missing required parameters
- Incorrect parameter types
- Invalid message format
---
### StreamParseError
Raised when parsing streaming responses fails.
```python
from fastgpt_client.exceptions import StreamParseError
try:
for line in response.iter_lines():
# Parse streaming data
pass
except StreamParseError as e:
print(f"Stream parsing error: {e.message}")
```
---
## Comprehensive Error Handling
```python
from fastgpt_client import ChatClient
from fastgpt_client.exceptions import (
APIError,
AuthenticationError,
RateLimitError,
ValidationError,
FastGPTError
)
def safe_chat_completion(client, messages):
"""Handle chat completion with comprehensive error handling."""
try:
response = client.create_chat_completion(
messages=messages,
stream=False
)
response.raise_for_status()
return response.json()
except AuthenticationError:
print("Error: Invalid API key. Please check your credentials.")
return None
except RateLimitError as e:
print("Error: Rate limit exceeded. Please wait.")
if e.retry_after:
print(f"Retry after {e.retry_after} seconds")
return None
except ValidationError as e:
print(f"Error: Invalid request parameters: {e.message}")
return None
except APIError as e:
print(f"Error: API request failed: {e.message}")
return None
except FastGPTError as e:
print(f"Error: Unexpected FastGPT error: {e}")
return None
except Exception as e:
print(f"Error: Unexpected error: {e}")
return None
# Usage
with ChatClient(api_key="fastgpt-xxxxx") as client:
result = safe_chat_completion(
client,
[{"role": "user", "content": "Hello!"}]
)
```
## Error Handling Best Practices
1. **Always handle exceptions** when making API calls
2. **Use specific exceptions** for better error handling
3. **Log errors** for debugging purposes
4. **Provide user-friendly messages** based on error type
5. **Implement retry logic** for rate limit errors
6. **Validate parameters** before making API calls
## Retry Logic for Rate Limiting
```python
import time
from fastgpt_client import ChatClient
from fastgpt_client.exceptions import RateLimitError
def chat_with_retry(client, messages, max_retries=3):
"""Retry chat completion on rate limit errors."""
for attempt in range(max_retries):
try:
response = client.create_chat_completion(
messages=messages,
stream=False
)
response.raise_for_status()
return response.json()
except RateLimitError as e:
if attempt < max_retries - 1:
# Use Retry-After header or default delay
delay = int(e.retry_after) if e.retry_after else 5
print(f"Rate limited. Waiting {delay} seconds...")
time.sleep(delay)
else:
print("Max retries exceeded. Giving up.")
raise
```
## Next Steps
- [Error Handling Guide](../advanced/error_handling.md) - More error handling strategies
- [Rate Limiting](../advanced/rate_limiting.md) - Handling rate limits effectively

104
docs/api/overview.md Normal file
View File

@@ -0,0 +1,104 @@
# API Reference Overview
The FastGPT Python SDK provides three main client types:
## Clients
| Client | Sync | Async | Description |
|--------|------|-------|-------------|
| [`ChatClient`](chat_client.md) | ✅ | ✅ | Chat completions and conversation management |
| [`AppClient`](app_client.md) | ✅ | ✅ | App analytics and logs |
| `FastGPTClient` | ✅ | ✅ | Base client (usually used indirectly) |
## Base Client Options
All clients share these initialization parameters:
```python
from fastgpt_client import ChatClient
client = ChatClient(
api_key="fastgpt-xxxxx", # Required: Your API key
base_url="http://localhost:3000", # Optional: API base URL
timeout=60.0, # Optional: Request timeout (seconds)
max_retries=3, # Optional: Max retry attempts
retry_delay=1.0, # Optional: Delay between retries (seconds)
enable_logging=False # Optional: Enable request logging
)
```
### Parameters
- **api_key** (`str`): Your FastGPT API key
- **base_url** (`str`): Base URL for the FastGPT API (default: `"http://localhost:3000"`)
- **timeout** (`float`): Request timeout in seconds (default: `60.0`)
- **max_retries** (`int`): Maximum number of retry attempts (default: `3`)
- **retry_delay** (`float`): Delay between retries in seconds (default: `1.0`)
- **enable_logging** (`bool`): Whether to enable request logging (default: `False`)
## Context Manager Support
All clients support context managers for automatic resource cleanup:
```python
# Synchronous
with ChatClient(api_key="fastgpt-xxxxx") as client:
response = client.create_chat_completion(...)
# Client is automatically closed
# Asynchronous
async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
response = await client.create_chat_completion(...)
# Client is automatically closed
```
## Response Format
All API methods return `httpx.Response` objects. You can:
```python
# Raise exception for 4xx/5xx responses
response.raise_for_status()
# Get JSON data
data = response.json()
# Get status code
status = response.status_code
# Get headers
headers = response.headers
```
## Async Variants
All synchronous clients have async equivalents:
```python
# Sync
from fastgpt_client import ChatClient
with ChatClient(api_key="...") as client:
response = client.create_chat_completion(...)
# Async
from fastgpt_client import AsyncChatClient
async with AsyncChatClient(api_key="...") as client:
response = await client.create_chat_completion(...)
```
See [Async Clients](async_clients.md) for more details.
## Exceptions
The SDK provides specific exceptions for different error types:
| Exception | Status Code | Description |
|-----------|-------------|-------------|
| `AuthenticationError` | 401 | Invalid or missing API key |
| `RateLimitError` | 429 | Too many requests |
| `ValidationError` | 422 | Invalid request parameters |
| `APIError` | 4xx/5xx | General API errors |
See [Exceptions](exceptions.md) for more details.