add documents

docs/examples/async_usage.md (new file)

# Async Usage Example

Complete examples demonstrating asynchronous usage of the FastGPT Python SDK.

## Why Use Async?

Async is beneficial when you need to:

- Handle multiple concurrent requests efficiently
- Integrate with async frameworks (FastAPI, aiohttp)
- Build real-time streaming applications
- Maximize throughput in I/O-bound applications

## Installation

```bash
pip install fastgpt-client
```

## Basic Async Chat

```python
import asyncio

from fastgpt_client import AsyncChatClient


async def simple_chat():
    """Simple async chat completion."""
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello! What's AI?"}],
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(result['choices'][0]['message']['content'])


asyncio.run(simple_chat())
```

## Async Streaming

```python
import asyncio
import json

from fastgpt_client import AsyncChatClient


async def stream_chat():
    """Async streaming chat completion."""
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me a short story"}],
            stream=True
        )

        async for line in response.aiter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)
        print()


asyncio.run(stream_chat())
```

## Multiple Concurrent Requests

```python
import asyncio

from fastgpt_client import AsyncChatClient


async def fetch_multiple():
    """Run multiple requests concurrently."""
    concepts = ["AI", "Machine Learning", "Deep Learning"]

    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        # Create one request task per concept
        tasks = [
            client.create_chat_completion(
                messages=[{"role": "user", "content": f"What is {concept}?"}],
                stream=False
            )
            for concept in concepts
        ]

        # Execute all tasks concurrently
        responses = await asyncio.gather(*tasks)

        # Process results
        for concept, response in zip(concepts, responses):
            response.raise_for_status()
            result = response.json()
            print(f"\n{concept}:")
            print(f"  {result['choices'][0]['message']['content'][:100]}...")


asyncio.run(fetch_multiple())
```
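
If you fan out many requests at once you can overwhelm the server or hit rate limits. A common remedy is to bound concurrency with `asyncio.Semaphore`; a minimal sketch (the limit of 5 is an arbitrary assumption, not a FastGPT requirement):

```python
import asyncio

from fastgpt_client import AsyncChatClient

MAX_CONCURRENCY = 5  # assumed limit; tune for your deployment


async def fetch_bounded(topics):
    """Fan out requests while never running more than MAX_CONCURRENCY at once."""
    semaphore = asyncio.Semaphore(MAX_CONCURRENCY)

    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:

        async def ask(topic):
            async with semaphore:  # wait for a free slot before sending
                response = await client.create_chat_completion(
                    messages=[{"role": "user", "content": f"What is {topic}?"}],
                    stream=False
                )
                response.raise_for_status()
                return response.json()

        return await asyncio.gather(*(ask(t) for t in topics))
```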

## Complete Async Example

```python
"""Complete async usage example for FastGPT Python SDK."""

import asyncio
import json
import os
from datetime import datetime, timedelta

from dotenv import load_dotenv

from fastgpt_client import AsyncChatClient, AsyncAppClient

load_dotenv()

API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("BASE_URL")


async def simple_chat():
    """Simple async chat completion example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello! What's AI?"}],
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print("Response:", result['choices'][0]['message']['content'])


async def streaming_chat():
    """Async streaming chat completion example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me a short story"}],
            stream=True
        )

        print("Streaming response: ", end="", flush=True)
        async for line in response.aiter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)
        print()


async def chat_with_context():
    """Async chat with context using chatId."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        chat_id = "my_async_chat_123"

        # First message
        print("User: What's AI?")
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "What's AI?"}],
            chatId=chat_id,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(f"AI: {result['choices'][0]['message']['content']}\n")

        # Second message (continues the conversation)
        print("User: Tell me more about it")
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me more about it"}],
            chatId=chat_id,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(f"AI: {result['choices'][0]['message']['content']}")


async def get_histories():
    """Async get chat histories."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID", "default-app-id")

        try:
            histories = await client.get_chat_histories(
                appId=app_id,
                offset=0,
                pageSize=20,
                source="api"
            )
            histories.raise_for_status()
            data = histories.json()
            print(f"Total chats: {data['data']['total']}")
        except Exception as e:
            print(f"Error: {e}")


async def get_app_analytics():
    """Async get app analytics."""
    async with AsyncAppClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID", "default-app-id")

        try:
            # Get analytics for the last 7 days
            end_date = datetime.now()
            start_date = end_date - timedelta(days=7)

            logs = await client.get_app_logs_chart(
                appId=app_id,
                dateStart=start_date.strftime("%Y-%m-%d"),
                dateEnd=end_date.strftime("%Y-%m-%d"),
                source=["api"],
                userTimespan="day",
                chatTimespan="day",
                appTimespan="day"
            )
            logs.raise_for_status()
            data = logs.json()
            print("App Analytics (last 7 days):")
            print(f"  Data keys: {list(data['data'].keys())}")
        except Exception as e:
            print(f"Error: {e}")


async def multiple_requests():
    """Run multiple async requests concurrently."""
    concepts = ["AI", "Machine Learning", "Deep Learning"]

    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        # Create multiple chat completions concurrently
        tasks = [
            client.create_chat_completion(
                messages=[{"role": "user", "content": f"What is {concept}?"}],
                stream=False
            )
            for concept in concepts
        ]

        responses = await asyncio.gather(*tasks)

        for concept, response in zip(concepts, responses):
            response.raise_for_status()
            result = response.json()
            print(f"\n{concept}:")
            print(f"  {result['choices'][0]['message']['content'][:100]}...")


async def chat_with_variables():
    """Async chat with template variables."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Introduction"}],
            variables={
                "user_name": "Alice",
                "company": "Tech Corp",
                "language": "English"
            },
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print("Response with variables:", result['choices'][0]['message']['content'])


async def main():
    """Run all examples."""
    print("=== Simple Chat ===")
    try:
        await simple_chat()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Streaming Chat ===")
    try:
        await streaming_chat()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Chat with Context ===")
    try:
        await chat_with_context()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Multiple Requests (Concurrent) ===")
    try:
        await multiple_requests()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Chat with Variables ===")
    try:
        await chat_with_variables()
    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    asyncio.run(main())
```

## Async with FastAPI

```python
import os

from fastapi import FastAPI
from fastgpt_client import AsyncChatClient

app = FastAPI()


# Initialize one shared client at startup
@app.on_event("startup")
async def startup():
    app.state.client = AsyncChatClient(
        api_key=os.getenv("API_KEY"),
        base_url=os.getenv("BASE_URL")
    )


@app.on_event("shutdown")
async def shutdown():
    await app.state.client.close()


@app.post("/chat")
async def chat(message: str):
    response = await app.state.client.create_chat_completion(
        messages=[{"role": "user", "content": message}],
        stream=False
    )
    response.raise_for_status()
    result = response.json()
    return {"response": result['choices'][0]['message']['content']}
```
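
Note that recent FastAPI versions deprecate `@app.on_event` in favor of a lifespan handler. An equivalent sketch using that API:

```python
import os
from contextlib import asynccontextmanager

from fastapi import FastAPI
from fastgpt_client import AsyncChatClient


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: create one shared client for the app's lifetime
    app.state.client = AsyncChatClient(
        api_key=os.getenv("API_KEY"),
        base_url=os.getenv("BASE_URL")
    )
    yield
    # Shutdown: release the underlying connection pool
    await app.state.client.close()


app = FastAPI(lifespan=lifespan)
```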

## See Also

- [Async Clients](../api/async_clients.md) - Complete async API reference
- [Streaming](streaming.md) - Streaming examples
- [Basic Usage](basic_usage.md) - Synchronous examples

docs/examples/basic_usage.md (new file)

# Basic Usage Example

A complete example demonstrating basic usage of the FastGPT Python SDK.

## Setup

First, install the SDK and create a `.env` file:

```bash
pip install fastgpt-client python-dotenv
```

```bash
# .env
API_KEY=fastgpt-xxxxx
BASE_URL=http://localhost:3000
```

## Complete Example

```python
"""Basic usage example for FastGPT Python SDK."""

import os

from dotenv import load_dotenv

from fastgpt_client import ChatClient

load_dotenv()

# Configure your API key and base URL
API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("BASE_URL")


def simple_chat():
    """Simple chat completion example."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello! What's AI?"}],
            stream=False
        )
        response.raise_for_status()
        result = response.json()

        print("Response:", result['choices'][0]['message']['content'])


def chat_with_history():
    """Chat with message history example."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = client.create_chat_completion(
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello!"},
                {"role": "assistant", "content": "Hi there! How can I help you?"},
                {"role": "user", "content": "What's the capital of France?"}
            ],
            stream=False
        )
        response.raise_for_status()
        result = response.json()

        print("Response:", result['choices'][0]['message']['content'])


def multi_turn_conversation():
    """Multi-turn conversation example."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        conversation = [
            {"role": "user", "content": "What's Python?"}
        ]

        # First turn
        response = client.create_chat_completion(
            messages=conversation,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        assistant_message = result['choices'][0]['message']['content']
        print(f"AI: {assistant_message}")

        # Add assistant response to conversation
        conversation.append({"role": "assistant", "content": assistant_message})

        # Second turn
        conversation.append({"role": "user", "content": "Give me an example"})
        response = client.create_chat_completion(
            messages=conversation,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        assistant_message = result['choices'][0]['message']['content']
        print(f"AI: {assistant_message}")


if __name__ == "__main__":
    print("=== Simple Chat ===")
    try:
        simple_chat()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Chat with History ===")
    try:
        chat_with_history()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Multi-turn Conversation ===")
    try:
        multi_turn_conversation()
    except Exception as e:
        print(f"Error: {e}")
```

## Running the Example

```bash
python basic_usage.py
```

## Expected Output

```
=== Simple Chat ===
Response: AI (Artificial Intelligence) is a branch of computer science...

=== Chat with History ===
Response: The capital of France is Paris.

=== Multi-turn Conversation ===
AI: Python is a high-level programming language...
AI: Here's a simple example: print("Hello, World!")
```

## Key Concepts

### Messages Array

The `messages` parameter is an array of message objects, each with a `role` (`system`, `user`, or `assistant`) and `content`:

```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi there!"},
    {"role": "user", "content": "How are you?"}
]
```

### Response Structure

The response follows OpenAI's chat completion format:

```python
{
    "id": "chatcmpl-xxx",
    "object": "chat.completion",
    "created": 1234567890,
    "model": "gpt-3.5-turbo",
    "choices": [
        {
            "index": 0,
            "message": {
                "role": "assistant",
                "content": "Response text here..."
            },
            "finish_reason": "stop"
        }
    ],
    "usage": {
        "prompt_tokens": 20,
        "completion_tokens": 15,
        "total_tokens": 35
    }
}
```
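
When you only need the reply text and token counts, it helps to extract them defensively. A small illustrative helper (field names follow the payload above):

```python
def extract_reply(result: dict) -> tuple[str, int]:
    """Return (reply_text, total_tokens) from a chat completion payload."""
    choices = result.get("choices", [])
    text = choices[0]["message"]["content"] if choices else ""
    total_tokens = result.get("usage", {}).get("total_tokens", 0)
    return text, total_tokens
```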

### Error Handling

Always handle potential errors:

```python
from fastgpt_client import ChatClient
from fastgpt_client.exceptions import APIError, AuthenticationError

try:
    with ChatClient(api_key=API_KEY) as client:
        response = client.create_chat_completion(...)
        response.raise_for_status()
        result = response.json()
except AuthenticationError:
    print("Invalid API key")
except APIError as e:
    print(f"API error: {e}")
```
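
Transient failures (timeouts, 5xx responses) are often worth retrying. A minimal backoff sketch, assuming `APIError` covers the retryable server errors:

```python
import time

from fastgpt_client import ChatClient
from fastgpt_client.exceptions import APIError


def chat_with_retry(messages, retries=3, backoff=1.0):
    """Retry a completion with exponential backoff between attempts."""
    for attempt in range(retries):
        try:
            with ChatClient(api_key=API_KEY) as client:
                response = client.create_chat_completion(
                    messages=messages,
                    stream=False
                )
                response.raise_for_status()
                return response.json()
        except APIError:
            if attempt == retries - 1:
                raise  # give up after the last attempt
            time.sleep(backoff * 2 ** attempt)  # wait 1s, 2s, 4s, ...
```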

## See Also

- [Streaming Example](streaming.md) - Learn how to use streaming responses
- [Async Usage](async_usage.md) - Asynchronous examples
- [Chat Context](chat_context.md) - Managing conversation context

docs/examples/chat_context.md (new file)

# Chat Context Example

Learn how to maintain conversation context across multiple requests using `chatId`.

## Understanding chatId

The `chatId` parameter allows you to:

- **Maintain conversation history** - The AI remembers previous messages
- **Resume conversations** - Continue an existing chat session
- **Organize conversations** - Keep separate conversations for different users and topics

## Basic Context Usage

```python
from fastgpt_client import ChatClient

with ChatClient(api_key="fastgpt-xxxxx") as client:
    chat_id = "conversation_123"

    # First message
    response = client.create_chat_completion(
        messages=[{"role": "user", "content": "My name is Alice"}],
        chatId=chat_id,
        stream=False
    )
    result = response.json()
    print(f"AI: {result['choices'][0]['message']['content']}")

    # Second message - the AI remembers the name
    response = client.create_chat_completion(
        messages=[{"role": "user", "content": "What's my name?"}],
        chatId=chat_id,  # Same chatId maintains context
        stream=False
    )
    result = response.json()
    print(f"AI: {result['choices'][0]['message']['content']}")
    # Output: "Your name is Alice!"
```

## Multi-Turn Conversation Example

```python
"""Multi-turn conversation with chatId."""

import os

from dotenv import load_dotenv

from fastgpt_client import ChatClient

load_dotenv()

API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("BASE_URL")


def multi_turn_conversation():
    """Maintain context across multiple turns."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        chat_id = "my_conversation"

        conversation = [
            "Hi, I'm learning Python.",
            "What are the main data types in Python?",
            "Can you give me an example of a list?",
            "How do I add items to a list?",
        ]

        for user_message in conversation:
            print(f"\nUser: {user_message}")

            response = client.create_chat_completion(
                messages=[{"role": "user", "content": user_message}],
                chatId=chat_id,
                stream=False
            )
            response.raise_for_status()
            result = response.json()

            ai_message = result['choices'][0]['message']['content']
            print(f"AI: {ai_message}")


if __name__ == "__main__":
    multi_turn_conversation()
```

## Managing Multiple Conversations

```python
def manage_multiple_conversations():
    """Handle separate conversations for different users."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        # Conversation with user A
        chat_id_a = "user_123_conversation"
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "I like cats"}],
            chatId=chat_id_a,
            stream=False
        )

        # Conversation with user B
        chat_id_b = "user_456_conversation"
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "I like dogs"}],
            chatId=chat_id_b,
            stream=False
        )

        # Continue with user A
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "What did I say I like?"}],
            chatId=chat_id_a,
            stream=False
        )
        # Response: "You said you like cats"
```

## Retrieving Chat History

```python
def get_conversation_history():
    """Retrieve and display chat history."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID")

        # Get all chat histories
        response = client.get_chat_histories(
            appId=app_id,
            offset=0,
            pageSize=20,
            source="api"
        )
        response.raise_for_status()
        data = response.json()

        print("Chat Histories:")
        for chat in data['data']['list']:
            print(f"  - {chat['title']}: {chat['chatId']}")

        # Get records for a specific chat
        chat_id = data['data']['list'][0]['chatId']
        records_response = client.get_chat_records(
            appId=app_id,
            chatId=chat_id,
            offset=0,
            pageSize=10
        )
        records_response.raise_for_status()
        records_data = records_response.json()

        print(f"\nRecords for {chat_id}:")
        for record in records_data['data']['list']:
            content = record.get('content', {})
            text = content.get('text', 'N/A')
            print(f"  - {text[:50]}...")
```

## Updating Chat Title

```python
def update_chat_title():
    """Update the title of a chat."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID")
        chat_id = "my_conversation"

        # Update the chat title
        response = client.update_chat_history(
            appId=app_id,
            chatId=chat_id,
            customTitle="Python Learning Session"
        )
        response.raise_for_status()
        print("Chat title updated!")
```

## Pinning a Chat

```python
def pin_chat():
    """Pin a chat to the top of the list."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID")
        chat_id = "my_conversation"

        # Pin the chat
        response = client.update_chat_history(
            appId=app_id,
            chatId=chat_id,
            top=True
        )
        response.raise_for_status()
        print("Chat pinned!")
```

## Deleting a Chat

```python
def delete_chat():
    """Delete a chat conversation."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID")
        chat_id = "old_conversation"

        # Delete the chat
        response = client.delete_chat_history(
            appId=app_id,
            chatId=chat_id
        )
        response.raise_for_status()
        print("Chat deleted!")
```

## Best Practices

1. **Use meaningful chat IDs** - Include user IDs or session IDs (see the ID helper sketch below)
2. **Store chat IDs** - Keep them in your database for later retrieval
3. **Handle missing chats** - Chat IDs may expire or be deleted
4. **Clean up old chats** - Delete or archive conversations you no longer need
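
A meaningful ID can encode both the user and the session. A tiny illustrative helper (the `user_{id}_session_{suffix}` scheme is just a convention, not something FastGPT requires):

```python
import uuid


def new_chat_id(user_id: str) -> str:
    """Build a chat ID that encodes the user plus a unique session suffix."""
    return f"user_{user_id}_session_{uuid.uuid4().hex[:8]}"


chat_id = new_chat_id("123")  # e.g. "user_123_session_1f3a9c2b"
```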
```python
def robust_conversation():
    """Handle chatId gracefully."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        chat_id = "user_123_session_456"

        try:
            response = client.create_chat_completion(
                messages=[{"role": "user", "content": "Hello"}],
                chatId=chat_id,
                stream=False
            )
            response.raise_for_status()
            result = response.json()

            # Store the chatId for future use
            returned_chat_id = result.get('chatId', chat_id)
            print(f"Chat ID: {returned_chat_id}")

        except Exception as e:
            # Start a new conversation if this one fails
            print(f"Error with chatId: {e}")
            print("Starting new conversation...")
            # Create a new chat without a chatId
```

## See Also

- [ChatClient API](../api/chat_client.md) - Complete method reference
- [Variables](variables.md) - Using template variables
- [Streaming](streaming.md) - Streaming with context

docs/examples/streaming.md (new file)

# Streaming Example

Learn how to use streaming responses with the FastGPT Python SDK.

## Why Use Streaming?

Streaming allows you to:

- **Display real-time responses** - Show text as it's generated
- **Reduce perceived latency** - Users see content immediately
- **Improve user experience** - Conversations feel more interactive and engaging

## Basic Streaming

```python
import json

from fastgpt_client import ChatClient

with ChatClient(api_key="fastgpt-xxxxx") as client:
    response = client.create_chat_completion(
        messages=[{"role": "user", "content": "Tell me a short story"}],
        stream=True
    )

    print("Story: ", end="", flush=True)
    for line in response.iter_lines():
        if line.startswith("data:"):
            data = line[5:].strip()
            if data and data != "[DONE]":
                chunk = json.loads(data)
                if "choices" in chunk and chunk["choices"]:
                    delta = chunk["choices"][0].get("delta", {})
                    content = delta.get("content", "")
                    if content:
                        print(content, end="", flush=True)
    print()
```
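
The same parsing boilerplate appears in every streaming example, so you may prefer to factor it into a small generator. A sketch (not part of the SDK itself):

```python
import json


def iter_content(lines):
    """Yield content fragments from an OpenAI-style SSE line stream."""
    for line in lines:
        if not line.startswith("data:"):
            continue
        data = line[5:].strip()
        if not data or data == "[DONE]":
            continue
        chunk = json.loads(data)
        if chunk.get("choices"):
            content = chunk["choices"][0].get("delta", {}).get("content", "")
            if content:
                yield content


# Usage:
# for fragment in iter_content(response.iter_lines()):
#     print(fragment, end="", flush=True)
```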

## Complete Streaming Example

```python
"""Streaming chat completion example."""

import json
import os

from dotenv import load_dotenv

from fastgpt_client import ChatClient

load_dotenv()

API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("BASE_URL")


def stream_chat():
    """Simple streaming chat completion."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me a short story about AI"}],
            stream=True
        )

        print("\n=== Streaming Response ===\n")
        for line in response.iter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)
        print("\n")


def stream_with_progress():
    """Stream with a running chunk counter."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "Explain quantum computing"}],
            stream=True
        )

        print("\n=== Streaming with Progress ===\n")
        chunk_count = 0
        for line in response.iter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)
                            chunk_count += 1
        print(f"\n\nTotal chunks: {chunk_count}")


def stream_with_buffer():
    """Stream with word buffering (print complete words only)."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "What is machine learning?"}],
            stream=True
        )

        print("\n=== Buffered Streaming ===\n")
        buffer = ""
        for line in response.iter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            buffer += content
                            # Print complete words
                            if " " in buffer:
                                parts = buffer.split(" ", 1)
                                print(parts[0] + " ", end="", flush=True)
                                buffer = parts[1] if len(parts) > 1 else ""
        # Print remaining content
        if buffer:
            print(buffer)
        print()


if __name__ == "__main__":
    try:
        stream_chat()
    except Exception as e:
        print(f"Error: {e}")

    try:
        stream_with_progress()
    except Exception as e:
        print(f"Error: {e}")

    try:
        stream_with_buffer()
    except Exception as e:
        print(f"Error: {e}")
```

## Async Streaming

```python
import asyncio
import json

from fastgpt_client import AsyncChatClient


async def stream_async():
    """Async streaming example."""
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me about async/await"}],
            stream=True
        )

        async for line in response.aiter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)


asyncio.run(stream_async())
```

## Streaming Event Types

FastGPT sends multiple SSE (Server-Sent Events) event types. The common ones are:

- `data` - Standard response chunks (OpenAI-compatible)
- `answer` - Main chat response content
- `fastAnswer` - Quick reply content
- `flowNodeStatus` - Workflow node status updates
- `interactive` - Interactive node prompts
- `error` - Error events

For more details on event types, see [Streaming Events](../advanced/streaming_events.md).
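
In standard SSE framing, a named event arrives as an `event:` line followed by its `data:` payload. A minimal dispatch sketch under that assumption (the handlers here are illustrative):

```python
import json


def dispatch_sse(lines):
    """Route each SSE payload to simple handlers keyed by the preceding event name."""
    event = "answer"  # assumed default when no event: line precedes the data
    for line in lines:
        if line.startswith("event:"):
            event = line[6:].strip()
        elif line.startswith("data:"):
            data = line[5:].strip()
            if not data or data == "[DONE]":
                continue
            payload = json.loads(data)
            if event == "flowNodeStatus":
                print(f"[node status] {payload}")
            elif event == "error":
                print(f"[error] {payload}")
            else:
                print(f"[{event}] {payload}")
```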

## Best Practices

1. **Always flush output**: Use `flush=True` when printing
2. **Handle connection errors**: Streaming can fail mid-response
3. **Use context managers**: Ensures proper cleanup
4. **Buffer for better formatting**: Consider buffering for word boundaries

```python
def robust_stream():
    """Handle streaming errors gracefully."""
    try:
        with ChatClient(api_key=API_KEY) as client:
            response = client.create_chat_completion(
                messages=[{"role": "user", "content": "Hello"}],
                stream=True
            )

            for line in response.iter_lines():
                if line:
                    try:
                        data = line[5:].strip() if line.startswith("data:") else line
                        if data and data != "[DONE]":
                            chunk = json.loads(data)
                            # Process the chunk
                            if "choices" in chunk and chunk["choices"]:
                                delta = chunk["choices"][0].get("delta", {})
                                content = delta.get("content", "")
                                if content:
                                    print(content, end="", flush=True)
                    except json.JSONDecodeError:
                        # Skip non-JSON keep-alive or comment lines
                        continue

    except Exception as e:
        print(f"\nStreaming error: {e}")
```

## See Also

- [Streaming Events](../advanced/streaming_events.md) - Advanced SSE event handling
- [Async Usage](async_usage.md) - Async streaming examples
- [Error Handling](../advanced/error_handling.md) - Robust error handling

docs/examples/variables.md (new file)

# Variables Example

Learn how to use template variables in your FastGPT workflows.

## What are Variables?

Variables let you dynamically replace placeholders defined in your FastGPT workflows. This is useful for:

- **Personalizing responses** - Insert user names, company names, etc.
- **Conditional content** - Show different content based on user input
- **Multi-language support** - Switch languages dynamically
- **Configuration** - Pass settings to your workflow

The keys you pass should match the variable names defined in your FastGPT app's workflow.

## Basic Variable Usage

```python
from fastgpt_client import ChatClient

with ChatClient(api_key="fastgpt-xxxxx") as client:
    response = client.create_chat_completion(
        messages=[{"role": "user", "content": "Introduction"}],
        variables={
            "user_name": "Alice",
            "company": "Tech Corp",
            "language": "English"
        },
        stream=False
    )
    result = response.json()
    print(result['choices'][0]['message']['content'])
    # Output might be: "Hello Alice! Welcome to Tech Corp..."
```

## Complete Variables Example

```python
"""Using template variables in FastGPT workflows."""

import json
import os

from dotenv import load_dotenv

from fastgpt_client import ChatClient

load_dotenv()

API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("BASE_URL")


def personalized_greeting():
    """Personalized greeting with variables."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "Greet me"}],
            variables={
                "user_name": "Alice",
                "time_of_day": "morning",
                "language": "English"
            },
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print("Response:", result['choices'][0]['message']['content'])


def dynamic_content():
    """Dynamic content based on variables."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        # Generate content for different audiences
        audiences = [
            {
                "audience": "technical",
                "detail_level": "high",
                "include_examples": True
            },
            {
                "audience": "business",
                "detail_level": "low",
                "include_examples": False
            },
            {
                "audience": "general",
                "detail_level": "medium",
                "include_examples": True
            }
        ]

        for config in audiences:
            print(f"\n--- Content for {config['audience']} audience ---")
            response = client.create_chat_completion(
                messages=[{"role": "user", "content": "Explain AI"}],
                variables=config,
                stream=False
            )
            response.raise_for_status()
            result = response.json()
            print(result['choices'][0]['message']['content'][:200] + "...")


def multi_language():
    """Multi-language support with variables."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        languages = [
            {"language": "English", "user_name": "Alice"},
            {"language": "Spanish", "user_name": "Carlos"},
            {"language": "French", "user_name": "Marie"},
        ]

        for lang_config in languages:
            print(f"\n--- Response in {lang_config['language']} ---")
            response = client.create_chat_completion(
                messages=[{"role": "user", "content": "Say hello"}],
                variables=lang_config,
                stream=False
            )
            response.raise_for_status()
            result = response.json()
            print(result['choices'][0]['message']['content'])


def context_aware_response():
    """Response with contextual variables."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        # User context
        user_context = {
            "user_name": "Alice",
            "user_level": "beginner",
            "topic": "Python programming",
            "goal": "learn data analysis"
        }

        # Generate a personalized learning plan
        response = client.create_chat_completion(
            messages=[
                {
                    "role": "user",
                    "content": "Create a learning plan for me"
                }
            ],
            variables=user_context,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print("Personalized Learning Plan:")
        print(result['choices'][0]['message']['content'])


def workflow_with_variables():
    """Complete workflow using multiple variables."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        # Scenario: customer support chat
        customer_context = {
            "customer_name": "John Doe",
            "company": "Acme Corp",
            "plan": "enterprise",
            "issue_type": "technical",
            "urgency": "high",
            "language": "English",
            "account_id": "12345"
        }

        # Generate a personalized support response
        response = client.create_chat_completion(
            messages=[
                {
                    "role": "user",
                    "content": "I need help with my account"
                }
            ],
            variables=customer_context,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print("Support Response:")
        print(result['choices'][0]['message']['content'])


def streaming_with_variables():
    """Streaming response with variables."""
    with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me a story"}],
            variables={
                "genre": "science fiction",
                "length": "short",
                "protagonist": "AI robot"
            },
            stream=True
        )

        print("Streaming story: ", end="", flush=True)
        for line in response.iter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)
        print()


if __name__ == "__main__":
    print("=== Personalized Greeting ===")
    try:
        personalized_greeting()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Dynamic Content ===")
    try:
        dynamic_content()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Multi-Language ===")
    try:
        multi_language()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Context-Aware Response ===")
    try:
        context_aware_response()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Workflow with Variables ===")
    try:
        workflow_with_variables()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Streaming with Variables ===")
    try:
        streaming_with_variables()
    except Exception as e:
        print(f"Error: {e}")
```

## Common Use Cases

### 1. Personalization

```python
variables = {
    "user_name": "Alice",
    "company": "Tech Corp",
    "role": "Developer"
}
```

### 2. Localization

```python
variables = {
    "language": "Spanish",
    "region": "Latin America",
    "currency": "USD"
}
```

### 3. Conditional Logic

```python
variables = {
    "user_tier": "premium",
    "feature_set": "advanced",
    "support_level": "24/7"
}
```

### 4. Content Generation

```python
variables = {
    "topic": "AI",
    "tone": "professional",
    "length": "500",
    "format": "blog post"
}
```
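
Whichever dict you build, it is passed the same way. A short sketch (reusing the `API_KEY`/`BASE_URL` constants from the example above, and assuming the workflow defines matching variable keys):

```python
from fastgpt_client import ChatClient

with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
    response = client.create_chat_completion(
        messages=[{"role": "user", "content": "Write the post"}],
        variables={
            "topic": "AI",
            "tone": "professional",
            "length": "500",
            "format": "blog post"
        },
        stream=False
    )
    response.raise_for_status()
    print(response.json()['choices'][0]['message']['content'])
```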

## Best Practices

1. **Use descriptive variable names** - `user_name` instead of `n`
2. **Validate variables** - Ensure required variables are provided
3. **Document your variables** - Keep a list of expected variables
4. **Use defaults** - Provide fallback values when possible

```python
def get_variables(user_input):
    """Validate and prepare variables."""
    required_vars = ["user_name", "language"]
    variables = {
        "user_name": user_input.get("name", "Guest"),
        "language": user_input.get("language", "English"),
        "company": user_input.get("company", ""),
    }

    # Validate required variables
    missing = [var for var in required_vars if not variables.get(var)]
    if missing:
        raise ValueError(f"Missing required variables: {missing}")

    return variables
```
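
Wiring the validator into a request might look like this (a sketch reusing `get_variables` from above):

```python
user_input = {"name": "Alice", "language": "English"}

with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
    response = client.create_chat_completion(
        messages=[{"role": "user", "content": "Greet me"}],
        variables=get_variables(user_input),
        stream=False
    )
    response.raise_for_status()
    print(response.json()['choices'][0]['message']['content'])
```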

## See Also

- [ChatClient API](../api/chat_client.md) - Complete API reference
- [Chat Context](chat_context.md) - Managing conversation context
- [Streaming](streaming.md) - Streaming responses