# Async Usage Example

Complete examples demonstrating asynchronous usage of the FastGPT Python SDK.

## Why Use Async?

Async is beneficial when you need to:

- Handle multiple concurrent requests efficiently (see the sketch below)
- Integrate with async frameworks (FastAPI, aiohttp)
- Build real-time streaming applications
- Maximize throughput in I/O-bound applications
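
The concurrency benefit is easiest to see with a small sketch that uses only the standard library. Three simulated 1-second I/O calls finish in about 1 second total, because the event loop overlaps their waits:

```python
import asyncio
import time


async def fake_request(name: str, seconds: float) -> str:
    """Stand-in for an I/O-bound call such as a chat completion."""
    await asyncio.sleep(seconds)  # the event loop runs other tasks while this waits
    return name


async def main():
    start = time.perf_counter()
    # Three 1-second "requests" complete in ~1 second total, not ~3.
    results = await asyncio.gather(*(fake_request(f"req-{i}", 1.0) for i in range(3)))
    print(results, f"elapsed: {time.perf_counter() - start:.1f}s")


asyncio.run(main())
```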

## Installation

```bash
pip install fastgpt-client
```

## Basic Async Chat

```python
import asyncio
from fastgpt_client import AsyncChatClient


async def simple_chat():
    """Simple async chat completion."""
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello! What's AI?"}],
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(result['choices'][0]['message']['content'])


asyncio.run(simple_chat())
```
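
Network calls can hang, so it is often worth bounding each request. A minimal sketch using standard `asyncio.wait_for` around the call shown above (the 30-second limit is an arbitrary example; the client may also offer its own timeout configuration):

```python
import asyncio
from fastgpt_client import AsyncChatClient


async def chat_with_timeout():
    """Abort the request if no response arrives within 30 seconds."""
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        try:
            response = await asyncio.wait_for(
                client.create_chat_completion(
                    messages=[{"role": "user", "content": "Hello!"}],
                    stream=False,
                ),
                timeout=30.0,
            )
            response.raise_for_status()
            print(response.json()['choices'][0]['message']['content'])
        except asyncio.TimeoutError:
            print("Request timed out")


asyncio.run(chat_with_timeout())
```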

## Async Streaming

```python
import asyncio
import json
from fastgpt_client import AsyncChatClient


async def stream_chat():
    """Async streaming chat completion."""
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me a short story"}],
            stream=True
        )

        async for line in response.aiter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)
        print()


asyncio.run(stream_chat())
```
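
The SSE parsing above is easy to factor into a reusable async generator. A minimal sketch assuming the same `data:`-prefixed line format shown above; `stream_text` is a hypothetical helper, not part of the SDK:

```python
import asyncio
import json
from typing import AsyncIterator

from fastgpt_client import AsyncChatClient


async def stream_text(client: AsyncChatClient, prompt: str) -> AsyncIterator[str]:
    """Yield content deltas from a streaming chat completion."""
    response = await client.create_chat_completion(
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    async for line in response.aiter_lines():
        if not line.startswith("data:"):
            continue
        data = line[5:].strip()
        if not data or data == "[DONE]":
            continue
        chunk = json.loads(data)
        for choice in chunk.get("choices", []):
            content = choice.get("delta", {}).get("content", "")
            if content:
                yield content


async def main():
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        async for piece in stream_text(client, "Tell me a short story"):
            print(piece, end="", flush=True)
        print()


asyncio.run(main())
```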

## Multiple Concurrent Requests

```python
import asyncio
from fastgpt_client import AsyncChatClient


async def fetch_multiple():
    """Run multiple requests concurrently."""
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        concepts = ["AI", "Machine Learning", "Deep Learning"]

        # Create one task per concept
        tasks = [
            client.create_chat_completion(
                messages=[{"role": "user", "content": f"What is {concept}?"}],
                stream=False
            )
            for concept in concepts
        ]

        # Execute all tasks concurrently
        responses = await asyncio.gather(*tasks)

        # Process results
        for concept, response in zip(concepts, responses):
            response.raise_for_status()
            result = response.json()
            print(f"\n{concept}:")
            print(f"  {result['choices'][0]['message']['content'][:100]}...")


asyncio.run(fetch_multiple())
```
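
`asyncio.gather` launches everything at once; with many prompts you may want to cap in-flight requests and keep one failure from cancelling the rest. A sketch using only standard asyncio (`fetch_bounded` is a hypothetical helper, and the limit of 5 is an arbitrary example):

```python
import asyncio
from fastgpt_client import AsyncChatClient


async def fetch_bounded(prompts: list[str], limit: int = 5):
    """Run many requests with at most `limit` in flight at a time."""
    semaphore = asyncio.Semaphore(limit)

    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:

        async def one(prompt: str):
            async with semaphore:  # wait for a free slot before sending
                response = await client.create_chat_completion(
                    messages=[{"role": "user", "content": prompt}],
                    stream=False,
                )
                response.raise_for_status()
                return response.json()

        # return_exceptions=True collects failures instead of cancelling siblings
        return await asyncio.gather(
            *(one(p) for p in prompts), return_exceptions=True
        )


prompts = [f"What is {c}?" for c in ["AI", "Machine Learning", "Deep Learning"]]
results = asyncio.run(fetch_bounded(prompts))
for r in results:
    if isinstance(r, Exception):
        print(f"failed: {r}")
    else:
        print(r['choices'][0]['message']['content'][:60])
```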

## Complete Async Example

```python
"""Complete async usage example for FastGPT Python SDK."""

import asyncio
import json
import os
from datetime import datetime, timedelta

from dotenv import load_dotenv

from fastgpt_client import AsyncChatClient, AsyncAppClient

load_dotenv()

API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("BASE_URL")


async def simple_chat():
    """Simple async chat completion example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello! What's AI?"}],
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print("Response:", result['choices'][0]['message']['content'])


async def streaming_chat():
    """Async streaming chat completion example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me a short story"}],
            stream=True
        )

        print("Streaming response: ", end="", flush=True)
        async for line in response.aiter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)
        print()


async def chat_with_context():
    """Async chat with context using chatId."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        chat_id = "my_async_chat_123"

        # First message
        print("User: What's AI?")
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "What's AI?"}],
            chatId=chat_id,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(f"AI: {result['choices'][0]['message']['content']}\n")

        # Second message (continues the conversation)
        print("User: Tell me more about it")
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me more about it"}],
            chatId=chat_id,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(f"AI: {result['choices'][0]['message']['content']}")


async def get_histories():
    """Async get chat histories."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID", "default-app-id")

        try:
            histories = await client.get_chat_histories(
                appId=app_id,
                offset=0,
                pageSize=20,
                source="api"
            )
            histories.raise_for_status()
            data = histories.json()
            print(f"Total chats: {data['data']['total']}")
        except Exception as e:
            print(f"Error: {e}")


async def get_app_analytics():
    """Async get app analytics."""
    async with AsyncAppClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID", "default-app-id")

        try:
            # Get analytics for the last 7 days
            end_date = datetime.now()
            start_date = end_date - timedelta(days=7)

            logs = await client.get_app_logs_chart(
                appId=app_id,
                dateStart=start_date.strftime("%Y-%m-%d"),
                dateEnd=end_date.strftime("%Y-%m-%d"),
                source=["api"],
                userTimespan="day",
                chatTimespan="day",
                appTimespan="day"
            )
            logs.raise_for_status()
            data = logs.json()
            print("App Analytics (last 7 days):")
            print(f"  Data keys: {list(data['data'].keys())}")
        except Exception as e:
            print(f"Error: {e}")


async def multiple_requests():
    """Run multiple async requests concurrently."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        concepts = ["AI", "Machine Learning", "Deep Learning"]

        # Create multiple chat completions concurrently
        tasks = [
            client.create_chat_completion(
                messages=[{"role": "user", "content": f"What is {concept}?"}],
                stream=False
            )
            for concept in concepts
        ]

        responses = await asyncio.gather(*tasks)

        for concept, response in zip(concepts, responses):
            response.raise_for_status()
            result = response.json()
            print(f"\n{concept}:")
            print(f"  {result['choices'][0]['message']['content'][:100]}...")


async def chat_with_variables():
    """Async chat with template variables."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Introduction"}],
            variables={
                "user_name": "Alice",
                "company": "Tech Corp",
                "language": "English"
            },
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print("Response with variables:", result['choices'][0]['message']['content'])


async def main():
    """Run all examples."""
    print("=== Simple Chat ===")
    try:
        await simple_chat()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Streaming Chat ===")
    try:
        await streaming_chat()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Chat with Context ===")
    try:
        await chat_with_context()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Chat Histories ===")
    try:
        await get_histories()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== App Analytics ===")
    try:
        await get_app_analytics()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Multiple Requests (Concurrent) ===")
    try:
        await multiple_requests()
    except Exception as e:
        print(f"Error: {e}")

    print("\n=== Chat with Variables ===")
    try:
        await chat_with_variables()
    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    asyncio.run(main())
```
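
The complete example reads its settings with `python-dotenv` (installed separately via `pip install python-dotenv`). A minimal `.env` file might look like this; all values below are placeholders:

```bash
# Placeholder values; substitute your real key, host, and app ID
API_KEY=fastgpt-xxxxx
BASE_URL=https://your-fastgpt-host/api
APP_ID=your-app-id
```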

## Async with FastAPI

Create one client for the app's lifetime and share it across requests. FastAPI's lifespan handler replaces the older, now-deprecated `@app.on_event("startup")`/`@app.on_event("shutdown")` hooks:

```python
import os
from contextlib import asynccontextmanager

from fastapi import FastAPI
from fastgpt_client import AsyncChatClient


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Create one shared client at startup...
    app.state.client = AsyncChatClient(
        api_key=os.getenv("API_KEY"),
        base_url=os.getenv("BASE_URL")
    )
    yield
    # ...and close it at shutdown
    await app.state.client.close()


app = FastAPI(lifespan=lifespan)


@app.post("/chat")
async def chat(message: str):
    response = await app.state.client.create_chat_completion(
        messages=[{"role": "user", "content": message}],
        stream=False
    )
    response.raise_for_status()
    result = response.json()
    return {"response": result['choices'][0]['message']['content']}
```
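
Because `message` is declared as a bare `str`, FastAPI reads it from the query string. A quick way to exercise the endpoint, assuming the app is running locally on port 8000 and `httpx` is installed:

```python
import httpx

# POST /chat with the message passed as a query parameter
resp = httpx.post("http://localhost:8000/chat", params={"message": "Hello!"})
resp.raise_for_status()
print(resp.json()["response"])
```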

## See Also

- [Async Clients](../api/async_clients.md) - Complete async API reference
- [Streaming](streaming.md) - Streaming examples
- [Basic Usage](basic_usage.md) - Synchronous examples