"""Async usage example for FastGPT Python SDK."""
|
|
|
|
import asyncio
|
|
import json
|
|
import os
|
|
from dotenv import load_dotenv
|
|
|
|
from fastgpt_client import AsyncChatClient, AsyncAppClient
|
|
|
|
load_dotenv()
|
|
|
|
# Configure your API key and base URL
|
|
API_KEY = os.getenv("API_KEY")
|
|
BASE_URL = os.getenv("BASE_URL")
|
|
|
|
|
|
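
# Not part of the original example: a small sanity check so the calls below fail
# fast with a clear message when the environment is not configured (these examples
# assume API_KEY and BASE_URL point at a reachable FastGPT deployment).
if not API_KEY or not BASE_URL:
    raise RuntimeError("Set API_KEY and BASE_URL in your environment or .env file")
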
async def simple_chat():
    """Simple async chat completion example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello! What's AI?"}],
            stream=False
        )
        response.raise_for_status()
        result = response.json()

        print("Response:", result['choices'][0]['message']['content'])

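# The non-streaming payload above follows the OpenAI-style chat completion schema,
# which is why the assistant's reply is read from result['choices'][0]['message']['content'].
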
async def streaming_chat():
    """Async streaming chat completion example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me a short story"}],
            stream=True
        )

        print("Streaming response: ", end="")
        async for line in response.aiter_lines():
            if line.startswith("data:"):
                data = line[5:].strip()
                if data and data != "[DONE]":
                    chunk = json.loads(data)
                    if "choices" in chunk and chunk["choices"]:
                        delta = chunk["choices"][0].get("delta", {})
                        content = delta.get("content", "")
                        if content:
                            print(content, end="", flush=True)
        print()

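# Wire-format note (added for clarity): with stream=True the endpoint sends
# Server-Sent Events, one "data: {json chunk}" line per delta and a final
# "data: [DONE]" sentinel, which is why streaming_chat() strips the "data:"
# prefix, skips empty payloads and [DONE], and reads choices[0].delta.content.
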
async def chat_with_context():
    """Async chat with context using chatId example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        chat_id = os.getenv("CHAT_ID")

        # First message
        print("User: What's AI?")
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "What's AI?"}],
            chatId=chat_id,
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(f"AI: {result['choices'][0]['message']['content']}\n")

        # Second message (continues the conversation)
        print("User: Tell me more about it")
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Tell me more about it"}],
            chatId=chat_id,  # Same chatId maintains context
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(f"AI: {result['choices'][0]['message']['content']}")

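# Here chat_id is taken from the environment so the example is repeatable; when
# starting a brand-new conversation you would normally supply a fresh, unique id
# instead, e.g. (illustrative only, any unique string works):
#
#   import uuid
#   chat_id = f"example-{uuid.uuid4().hex}"
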
async def get_histories():
    """Async get chat histories example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        # Set APP_ID in your environment to the ID of your actual app
        app_id = os.getenv("APP_ID")

        try:
            histories = await client.get_chat_histories(
                appId=app_id,
                offset=0,
                pageSize=20,
                source="api"
            )
            histories.raise_for_status()
            data = histories.json()

            print(f"Total chats: {data['data']['total']}")
            for chat in data['data']['list']:
                print(f" - {chat['title']}: {chat['chatId']}")
        except Exception as e:
            print(f"Error: {e}")

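# Paging note (illustrative, assuming standard offset-based pagination): to walk
# every conversation, keep advancing offset by pageSize until the returned list
# is shorter than pageSize; data['data']['total'] gives the overall count.
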
async def get_app_analytics():
    """Async get app analytics example."""
    async with AsyncAppClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID")

        try:
            from datetime import datetime, timedelta

            # Get analytics for the last 7 days
            end_date = datetime.now()
            start_date = end_date - timedelta(days=7)

            logs = await client.get_app_logs_chart(
                appId=app_id,
                dateStart=start_date.strftime("%Y-%m-%d"),
                dateEnd=end_date.strftime("%Y-%m-%d"),
                source=["api"],
                userTimespan="day",
                chatTimespan="day",
                appTimespan="day"
            )
            logs.raise_for_status()
            data = logs.json()

            print("App Analytics (last 7 days):")
            print(f"  Data keys: {list(data['data'].keys())}")
        except Exception as e:
            print(f"Error: {e}")

async def multiple_requests():
    """Example of running multiple async requests concurrently."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        concepts = ["AI", "Machine Learning", "Deep Learning"]

        # Create multiple chat completions concurrently
        tasks = [
            client.create_chat_completion(
                messages=[{"role": "user", "content": f"What is {concept}?"}],
                stream=False
            )
            for concept in concepts
        ]

        responses = await asyncio.gather(*tasks)

        for concept, response in zip(concepts, responses):
            response.raise_for_status()
            result = response.json()
            print(f"\n{concept}:")
            print(f"  {result['choices'][0]['message']['content'][:100]}...")

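# Note: asyncio.gather raises the first exception from any task by default; pass
# return_exceptions=True if you would rather collect per-request failures and
# inspect them alongside the successful responses.
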
async def chat_with_variables():
    """Async chat with template variables example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        # Use variables to replace placeholders in your workflow
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Introduction"}],
            variables={
                "user_name": "Alice",
                "company": "Tech Corp",
                "language": "English"
            },
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print("Response with variables:", result['choices'][0]['message']['content'])

async def get_chat_records():
    """Async get chat records example."""
    async with AsyncChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
        app_id = os.getenv("APP_ID")
        chat_id = os.getenv("CHAT_ID")

        try:
            records = await client.get_chat_records(
                appId=app_id,
                chatId=chat_id,
                offset=0,
                pageSize=10,
                loadCustomFeedbacks=True
            )
            records.raise_for_status()
            data = records.json()

            print("Chat records:")
            for record in data['data']['list']:
                # Extract text from the value array structure
                text = 'N/A'
                value_array = record.get('value', [])

                if value_array:
                    # Each value item carries a type; plain text lives under text.content
                    first_item = value_array[0]
                    if first_item.get('type') == 'text' and 'text' in first_item:
                        text = first_item['text'].get('content', 'N/A')

                # Get role (obj field: "Human" or "AI")
                role = record.get('obj', 'Unknown')

                print(f" - [{role}] [{record.get('dataId', 'N/A')}] {text[:50]}...")
        except Exception as e:
            print(f"Error: {e}")

async def main():
    """Run all examples."""
    # Comment out any entry you don't want to exercise against your deployment.
    examples = [
        ("Simple Chat", simple_chat),
        ("Streaming Chat", streaming_chat),
        ("Chat with Context", chat_with_context),
        ("Get Histories", get_histories),
        ("Get App Analytics", get_app_analytics),
        ("Multiple Requests (Concurrent)", multiple_requests),
        ("Chat with Variables", chat_with_variables),
        ("Get Chat Records", get_chat_records),
    ]

    for title, example in examples:
        print(f"\n=== {title} ===")
        try:
            await example()
        except Exception as e:
            print(f"Error: {e}")

if __name__ == "__main__":
    asyncio.run(main())