# Basic Usage Example

A complete example demonstrating basic usage of the FastGPT Python SDK.

## Setup

First, install the SDK and create a `.env` file:

```bash
pip install fastgpt-client python-dotenv
```

```bash
# .env
API_KEY=fastgpt-xxxxx
BASE_URL=http://localhost:3000
```

## Complete Example

```python
"""Basic usage example for FastGPT Python SDK."""
|
|
|
|
from fastgpt_client import ChatClient
|
|
from dotenv import load_dotenv
|
|
import os
|
|
|
|
load_dotenv()
|
|
|
|
# Configure your API key and base URL
|
|
API_KEY = os.getenv("API_KEY")
|
|
BASE_URL = os.getenv("BASE_URL")
def simple_chat():
|
|
"""Simple chat completion example."""
|
|
with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
|
|
response = client.create_chat_completion(
|
|
messages=[{"role": "user", "content": "Hello! What's AI?"}],
|
|
stream=False
|
|
)
|
|
response.raise_for_status()
|
|
result = response.json()
|
|
|
|
print("Response:", result['choices'][0]['message']['content'])
def chat_with_history():
|
|
"""Chat with message history example."""
|
|
with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
|
|
response = client.create_chat_completion(
|
|
messages=[
|
|
{"role": "system", "content": "You are a helpful assistant."},
|
|
{"role": "user", "content": "Hello!"},
|
|
{"role": "assistant", "content": "Hi there! How can I help you?"},
|
|
{"role": "user", "content": "What's the capital of France?"}
|
|
],
|
|
stream=False
|
|
)
|
|
response.raise_for_status()
|
|
result = response.json()
|
|
|
|
print("Response:", result['choices'][0]['message']['content'])
def multi_turn_conversation():
|
|
"""Multi-turn conversation example."""
|
|
with ChatClient(api_key=API_KEY, base_url=BASE_URL) as client:
|
|
conversation = [
|
|
{"role": "user", "content": "What's Python?"}
|
|
]
|
|
|
|
# First turn
|
|
response = client.create_chat_completion(
|
|
messages=conversation,
|
|
stream=False
|
|
)
|
|
response.raise_for_status()
|
|
result = response.json()
|
|
assistant_message = result['choices'][0]['message']['content']
|
|
print(f"AI: {assistant_message}")
|
|
|
|
# Add assistant response to conversation
|
|
conversation.append({"role": "assistant", "content": assistant_message})
|
|
|
|
# Second turn
|
|
conversation.append({"role": "user", "content": "Give me an example"})
|
|
response = client.create_chat_completion(
|
|
messages=conversation,
|
|
stream=False
|
|
)
|
|
response.raise_for_status()
|
|
result = response.json()
|
|
assistant_message = result['choices'][0]['message']['content']
|
|
print(f"AI: {assistant_message}")
if __name__ == "__main__":
|
|
print("=== Simple Chat ===")
|
|
try:
|
|
simple_chat()
|
|
except Exception as e:
|
|
print(f"Error: {e}")
|
|
|
|
print("\n=== Chat with History ===")
|
|
try:
|
|
chat_with_history()
|
|
except Exception as e:
|
|
print(f"Error: {e}")
|
|
|
|
print("\n=== Multi-turn Conversation ===")
|
|
try:
|
|
multi_turn_conversation()
|
|
except Exception as e:
|
|
print(f"Error: {e}")
```

## Running the Example

```bash
python basic_usage.py
```

## Expected Output

```
=== Simple Chat ===
Response: AI (Artificial Intelligence) is a branch of computer science...

=== Chat with History ===
Response: The capital of France is Paris.

=== Multi-turn Conversation ===
AI: Python is a high-level programming language...
AI: Here's a simple example: print("Hello, World!")
```

## Key Concepts

### Messages Array

The `messages` parameter is an array of message objects:

```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi there!"},
    {"role": "user", "content": "How are you?"}
]
```

### Response Structure

The response follows OpenAI's format:

```python
{
    "id": "chatcmpl-xxx",
    "object": "chat.completion",
    "created": 1234567890,
    "model": "gpt-3.5-turbo",
    "choices": [
        {
            "index": 0,
            "message": {
                "role": "assistant",
                "content": "Response text here..."
            },
            "finish_reason": "stop"
        }
    ],
    "usage": {
        "prompt_tokens": 20,
        "completion_tokens": 15,
        "total_tokens": 35
    }
}
```

### Error Handling

Always handle potential errors:

```python
from fastgpt_client import ChatClient
from fastgpt_client.exceptions import APIError, AuthenticationError

try:
    with ChatClient(api_key=API_KEY) as client:
        response = client.create_chat_completion(...)
        response.raise_for_status()
        result = response.json()
except AuthenticationError:
    print("Invalid API key")
except APIError as e:
    print(f"API error: {e}")
```

## See Also

- [Streaming Example](streaming.md) - Learn how to use streaming responses
- [Async Usage](async_usage.md) - Asynchronous examples
- [Chat Context](chat_context.md) - Managing conversation context