# Error Handling
A comprehensive guide to handling errors in the FastGPT Python SDK.
## Exception Types
The SDK provides specific exceptions for different error scenarios:
| Exception | Status Code | Raised When |
|---|---|---|
| `AuthenticationError` | 401 | Invalid API key |
| `RateLimitError` | 429 | Too many requests |
| `ValidationError` | 422 | Invalid parameters |
| `APIError` | 4xx/5xx | General API errors |
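Later examples on this page import a `FastGPTError` base class and read `status_code` from caught exceptions. Assuming that hierarchy (a sketch, not a guaranteed API), a single catch-all handler can branch on the status codes from the table; the API key is a placeholder:

```python
from fastgpt_client import ChatClient
from fastgpt_client.exceptions import FastGPTError

try:
    with ChatClient(api_key="fastgpt-xxxxx") as client:
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello"}],
            stream=False,
        )
        response.raise_for_status()
except FastGPTError as e:
    # Assumes FastGPTError is the common base and exposes status_code,
    # as in the logging example further down this page.
    status = getattr(e, "status_code", None)
    if status == 401:
        print("Check your API key.")
    elif status == 429:
        print("Too many requests - slow down and retry.")
    elif status == 422:
        print("Fix the request parameters.")
    else:
        print(f"Request failed (status {status}).")
```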
## Basic Error Handling
```python
from fastgpt_client import ChatClient
from fastgpt_client.exceptions import (
    APIError,
    AuthenticationError,
    RateLimitError,
    ValidationError,
)

try:
    with ChatClient(api_key="fastgpt-xxxxx") as client:
        response = client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello"}],
            stream=False
        )
        response.raise_for_status()
        result = response.json()
        print(result['choices'][0]['message']['content'])
except AuthenticationError:
    print("Authentication failed. Check your API key.")
except RateLimitError as e:
    print(f"Rate limit exceeded. Retry after: {e.retry_after}")
except ValidationError as e:
    print(f"Invalid parameters: {e.message}")
except APIError as e:
    print(f"API error: {e.message}")
```
## Comprehensive Error Handler
```python
import logging

from fastgpt_client import ChatClient
from fastgpt_client.exceptions import (
    APIError,
    AuthenticationError,
    RateLimitError,
    ValidationError,
)

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)


class ChatService:
    """Chat service with comprehensive error handling."""

    def __init__(self, api_key: str, base_url: str):
        self.client = ChatClient(api_key=api_key, base_url=base_url)

    def send_message(self, message: str) -> str | None:
        """Send a message with error handling."""
        try:
            response = self.client.create_chat_completion(
                messages=[{"role": "user", "content": message}],
                stream=False
            )
            response.raise_for_status()
            result = response.json()
            return result['choices'][0]['message']['content']
        except AuthenticationError:
            logger.error("Invalid API key")
            return "Error: Authentication failed. Please check your API key."
        except RateLimitError as e:
            logger.error(f"Rate limit exceeded: {e}")
            wait_time = int(e.retry_after) if e.retry_after else 5
            return f"Error: Too many requests. Please wait {wait_time} seconds."
        except ValidationError as e:
            logger.error(f"Validation error: {e}")
            return f"Error: Invalid request - {e.message}"
        except APIError as e:
            logger.error(f"API error: {e}")
            return f"Error: Server error - {e.message}"
        except Exception as e:
            logger.exception(f"Unexpected error: {e}")
            return "Error: An unexpected error occurred."

    def close(self):
        """Close the client."""
        self.client.close()
```
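A short usage sketch for the `ChatService` class above; the API key and base URL are placeholder values:

```python
# Placeholder credentials and endpoint for illustration only.
service = ChatService(api_key="fastgpt-xxxxx", base_url="https://fastgpt.example.com/api/v1")
try:
    print(service.send_message("Hello"))
finally:
    service.close()
```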
## Retry Logic
### Simple Retry
```python
import time

from fastgpt_client import ChatClient
from fastgpt_client.exceptions import RateLimitError


def chat_with_retry(client, messages, max_retries=3):
    """Retry chat completion on rate limit errors."""
    for attempt in range(max_retries):
        try:
            response = client.create_chat_completion(
                messages=messages,
                stream=False
            )
            response.raise_for_status()
            return response.json()
        except RateLimitError as e:
            if attempt < max_retries - 1:
                wait_time = int(e.retry_after) if e.retry_after else 5
                print(f"Rate limited. Waiting {wait_time} seconds...")
                time.sleep(wait_time)
            else:
                raise
```
### Exponential Backoff
```python
import time

from fastgpt_client import ChatClient
from fastgpt_client.exceptions import APIError


def chat_with_backoff(client, messages, max_retries=5):
    """Retry with exponential backoff."""
    base_delay = 1  # seconds

    for attempt in range(max_retries):
        try:
            response = client.create_chat_completion(
                messages=messages,
                stream=False
            )
            response.raise_for_status()
            return response.json()
        except APIError as e:
            if attempt < max_retries - 1 and e.status_code >= 500:
                # Exponential backoff for server errors
                delay = base_delay * (2 ** attempt)
                print(f"Server error. Retrying in {delay} seconds...")
                time.sleep(delay)
            else:
                raise
```
## Streaming Error Handling
```python
import json

from fastgpt_client import ChatClient
from fastgpt_client.exceptions import FastGPTError


def stream_chat_safely(client, messages):
    """Handle streaming with error recovery."""
    try:
        response = client.create_chat_completion(
            messages=messages,
            stream=True
        )
        for line in response.iter_lines():
            try:
                if line.startswith("data:"):
                    data = line[5:].strip()
                    if data and data != "[DONE]":
                        chunk = json.loads(data)
                        if "choices" in chunk and chunk["choices"]:
                            delta = chunk["choices"][0].get("delta", {})
                            content = delta.get("content", "")
                            if content:
                                print(content, end="", flush=True)
            except json.JSONDecodeError:
                # Skip malformed JSON chunks
                continue
    except FastGPTError as e:
        print(f"\nStream error: {e}")
```
## Response Validation
```python
def validate_response(response_data: dict) -> bool:
    """Validate API response structure."""
    if "choices" not in response_data:
        raise ValueError("Response missing 'choices' field")

    if not response_data["choices"]:
        raise ValueError("Empty choices array")

    choice = response_data["choices"][0]
    if "message" not in choice:
        raise ValueError("Choice missing 'message' field")

    message = choice["message"]
    if "content" not in message:
        raise ValueError("Message missing 'content' field")

    return True


def safe_chat_completion(client, messages):
    """Chat with response validation."""
    response = client.create_chat_completion(
        messages=messages,
        stream=False
    )
    response.raise_for_status()
    result = response.json()

    try:
        validate_response(result)
        return result['choices'][0]['message']['content']
    except ValueError as e:
        print(f"Invalid response format: {e}")
        return None
```
## Logging Errors
```python
import logging

from fastgpt_client import ChatClient
from fastgpt_client.exceptions import FastGPTError

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def log_errors(client, messages):
    """Log errors with context."""
    try:
        response = client.create_chat_completion(
            messages=messages,
            stream=False
        )
        response.raise_for_status()
        return response.json()
    except FastGPTError as e:
        logger.error(
            f"API Error: {type(e).__name__} - {e.message}",
            extra={
                "status_code": e.status_code,
                "response_data": e.response_data,
            }
        )
        raise
```
## Custom Exception Handler
```python
from functools import wraps

from fastgpt_client.exceptions import FastGPTError


def handle_fastgpt_errors(func):
    """Decorator for handling FastGPT errors."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except FastGPTError as e:
            print(f"FastGPT Error: {e}")
            return None
        except Exception as e:
            print(f"Unexpected Error: {e}")
            return None
    return wrapper


@handle_fastgpt_errors
def send_message(client, message: str):
    """Send message with automatic error handling."""
    response = client.create_chat_completion(
        messages=[{"role": "user", "content": message}],
        stream=False
    )
    response.raise_for_status()
    result = response.json()
    return result['choices'][0]['message']['content']
```
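A brief usage sketch for the decorated function above, reusing the context-manager pattern from the earlier examples (the API key is a placeholder):

```python
from fastgpt_client import ChatClient

with ChatClient(api_key="fastgpt-xxxxx") as client:
    reply = send_message(client, "Hello")
    # On any error the decorator prints a message and returns None.
    if reply is not None:
        print(reply)
```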
## Best Practices
- Always use `raise_for_status()` - Catches HTTP errors early
- Handle specific exceptions - Use separate `except` blocks for known error types
- Log all errors - Helps with debugging and monitoring
- Provide user feedback - Show meaningful error messages
- Implement retries - For transient errors (rate limits, server errors); see the combined sketch below
- Validate responses - Ensure the data structure is correct
- Use context managers - Ensures proper cleanup
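As a rough sketch rather than a prescribed pattern, the snippet below combines several of these practices - a context manager, specific exception handlers, retries, and response validation - by reusing `chat_with_backoff` and `validate_response` from earlier on this page (the API key is a placeholder):

```python
import logging

from fastgpt_client import ChatClient
from fastgpt_client.exceptions import AuthenticationError, RateLimitError

logger = logging.getLogger(__name__)


def ask(message: str) -> str | None:
    """Sketch combining retries, validation, and specific error handlers.

    Reuses chat_with_backoff and validate_response defined earlier in this guide.
    """
    # Context manager ensures the client is closed even when an error occurs.
    with ChatClient(api_key="fastgpt-xxxxx") as client:
        try:
            result = chat_with_backoff(
                client,
                messages=[{"role": "user", "content": message}],
            )
            validate_response(result)
            return result["choices"][0]["message"]["content"]
        except AuthenticationError:
            logger.error("Authentication failed - check the API key")
        except RateLimitError:
            logger.error("Rate limited - try again later")
        except ValueError as e:
            logger.error(f"Malformed response: {e}")
        return None
```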
## See Also
- Exceptions Reference - Exception types and attributes
- Rate Limiting - Handling rate limits effectively