Initial commit: FastGPT Python SDK Phase 1

Implement core infrastructure:

- BaseClientMixin with retry logic and validation
- FastGPTClient base class with httpx
- ChatClient with 11 chat operation methods
- AppClient for analytics and logs
- Custom exceptions (APIError, AuthenticationError, etc.)
- Package configuration (pyproject.toml, setup.py)
- Documentation (README.md, CLAUDE.md)
- Basic usage examples

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Xin Wang
2026-01-06 14:38:32 +08:00
commit 0495dd4676
11 changed files with 1332 additions and 0 deletions

487
fastgpt_client/client.py Normal file
View File

@@ -0,0 +1,487 @@
"""FastGPT Client - Main synchronous client."""
import logging
from typing import Any, Dict, Literal, Union
import httpx
from .base_client import BaseClientMixin
from .exceptions import APIError, AuthenticationError, RateLimitError, ValidationError
class FastGPTClient(BaseClientMixin):
    """Synchronous FastGPT API client.

    This client uses httpx.Client for efficient connection pooling and
    resource management. It's recommended to use this client as a context
    manager.

    Example:
        with FastGPTClient(api_key="your-key") as client:
            response = client.get_app_info(app_id="app-123")
    """

    def __init__(
        self,
        api_key: str,
        base_url: str = "http://localhost:3000",
        timeout: float = 60.0,
        max_retries: int = 3,
        retry_delay: float = 1.0,
        enable_logging: bool = False,
    ):
        """Initialize the FastGPT client.

        Args:
            api_key: Your FastGPT API key
            base_url: Base URL for the FastGPT API
            timeout: Request timeout in seconds (default: 60.0)
            max_retries: Maximum number of retry attempts (default: 3)
            retry_delay: Delay between retries in seconds (default: 1.0)
            enable_logging: Whether to enable request logging (default: False)
        """
        # Initialize retry/validation/logging state provided by the mixin.
        super().__init__(api_key, base_url, timeout, max_retries, retry_delay, enable_logging)
        self._client = httpx.Client(
            base_url=base_url,
            timeout=httpx.Timeout(timeout, connect=5.0),
        )

    def __enter__(self):
        """Support context manager protocol."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Clean up resources when exiting context."""
        self.close()

    def close(self):
        """Close the HTTP client and release resources.

        Safe to call even if __init__ failed before ``_client`` was set.
        """
        if hasattr(self, "_client"):
            self._client.close()

    def _send_request(
        self,
        method: str,
        endpoint: str,
        json: Dict[str, Any] | None = None,
        params: Dict[str, Any] | None = None,
        stream: bool = False,
        **kwargs,
    ):
        """Send an HTTP request to the FastGPT API with retry logic.

        Args:
            method: HTTP method (GET, POST, PUT, PATCH, DELETE)
            endpoint: API endpoint path
            json: JSON request body
            params: Query parameters
            stream: Whether to stream the response body. When True the
                response is returned with an unread body; the caller must
                consume it (e.g. ``iter_lines()``/``iter_bytes()``) and close
                it when done.
            **kwargs: Additional arguments to pass to httpx

        Returns:
            httpx.Response object

        Raises:
            AuthenticationError: If the API returns 401
            RateLimitError: If the API returns 429
            ValidationError: If the API returns 422
            APIError: For other 4xx/5xx responses
        """
        # Validate parameters before touching the network.
        if json:
            self._validate_params(**json)
        if params:
            self._validate_params(**params)
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        def make_request():
            """Inner function to perform the actual HTTP request."""
            # Log request if logging is enabled
            if self.enable_logging:
                self.logger.info(f"Sending {method} request to {endpoint}")
            # Debug logging for detailed information
            if self.logger.isEnabledFor(logging.DEBUG):
                if json:
                    self.logger.debug(f"Request body: {json}")
                if params:
                    self.logger.debug(f"Request params: {params}")
            # httpx.Client automatically prepends base_url.
            if stream:
                # Bug fix: `stream` used to be accepted but ignored, so
                # "streaming" requests were fully buffered in memory. Build
                # the request explicitly and send it with stream=True so the
                # body stays unread until the caller consumes it.
                request = self._client.build_request(
                    method,
                    endpoint,
                    json=json,
                    params=params,
                    headers=headers,
                    **kwargs,
                )
                response = self._client.send(request, stream=True)
            else:
                response = self._client.request(
                    method,
                    endpoint,
                    json=json,
                    params=params,
                    headers=headers,
                    **kwargs,
                )
            # Log response if logging is enabled
            if self.enable_logging:
                self.logger.info(f"Received response: {response.status_code}")
            return response

        # Use the retry mechanism from base client
        request_context = f"{method} {endpoint}"
        response = self._retry_request(make_request, request_context)

        # A streamed error response has an unread body; materialize it so the
        # error handler can parse the JSON error message.
        if stream and response.status_code >= 400:
            response.read()
        # Handle error responses (API errors don't retry)
        self._handle_error_response(response)
        return response

    def _handle_error_response(self, response) -> None:
        """Handle HTTP error responses and raise appropriate exceptions.

        Args:
            response: httpx.Response object

        Raises:
            AuthenticationError: If status code is 401
            RateLimitError: If status code is 429
            ValidationError: If status code is 422
            APIError: For other 4xx and 5xx errors
        """
        if response.status_code < 400:
            return  # Success response

        try:
            error_data = response.json()
            message = error_data.get("message", f"HTTP {response.status_code}")
        except (ValueError, KeyError):
            # Body was not valid JSON (or not a dict) — fall back to the
            # bare status code.
            message = f"HTTP {response.status_code}"
            error_data = None

        # Log error response if logging is enabled
        if self.enable_logging:
            self.logger.error(f"API error: {response.status_code} - {message}")

        if response.status_code == 401:
            raise AuthenticationError(message, response.status_code, error_data)
        elif response.status_code == 429:
            retry_after = response.headers.get("Retry-After")
            raise RateLimitError(message, retry_after, response.status_code, error_data)
        elif response.status_code == 422:
            raise ValidationError(message, response.status_code, error_data)
        elif response.status_code >= 400:
            raise APIError(message, response.status_code, error_data)
class ChatClient(FastGPTClient):
    """Client for chat-related operations.

    Example:
        with ChatClient(api_key="fastgpt-xxxxx") as client:
            response = client.create_chat_completion(
                messages=[{"role": "user", "content": "Hello!"}],
                stream=False
            )
    """

    def create_chat_completion(
        self,
        messages: list[dict],
        stream: bool = False,
        chatId: str | None = None,
        detail: bool = False,
        variables: dict[str, Any] | None = None,
        responseChatItemId: str | None = None,
    ):
        """Create a chat completion.

        Args:
            messages: Array of message objects with role and content
            stream: Whether to stream the response
            chatId: Chat ID for conversation context (optional)
            detail: Whether to return detailed response data
            variables: Template variables for substitution
            responseChatItemId: Custom ID for the response message

        Returns:
            httpx.Response object
        """
        self._validate_params(messages=messages)
        payload: dict[str, Any] = {
            "messages": messages,
            "stream": stream,
            "detail": detail,
        }
        # Only attach truthy optional fields, matching the API's expectations.
        for field, value in (
            ("chatId", chatId),
            ("variables", variables),
            ("responseChatItemId", responseChatItemId),
        ):
            if value:
                payload[field] = value
        return self._send_request(
            "POST",
            "/api/v1/chat/completions",
            json=payload,
            stream=stream,
        )

    def get_chat_histories(
        self,
        appId: str,
        offset: int = 0,
        pageSize: int = 20,
        source: Literal["api", "online", "share", "test"] = "api",
    ):
        """Get chat histories for an application.

        Args:
            appId: Application ID
            offset: Offset for pagination
            pageSize: Number of records per page
            source: Source filter (api, online, share, test)

        Returns:
            httpx.Response object
        """
        return self._send_request(
            "POST",
            "/api/core/chat/getHistories",
            json={
                "appId": appId,
                "offset": offset,
                "pageSize": pageSize,
                "source": source,
            },
        )

    def get_chat_init(self, appId: str, chatId: str):
        """Get chat initialization information.

        Args:
            appId: Application ID
            chatId: Chat ID

        Returns:
            httpx.Response object
        """
        return self._send_request(
            "GET",
            "/api/core/chat/init",
            params={"appId": appId, "chatId": chatId},
        )

    def get_chat_records(
        self,
        appId: str,
        chatId: str,
        offset: int = 0,
        pageSize: int = 10,
        loadCustomFeedbacks: bool = False,
    ):
        """Get chat records for a specific chat.

        Args:
            appId: Application ID
            chatId: Chat ID
            offset: Offset for pagination
            pageSize: Number of records per page
            loadCustomFeedbacks: Whether to load custom feedbacks

        Returns:
            httpx.Response object
        """
        payload = {
            "appId": appId,
            "chatId": chatId,
            "offset": offset,
            "pageSize": pageSize,
            "loadCustomFeedbacks": loadCustomFeedbacks,
        }
        return self._send_request("POST", "/api/core/chat/getPaginationRecords", json=payload)

    def get_record_detail(self, appId: str, chatId: str, dataId: str):
        """Get detailed execution data for a specific record.

        Args:
            appId: Application ID
            chatId: Chat ID
            dataId: Record ID

        Returns:
            httpx.Response object
        """
        query = {"appId": appId, "chatId": chatId, "dataId": dataId}
        return self._send_request("GET", "/api/core/chat/getResData", params=query)

    def update_chat_history(
        self,
        appId: str,
        chatId: str,
        customTitle: str | None = None,
        top: bool | None = None,
    ):
        """Update chat history (title or pin status).

        Args:
            appId: Application ID
            chatId: Chat ID
            customTitle: Custom title for the chat
            top: Whether to pin the chat

        Returns:
            httpx.Response object
        """
        payload: dict[str, Any] = {"appId": appId, "chatId": chatId}
        # `None` means "leave unchanged"; explicit False/"" values are sent.
        optional = {"customTitle": customTitle, "top": top}
        payload.update({k: v for k, v in optional.items() if v is not None})
        return self._send_request("POST", "/api/core/chat/updateHistory", json=payload)

    def delete_chat_history(self, appId: str, chatId: str):
        """Delete a chat history.

        Args:
            appId: Application ID
            chatId: Chat ID

        Returns:
            httpx.Response object
        """
        query = {"appId": appId, "chatId": chatId}
        return self._send_request("DELETE", "/api/core/chat/delHistory", params=query)

    def clear_chat_histories(self, appId: str):
        """Clear all chat histories for an application.

        Args:
            appId: Application ID

        Returns:
            httpx.Response object
        """
        return self._send_request(
            "DELETE",
            "/api/core/chat/clearHistories",
            params={"appId": appId},
        )

    def delete_chat_record(self, appId: str, chatId: str, contentId: str):
        """Delete a single chat record.

        Args:
            appId: Application ID
            chatId: Chat ID
            contentId: Content ID of the record

        Returns:
            httpx.Response object
        """
        query = {"appId": appId, "chatId": chatId, "contentId": contentId}
        return self._send_request("DELETE", "/api/core/chat/item/delete", params=query)

    def send_feedback(
        self,
        appId: str,
        chatId: str,
        dataId: str,
        userGoodFeedback: str | None = None,
        userBadFeedback: str | None = None,
    ):
        """Send feedback for a chat message (like/dislike).

        Args:
            appId: Application ID
            chatId: Chat ID
            dataId: Message ID
            userGoodFeedback: Positive feedback text (pass None to cancel like)
            userBadFeedback: Negative feedback text (pass None to cancel dislike)

        Returns:
            httpx.Response object
        """
        payload: dict[str, Any] = {"appId": appId, "chatId": chatId, "dataId": dataId}
        # `None` cancels the corresponding feedback by omitting the field.
        feedback = {"userGoodFeedback": userGoodFeedback, "userBadFeedback": userBadFeedback}
        payload.update({k: v for k, v in feedback.items() if v is not None})
        return self._send_request("POST", "/api/core/chat/feedback/updateUserFeedback", json=payload)

    def get_suggested_questions(
        self,
        appId: str,
        chatId: str,
        questionGuide: dict[str, Any] | None = None,
    ):
        """Get suggested questions based on chat context.

        Args:
            appId: Application ID
            chatId: Chat ID
            questionGuide: Optional custom configuration for question guide

        Returns:
            httpx.Response object
        """
        payload: dict[str, Any] = {"appId": appId, "chatId": chatId}
        if questionGuide:
            payload["questionGuide"] = questionGuide
        return self._send_request("POST", "/api/core/ai/agent/v2/createQuestionGuide", json=payload)
class AppClient(FastGPTClient):
    """Client for application analytics and logs.

    Example:
        with AppClient(api_key="fastgpt-xxxxx") as client:
            logs = client.get_app_logs_chart(appId="app-123")
    """

    def get_app_logs_chart(
        self,
        appId: str,
        dateStart: str,
        dateEnd: str,
        offset: int = 1,
        source: list[str] | None = None,
        userTimespan: str = "day",
        chatTimespan: str = "day",
        appTimespan: str = "day",
    ):
        """Get application analytics chart data.

        Args:
            appId: Application ID
            dateStart: Start date (ISO 8601 format)
            dateEnd: End date (ISO 8601 format)
            offset: Offset value
            source: List of sources (test, online, share, api, etc.)
            userTimespan: User data timespan (day, week, month)
            chatTimespan: Chat data timespan (day, week, month)
            appTimespan: App data timespan (day, week, month)

        Returns:
            httpx.Response object
        """
        # `None` sentinel avoids a shared mutable default; API default is api-only.
        payload = {
            "appId": appId,
            "dateStart": dateStart,
            "dateEnd": dateEnd,
            "offset": offset,
            "source": ["api"] if source is None else source,
            "userTimespan": userTimespan,
            "chatTimespan": chatTimespan,
            "appTimespan": appTimespan,
        }
        return self._send_request("POST", "/api/proApi/core/app/logs/getChartData", json=payload)