Add async client support
- Add AsyncFastGPTClient, AsyncChatClient, AsyncAppClient classes
- Implement async/await patterns with httpx.AsyncClient
- Add async context manager support (async with, async for)
- Add async retry logic with exponential backoff
- Add 29 async unit tests covering all async client methods
- Update __init__.py to export async clients

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
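For orientation, a minimal usage sketch of the new async clients, assuming the exports added to `__init__.py` in the diff below; the client methods return plain `httpx.Response` objects:

```python
import asyncio

from fastgpt_client import AsyncChatClient


async def main() -> None:
    # Exiting the async context manager closes the underlying httpx.AsyncClient.
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello!"}],
            stream=False,
        )
        print(response.json())


asyncio.run(main())
```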
@@ -3,6 +3,11 @@
A Python client library for interacting with FastGPT's OpenAPI.
"""

from fastgpt_client.async_client import (
    AsyncAppClient,
    AsyncChatClient,
    AsyncFastGPTClient,
)
from fastgpt_client.client import AppClient, ChatClient, FastGPTClient
from fastgpt_client.exceptions import (
    APIError,
@@ -18,6 +23,10 @@ __all__ = [
    "FastGPTClient",
    "ChatClient",
    "AppClient",
    # Asynchronous clients
    "AsyncFastGPTClient",
    "AsyncChatClient",
    "AsyncAppClient",
    # Exceptions
    "FastGPTError",
    "APIError",
fastgpt_client/async_client.py (new file, 609 lines)
@@ -0,0 +1,609 @@
"""FastGPT Async Client - Asynchronous client for FastGPT API."""

import asyncio
import logging
import weakref
from typing import Any, Dict, Literal, Union

import httpx

from .base_client import BaseClientMixin
from .exceptions import APIError, AuthenticationError, RateLimitError, ValidationError


class AsyncFastGPTClient(BaseClientMixin):
    """Asynchronous FastGPT API client.

    This client uses httpx.AsyncClient for efficient async connection pooling.
    It's recommended to use this client as an async context manager.

    Example:
        async with AsyncFastGPTClient(api_key="your-key") as client:
            response = await client.get_app_info(app_id="app-123")
    """

    def __init__(
        self,
        api_key: str,
        base_url: str = "http://localhost:3000",
        timeout: float = 60.0,
        max_retries: int = 3,
        retry_delay: float = 1.0,
        enable_logging: bool = False,
    ):
        """Initialize the Async FastGPT client.

        Args:
            api_key: Your FastGPT API key
            base_url: Base URL for the FastGPT API
            timeout: Request timeout in seconds (default: 60.0)
            max_retries: Maximum number of retry attempts (default: 3)
            retry_delay: Delay between retries in seconds (default: 1.0)
            enable_logging: Whether to enable request logging (default: False)
        """
        # Initialize base client functionality
        super().__init__(api_key, base_url, timeout, max_retries, retry_delay, enable_logging)

        self._client = httpx.AsyncClient(
            base_url=base_url,
            timeout=httpx.Timeout(timeout, connect=5.0),
        )

    async def __aenter__(self):
        """Support async context manager protocol."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Clean up resources when exiting async context."""
        await self.close()

    async def close(self):
        """Close the HTTP client and release resources."""
        if hasattr(self, "_client"):
            await self._client.aclose()

    async def _send_request(
        self,
        method: str,
        endpoint: str,
        json: Dict[str, Any] | None = None,
        params: Dict[str, Any] | None = None,
        stream: bool = False,
        **kwargs,
    ):
        """Send an HTTP request to the FastGPT API with retry logic.

        Args:
            method: HTTP method (GET, POST, PUT, PATCH, DELETE)
            endpoint: API endpoint path
            json: JSON request body
            params: Query parameters
            stream: Whether to stream the response
            **kwargs: Additional arguments to pass to httpx.request

        Returns:
            httpx.Response object
        """
        # Validate parameters
        if json:
            self._validate_params(**json)
        if params:
            self._validate_params(**params)

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        async def make_request():
            """Inner function to perform the actual HTTP request."""
            # Log request if logging is enabled
            if self.enable_logging:
                self.logger.info(f"Sending {method} request to {endpoint}")

                # Debug logging for detailed information
                if self.logger.isEnabledFor(logging.DEBUG):
                    if json:
                        self.logger.debug(f"Request body: {json}")
                    if params:
                        self.logger.debug(f"Request params: {params}")

            # httpx.AsyncClient automatically prepends base_url
            # For streaming, use stream() method; for non-streaming, use request()
            if stream:
                # httpx.stream() returns a context manager, enter it and return response
                stream_context = self._client.stream(
                    method,
                    endpoint,
                    json=json,
                    params=params,
                    headers=headers,
                    **kwargs,
                )
                response = await stream_context.__aenter__()

                # Store the stream context on the response
                response._stream_context = stream_context
                response._stream_context_closed = False

                # Override close() to also close the stream context
                original_close = response.close

                async def close_with_context():
                    """Close both the response and the stream context."""
                    if getattr(response, '_stream_context_closed', False):
                        return

                    try:
                        # Close the response first
                        await original_close()
                    finally:
                        # Always close the stream context, even if response.close() fails
                        if hasattr(response, '_stream_context') and response._stream_context is not None:
                            try:
                                await response._stream_context.__aexit__(None, None, None)
                            except Exception:
                                pass  # Ignore errors during cleanup
                            finally:
                                response._stream_context = None
                                response._stream_context_closed = True

                response.close = close_with_context

                # Safety net: ensure cleanup on garbage collection
                def cleanup_stream_context(stream_ctx_ref):
                    """Finalizer to close stream context if response is GC'd without being closed."""
                    async def _cleanup():
                        stream_ctx = stream_ctx_ref()
                        if stream_ctx is not None:
                            try:
                                await stream_ctx.__aexit__(None, None, None)
                            except Exception:
                                pass  # Ignore errors in finalizer

                    # Schedule cleanup on event loop
                    try:
                        loop = asyncio.get_running_loop()
                        loop.create_task(_cleanup())
                    except RuntimeError:
                        pass  # No running loop

                # Use weakref to avoid circular references
                weakref.finalize(response, cleanup_stream_context, weakref.ref(stream_context))
            else:
                response = await self._client.request(
                    method,
                    endpoint,
                    json=json,
                    params=params,
                    headers=headers,
                    **kwargs,
                )

            # Log response if logging is enabled
            if self.enable_logging:
                self.logger.info(f"Received response: {response.status_code}")

            return response

        # Use the retry mechanism from base client
        request_context = f"{method} {endpoint}"
        response = await self._retry_request_async(make_request, request_context)

        # Handle error responses (API errors don't retry)
        self._handle_error_response(response)

        return response

    async def _retry_request_async(self, request_func, request_context: str):
        """Execute a request with retry logic (async version).

        Args:
            request_func: Async function that executes the HTTP request
            request_context: Description of the request for logging

        Returns:
            Response from the request

        Raises:
            APIError: If all retries are exhausted
        """
        last_exception = None

        for attempt in range(self.max_retries):
            try:
                response = await request_func()

                # Success on non-5xx responses
                if response.status_code < 500:
                    return response

                # Server error - will retry
                if self.enable_logging:
                    self.logger.warning(
                        f"{request_context} failed with status {response.status_code} "
                        f"(attempt {attempt + 1}/{self.max_retries})"
                    )

                if attempt < self.max_retries - 1:
                    # Exponential backoff
                    sleep_time = self.retry_delay * (2 ** attempt)
                    await asyncio.sleep(sleep_time)

            except Exception as e:
                last_exception = e
                if self.enable_logging:
                    self.logger.warning(
                        f"{request_context} raised exception: {e} "
                        f"(attempt {attempt + 1}/{self.max_retries})"
                    )

                if attempt < self.max_retries - 1:
                    sleep_time = self.retry_delay * (2 ** attempt)
                    await asyncio.sleep(sleep_time)

        # All retries exhausted
        if last_exception:
            from .exceptions import APIError
            raise APIError(f"Request failed after {self.max_retries} attempts: {last_exception}")

        from .exceptions import APIError
        raise APIError(f"Request failed after {self.max_retries} attempts")

    def _handle_error_response(self, response) -> None:
        """Handle HTTP error responses and raise appropriate exceptions.

        Args:
            response: httpx.Response object

        Raises:
            AuthenticationError: If status code is 401
            RateLimitError: If status code is 429
            ValidationError: If status code is 422
            APIError: For other 4xx and 5xx errors
        """
        # Check status code first (doesn't consume response body)
        if response.status_code < 400:
            return  # Success response

        # Try to parse error message (this will consume the body, but that's OK for errors)
        try:
            error_data = response.json()
            message = error_data.get("message", f"HTTP {response.status_code}")
        except (ValueError, KeyError, AttributeError):
            # If we can't parse JSON (e.g., streaming response or invalid JSON), use status code
            message = f"HTTP {response.status_code}"
            error_data = None

        # Log error response if logging is enabled
        if self.enable_logging:
            self.logger.error(f"API error: {response.status_code} - {message}")

        if response.status_code == 401:
            raise AuthenticationError(message, response.status_code, error_data)
        elif response.status_code == 429:
            retry_after = response.headers.get("Retry-After")
            raise RateLimitError(message, retry_after, response.status_code, error_data)
        elif response.status_code == 422:
            raise ValidationError(message, response.status_code, error_data)
        elif response.status_code >= 400:
            raise APIError(message, response.status_code, error_data)


class AsyncChatClient(AsyncFastGPTClient):
    """Async client for chat-related operations.

    Example:
        async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
            response = await client.create_chat_completion(
                messages=[{"role": "user", "content": "Hello!"}],
                stream=False
            )
    """

    async def create_chat_completion(
        self,
        messages: list[dict],
        stream: bool = False,
        chatId: str | None = None,
        detail: bool = False,
        variables: dict[str, Any] | None = None,
        responseChatItemId: str | None = None,
    ):
        """Create a chat completion.

        Args:
            messages: Array of message objects with role and content
            stream: Whether to stream the response
            chatId: Chat ID for conversation context (optional)
            detail: Whether to return detailed response data
            variables: Template variables for substitution
            responseChatItemId: Custom ID for the response message

        Returns:
            httpx.Response object
        """
        self._validate_params(messages=messages)

        data = {
            "messages": messages,
            "stream": stream,
            "detail": detail,
        }

        if chatId:
            data["chatId"] = chatId
        if variables:
            data["variables"] = variables
        if responseChatItemId:
            data["responseChatItemId"] = responseChatItemId

        return await self._send_request(
            "POST",
            "/api/v1/chat/completions",
            json=data,
            stream=stream,
        )

    async def get_chat_histories(
        self,
        appId: str,
        offset: int = 0,
        pageSize: int = 20,
        source: Literal["api", "online", "share", "test"] = "api",
    ):
        """Get chat histories for an application.

        Args:
            appId: Application ID
            offset: Offset for pagination
            pageSize: Number of records per page
            source: Source filter (api, online, share, test)

        Returns:
            httpx.Response object
        """
        data = {
            "appId": appId,
            "offset": offset,
            "pageSize": pageSize,
            "source": source,
        }

        return await self._send_request("POST", "/api/core/chat/getHistories", json=data)

    async def get_chat_init(self, appId: str, chatId: str):
        """Get chat initialization information.

        Args:
            appId: Application ID
            chatId: Chat ID

        Returns:
            httpx.Response object
        """
        params = {"appId": appId, "chatId": chatId}
        return await self._send_request("GET", "/api/core/chat/init", params=params)

    async def get_chat_records(
        self,
        appId: str,
        chatId: str,
        offset: int = 0,
        pageSize: int = 10,
        loadCustomFeedbacks: bool = False,
    ):
        """Get chat records for a specific chat.

        Args:
            appId: Application ID
            chatId: Chat ID
            offset: Offset for pagination
            pageSize: Number of records per page
            loadCustomFeedbacks: Whether to load custom feedbacks

        Returns:
            httpx.Response object
        """
        data = {
            "appId": appId,
            "chatId": chatId,
            "offset": offset,
            "pageSize": pageSize,
            "loadCustomFeedbacks": loadCustomFeedbacks,
        }

        return await self._send_request("POST", "/api/core/chat/getPaginationRecords", json=data)

    async def get_record_detail(self, appId: str, chatId: str, dataId: str):
        """Get detailed execution data for a specific record.

        Args:
            appId: Application ID
            chatId: Chat ID
            dataId: Record ID

        Returns:
            httpx.Response object
        """
        params = {"appId": appId, "chatId": chatId, "dataId": dataId}
        return await self._send_request("GET", "/api/core/chat/getResData", params=params)

    async def update_chat_history(
        self,
        appId: str,
        chatId: str,
        customTitle: str | None = None,
        top: bool | None = None,
    ):
        """Update chat history (title or pin status).

        Args:
            appId: Application ID
            chatId: Chat ID
            customTitle: Custom title for the chat
            top: Whether to pin the chat

        Returns:
            httpx.Response object
        """
        data = {
            "appId": appId,
            "chatId": chatId,
        }

        if customTitle is not None:
            data["customTitle"] = customTitle
        if top is not None:
            data["top"] = top

        return await self._send_request("POST", "/api/core/chat/updateHistory", json=data)

    async def delete_chat_history(self, appId: str, chatId: str):
        """Delete a chat history.

        Args:
            appId: Application ID
            chatId: Chat ID

        Returns:
            httpx.Response object
        """
        params = {"appId": appId, "chatId": chatId}
        return await self._send_request("DELETE", "/api/core/chat/delHistory", params=params)

    async def clear_chat_histories(self, appId: str):
        """Clear all chat histories for an application.

        Args:
            appId: Application ID

        Returns:
            httpx.Response object
        """
        params = {"appId": appId}
        return await self._send_request("DELETE", "/api/core/chat/clearHistories", params=params)

    async def delete_chat_record(self, appId: str, chatId: str, contentId: str):
        """Delete a single chat record.

        Args:
            appId: Application ID
            chatId: Chat ID
            contentId: Content ID of the record

        Returns:
            httpx.Response object
        """
        params = {"appId": appId, "chatId": chatId, "contentId": contentId}
        return await self._send_request("DELETE", "/api/core/chat/item/delete", params=params)

    async def send_feedback(
        self,
        appId: str,
        chatId: str,
        dataId: str,
        userGoodFeedback: str | None = None,
        userBadFeedback: str | None = None,
    ):
        """Send feedback for a chat message (like/dislike).

        Args:
            appId: Application ID
            chatId: Chat ID
            dataId: Message ID
            userGoodFeedback: Positive feedback text (pass None to cancel like)
            userBadFeedback: Negative feedback text (pass None to cancel dislike)

        Returns:
            httpx.Response object
        """
        data = {
            "appId": appId,
            "chatId": chatId,
            "dataId": dataId,
        }

        if userGoodFeedback is not None:
            data["userGoodFeedback"] = userGoodFeedback
        if userBadFeedback is not None:
            data["userBadFeedback"] = userBadFeedback

        return await self._send_request("POST", "/api/core/chat/feedback/updateUserFeedback", json=data)

    async def get_suggested_questions(
        self,
        appId: str,
        chatId: str,
        questionGuide: dict[str, Any] | None = None,
    ):
        """Get suggested questions based on chat context.

        Args:
            appId: Application ID
            chatId: Chat ID
            questionGuide: Optional custom configuration for question guide

        Returns:
            httpx.Response object
        """
        data = {
            "appId": appId,
            "chatId": chatId,
        }

        if questionGuide:
            data["questionGuide"] = questionGuide

        return await self._send_request("POST", "/api/core/ai/agent/v2/createQuestionGuide", json=data)


class AsyncAppClient(AsyncFastGPTClient):
    """Async client for application analytics and logs.

    Example:
        async with AsyncAppClient(api_key="fastgpt-xxxxx") as client:
            logs = await client.get_app_logs_chart(appId="app-123")
    """

    async def get_app_logs_chart(
        self,
        appId: str,
        dateStart: str,
        dateEnd: str,
        offset: int = 1,
        source: list[str] | None = None,
        userTimespan: str = "day",
        chatTimespan: str = "day",
        appTimespan: str = "day",
    ):
        """Get application analytics chart data.

        Args:
            appId: Application ID
            dateStart: Start date (ISO 8601 format)
            dateEnd: End date (ISO 8601 format)
            offset: Offset value
            source: List of sources (test, online, share, api, etc.)
            userTimespan: User data timespan (day, week, month)
            chatTimespan: Chat data timespan (day, week, month)
            appTimespan: App data timespan (day, week, month)

        Returns:
            httpx.Response object
        """
        if source is None:
            source = ["api"]

        data = {
            "appId": appId,
            "dateStart": dateStart,
            "dateEnd": dateEnd,
            "offset": offset,
            "source": source,
            "userTimespan": userTimespan,
            "chatTimespan": chatTimespan,
            "appTimespan": appTimespan,
        }

        return await self._send_request("POST", "/api/proApi/core/app/logs/getChartData", json=data)
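For reference, a minimal sketch of how the streaming path above is intended to be consumed, assuming the `/api/v1/chat/completions` endpoint returns a line-delimited (SSE-style) body; the wrapped `close()` installed by `_send_request` is what releases the underlying stream context:

```python
import asyncio

from fastgpt_client.async_client import AsyncChatClient


async def stream_chat() -> None:
    async with AsyncChatClient(api_key="fastgpt-xxxxx") as client:
        # stream=True routes the call through the httpx stream context
        # opened in _send_request, so the body is iterated asynchronously.
        response = await client.create_chat_completion(
            messages=[{"role": "user", "content": "Hello!"}],
            stream=True,
        )
        try:
            async for line in response.aiter_lines():
                if line:
                    print(line)
        finally:
            # close() was replaced by close_with_context(), so awaiting it
            # also exits the stream context opened in _send_request.
            await response.close()


asyncio.run(stream_chat())
```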
tests/test_async_client.py (new file, 561 lines)
@@ -0,0 +1,561 @@
"""Tests for async FastGPT clients."""

import asyncio
from unittest.mock import Mock, AsyncMock, MagicMock, patch
import pytest

import httpx


class AsyncContextManagerMock:
    """Helper class to mock async context managers."""
    def __init__(self, return_value):
        self._return_value = return_value
        self.__aenter__ = AsyncMock(return_value=return_value)
        self.__aexit__ = AsyncMock(return_value=None)


from fastgpt_client.async_client import (
    AsyncFastGPTClient,
    AsyncChatClient,
    AsyncAppClient,
)
from fastgpt_client.exceptions import (
    APIError,
    AuthenticationError,
    RateLimitError,
    ValidationError,
)


class TestAsyncFastGPTClient:
    """Test suite for AsyncFastGPTClient."""

    @pytest.mark.asyncio
    async def test_init_default(self, api_key, base_url):
        """Test client initialization with defaults."""
        client = AsyncFastGPTClient(api_key)

        assert client.api_key == api_key
        assert client.base_url == base_url
        assert client.timeout == 60.0
        assert client.max_retries == 3
        assert client.retry_delay == 1.0
        assert client.enable_logging is False
        assert isinstance(client._client, httpx.AsyncClient)

        await client.close()

    @pytest.mark.asyncio
    async def test_async_context_manager(self, api_key):
        """Test using async client as context manager."""
        async with AsyncFastGPTClient(api_key) as client:
            assert client.api_key == api_key
            assert not client._client.is_closed

        # Client should be closed after exiting context
        assert client._client.is_closed

    @pytest.mark.asyncio
    async def test_async_context_manager_with_exception(self, api_key):
        """Test async context manager properly closes on exception."""
        client = AsyncFastGPTClient(api_key)

        try:
            async with client:
                raise ValueError("Test exception")
        except ValueError:
            pass

        # Client should still be closed even with exception
        assert client._client.is_closed

    @pytest.mark.asyncio
    async def test_close(self, api_key):
        """Test closing the async client."""
        client = AsyncFastGPTClient(api_key)
        assert not client._client.is_closed

        await client.close()
        assert client._client.is_closed


class TestAsyncFastGPTClientSendRequest:
    """Test suite for AsyncFastGPTClient._send_request method."""

    @pytest.mark.asyncio
    async def test_send_request_get_success(self, api_key, mock_response):
        """Test successful async GET request."""
        client = AsyncFastGPTClient(api_key)

        async def mock_request(*args, **kwargs):
            return mock_response

        with patch.object(client._client, 'request', mock_request):
            response = await client._send_request("GET", "/api/test")

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_send_request_post_success(self, api_key, mock_response):
        """Test successful async POST request with JSON body."""
        client = AsyncFastGPTClient(api_key)

        async def mock_request(*args, **kwargs):
            return mock_response

        with patch.object(client._client, 'request', mock_request) as mock_req:
            response = await client._send_request(
                "POST",
                "/api/test",
                json={"key": "value"}
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_send_request_with_params(self, api_key, mock_response):
        """Test async request with query parameters."""
        client = AsyncFastGPTClient(api_key)

        async def mock_request(*args, **kwargs):
            return mock_response

        with patch.object(client._client, 'request', mock_request):
            response = await client._send_request(
                "GET",
                "/api/test",
                params={"page": 1, "limit": 10}
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_send_request_with_streaming(self, api_key, mock_stream_response):
        """Test async streaming request."""
        client = AsyncFastGPTClient(api_key)

        mock_stream_context = AsyncContextManagerMock(mock_stream_response)

        # stream() is not async, it returns an async context manager
        def mock_stream(*args, **kwargs):
            return mock_stream_context

        with patch.object(client._client, 'stream', mock_stream):
            response = await client._send_request(
                "POST",
                "/api/test/stream",
                stream=True
            )

        assert response.status_code == 200
        mock_stream_context.__aenter__.assert_called_once()
        await client.close()

    @pytest.mark.asyncio
    async def test_send_request_stream_cleanup(self, api_key):
        """Test that async streaming response cleanup works correctly."""
        client = AsyncFastGPTClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        # Track if close was called on the response
        original_close_called = []

        async def original_close():
            original_close_called.append(True)

        mock_response.close = original_close

        mock_stream_context = AsyncContextManagerMock(mock_response)

        # stream() is not async, it returns an async context manager
        def mock_stream(*args, **kwargs):
            return mock_stream_context

        with patch.object(client._client, 'stream', mock_stream):
            response = await client._send_request("POST", "/api/stream", stream=True)
            await response.close()

        # Verify stream context exit was called
        mock_stream_context.__aexit__.assert_called_once_with(None, None, None)
        # Verify the original close was called
        assert len(original_close_called) == 1
        await client.close()

    @pytest.mark.asyncio
    async def test_send_request_authentication_error(self, api_key, error_responses):
        """Test handling of 401 authentication error."""
        client = AsyncFastGPTClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 401
        mock_response.json = Mock(return_value=error_responses['authentication_error'])

        async def mock_request(*args, **kwargs):
            return mock_response

        with patch.object(client._client, 'request', mock_request):
            with pytest.raises(AuthenticationError) as exc_info:
                await client._send_request("GET", "/api/test")

        assert exc_info.value.status_code == 401
        await client.close()

    @pytest.mark.asyncio
    async def test_send_request_rate_limit_error(self, api_key, error_responses):
        """Test handling of 429 rate limit error."""
        client = AsyncFastGPTClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 429
        mock_response.headers = {"Retry-After": "60"}
        mock_response.json = Mock(return_value=error_responses['rate_limit_error'])

        async def mock_request(*args, **kwargs):
            return mock_response

        with patch.object(client._client, 'request', mock_request):
            with pytest.raises(RateLimitError) as exc_info:
                await client._send_request("GET", "/api/test")

        assert exc_info.value.status_code == 429
        assert exc_info.value.retry_after == "60"
        await client.close()

    @pytest.mark.asyncio
    async def test_send_request_validation_error(self, api_key, error_responses):
        """Test handling of 422 validation error."""
        client = AsyncFastGPTClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 422
        mock_response.json = Mock(return_value=error_responses['validation_error'])

        async def mock_request(*args, **kwargs):
            return mock_response

        with patch.object(client._client, 'request', mock_request):
            with pytest.raises(ValidationError) as exc_info:
                await client._send_request("GET", "/api/test")

        assert exc_info.value.status_code == 422
        await client.close()

    @pytest.mark.asyncio
    async def test_send_request_retry_on_server_error(self, api_key, mock_response):
        """Test that request is retried on 5xx errors."""
        client = AsyncFastGPTClient(api_key, max_retries=2, retry_delay=0.01)

        error_response = Mock(spec=httpx.Response)
        error_response.status_code = 503

        call_count = [0]

        async def mock_request(*args, **kwargs):
            call_count[0] += 1
            if call_count[0] == 1:
                return error_response
            return mock_response

        with patch.object(client._client, 'request', mock_request):
            response = await client._send_request("GET", "/api/test")

        assert response.status_code == 200
        assert call_count[0] == 2
        await client.close()

    @pytest.mark.asyncio
    async def test_retry_request_exponential_backoff(self, api_key, mock_response):
        """Test exponential backoff in async retry logic."""
        client = AsyncFastGPTClient(api_key, max_retries=4, retry_delay=0.05)

        error_response = Mock(spec=httpx.Response)
        error_response.status_code = 503

        call_count = [0]

        async def mock_request(*args, **kwargs):
            call_count[0] += 1
            if call_count[0] < 4:
                return error_response
            return mock_response

        import time
        start_time = time.time()

        with patch.object(client._client, 'request', mock_request):
            response = await client._send_request("GET", "/api/test")

        elapsed_time = time.time() - start_time

        assert response.status_code == 200
        assert call_count[0] == 4
        # Should have exponential backoff delays
        assert elapsed_time >= 0.25
        await client.close()


class TestAsyncChatClient:
    """Test suite for AsyncChatClient."""

    @pytest.mark.asyncio
    async def test_create_chat_completion_basic(self, api_key, sample_chat_response):
        """Test basic async chat completion creation."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200
        mock_response.json = Mock(return_value=sample_chat_response)

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.create_chat_completion(
                messages=[{"role": "user", "content": "Hello"}],
                stream=False
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_create_chat_completion_with_chat_id(self, api_key):
        """Test async chat completion with chatId parameter."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)) as mock_send:
            response = await client.create_chat_completion(
                messages=[{"role": "user", "content": "Hello"}],
                chatId="chat-123"
            )

        assert response.status_code == 200
        assert mock_send.call_args[1]['json']['chatId'] == "chat-123"
        await client.close()

    @pytest.mark.asyncio
    async def test_create_chat_completion_streaming(self, api_key, mock_stream_response):
        """Test async streaming chat completion."""
        client = AsyncChatClient(api_key)

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_stream_response)):
            response = await client.create_chat_completion(
                messages=[{"role": "user", "content": "Hello"}],
                stream=True
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_get_chat_histories_basic(self, api_key, sample_chat_histories_response):
        """Test getting chat histories with basic parameters."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200
        mock_response.json = Mock(return_value=sample_chat_histories_response)

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.get_chat_histories(appId="app-123")

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_get_chat_init(self, api_key):
        """Test getting chat initialization."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.get_chat_init(appId="app-123", chatId="chat-123")

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_get_chat_records(self, api_key, sample_chat_records_response):
        """Test getting chat records."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200
        mock_response.json = Mock(return_value=sample_chat_records_response)

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.get_chat_records(appId="app-123", chatId="chat-123")

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_get_record_detail(self, api_key):
        """Test getting record detail."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.get_record_detail(
                appId="app-123",
                chatId="chat-123",
                dataId="data-123"
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_update_chat_history(self, api_key):
        """Test updating chat history."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.update_chat_history(
                appId="app-123",
                chatId="chat-123",
                customTitle="New Title"
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_delete_chat_history(self, api_key):
        """Test deleting a chat history."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.delete_chat_history(appId="app-123", chatId="chat-123")

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_clear_chat_histories(self, api_key):
        """Test clearing all chat histories."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.clear_chat_histories(appId="app-123")

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_delete_chat_record(self, api_key):
        """Test deleting a single chat record."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.delete_chat_record(
                appId="app-123",
                chatId="chat-123",
                contentId="content-123"
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_send_feedback(self, api_key):
        """Test sending feedback."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.send_feedback(
                appId="app-123",
                chatId="chat-123",
                dataId="data-123",
                userGoodFeedback="Great!"
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_get_suggested_questions(self, api_key):
        """Test getting suggested questions."""
        client = AsyncChatClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.get_suggested_questions(
                appId="app-123",
                chatId="chat-123"
            )

        assert response.status_code == 200
        await client.close()


class TestAsyncAppClient:
    """Test suite for AsyncAppClient."""

    @pytest.mark.asyncio
    async def test_get_app_logs_chart_basic(self, api_key, sample_app_logs_response):
        """Test getting app logs chart with basic parameters."""
        client = AsyncAppClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200
        mock_response.json = Mock(return_value=sample_app_logs_response)

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.get_app_logs_chart(
                appId="app-123",
                dateStart="2024-01-01",
                dateEnd="2024-01-31"
            )

        assert response.status_code == 200
        await client.close()

    @pytest.mark.asyncio
    async def test_get_app_logs_chart_all_parameters(self, api_key):
        """Test getting app logs chart with all parameters."""
        client = AsyncAppClient(api_key)

        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200

        with patch.object(client, '_send_request', AsyncMock(return_value=mock_response)):
            response = await client.get_app_logs_chart(
                appId="app-123",
                dateStart="2024-01-01",
                dateEnd="2024-12-31",
                offset=10,
                source=["api", "online"],
                userTimespan="week",
                chatTimespan="month",
                appTimespan="day"
            )

        assert response.status_code == 200
        await client.close()