APIYI is fully compatible with the OpenAI API format. You can use the official OpenAI SDK directly — with only simple configuration changes — to seamlessly switch to the APIYI service.

Documentation Index
Fetch the complete documentation index at: https://docs.apiyi.com/llms.txt
Use this file to discover all available pages before exploring further.
Supported Official SDKs
APIYI supports all official OpenAI SDKs:
- Python (openai)
- Node.js (openai)
- .NET (OpenAI)
- Go (go-openai)
- Java (third-party)
- PHP (third-party)
- Ruby (third-party)
Python SDK
Installation
pip install openai
Basic Configuration
from openai import OpenAI
# Configure APIYI service
client = OpenAI(
api_key="YOUR_API_KEY", # Your APIYI key
base_url="https://api.apiyi.com/v1" # APIYI endpoint
)
# Usage is exactly the same as official
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Hello!"}
]
)
print(response.choices[0].message.content)
Environment Variable Configuration
import os
from openai import OpenAI
# Set environment variables
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
os.environ["OPENAI_BASE_URL"] = "https://api.apiyi.com/v1"
# Use default configuration
client = OpenAI()
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Explain quantum computing"}]
)
Async Usage
import asyncio
from openai import AsyncOpenAI
async def main():
client = AsyncOpenAI(
api_key="YOUR_API_KEY",
base_url="https://api.apiyi.com/v1"
)
response = await client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)
asyncio.run(main())
Streaming Output
from openai import OpenAI
client = OpenAI(
api_key="YOUR_API_KEY",
base_url="https://api.apiyi.com/v1"
)
stream = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Write a short story"}],
stream=True
)
for chunk in stream:
if chunk.choices[0].delta.content is not None:
print(chunk.choices[0].delta.content, end="")
Node.js SDK
Installation
npm install openai
Basic Configuration
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: 'YOUR_API_KEY',
baseURL: 'https://api.apiyi.com/v1'
});
const response = await openai.chat.completions.create({
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Hello!' }]
});
console.log(response.choices[0].message.content);
Environment Variable Configuration
// Set environment variables
process.env.OPENAI_API_KEY = 'YOUR_API_KEY';
process.env.OPENAI_BASE_URL = 'https://api.apiyi.com/v1';
import OpenAI from 'openai';
// Use default configuration
const openai = new OpenAI();
const response = await openai.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'Explain AI to a 5-year-old' }]
});
Streaming Output
const stream = await openai.chat.completions.create({
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Tell me a joke' }],
stream: true
});
for await (const chunk of stream) {
if (chunk.choices[0]?.delta?.content) {
process.stdout.write(chunk.choices[0].delta.content);
}
}
TypeScript Support
import OpenAI from 'openai';
import type { ChatCompletionCreateParamsNonStreaming } from 'openai/resources/chat/completions';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY!,
baseURL: 'https://api.apiyi.com/v1'
});
const params: ChatCompletionCreateParamsNonStreaming = {
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Hello TypeScript!' }],
temperature: 0.7
};
const response = await openai.chat.completions.create(params);
.NET SDK
Installation
dotnet add package OpenAI
Basic Configuration
using OpenAI;
using OpenAI.Chat;
var client = new OpenAIClient("YOUR_API_KEY", new OpenAIClientOptions
{
Endpoint = new Uri("https://api.apiyi.com/v1")
});
var chatClient = client.GetChatClient("gpt-3.5-turbo");
var response = await chatClient.CompleteChatAsync("Hello!");
Console.WriteLine(response.Value.Content[0].Text);
Streaming Output
await foreach (var update in chatClient.CompleteChatStreamingAsync("Tell me a story"))
{
if (update.ContentUpdate.Count > 0)
{
Console.Write(update.ContentUpdate[0].Text);
}
}
Go SDK
Installation
go get github.com/sashabaranov/go-openai
Basic Configuration
package main
import (
"context"
"fmt"
"github.com/sashabaranov/go-openai"
)
func main() {
config := openai.DefaultConfig("YOUR_API_KEY")
config.BaseURL = "https://api.apiyi.com/v1"
client := openai.NewClientWithConfig(config)
resp, err := client.CreateChatCompletion(
context.Background(),
openai.ChatCompletionRequest{
Model: openai.GPT3Dot5Turbo,
Messages: []openai.ChatCompletionMessage{
{
Role: openai.ChatMessageRoleUser,
Content: "Hello!",
},
},
},
)
if err != nil {
fmt.Printf("Error: %v\n", err)
return
}
fmt.Println(resp.Choices[0].Message.Content)
}
Streaming Output
stream, err := client.CreateChatCompletionStream(
context.Background(),
openai.ChatCompletionRequest{
Model: openai.GPT3Dot5Turbo,
Messages: []openai.ChatCompletionMessage{
{
Role: openai.ChatMessageRoleUser,
Content: "Write a haiku",
},
},
Stream: true,
},
)
if err != nil {
fmt.Printf("Error: %v\n", err)
return
}
defer stream.Close()
for {
response, err := stream.Recv()
if errors.Is(err, io.EOF) {
break
}
if err != nil {
fmt.Printf("Error: %v\n", err)
return
}
fmt.Print(response.Choices[0].Delta.Content)
}
Java SDK
Dependency Configuration
<dependency>
<groupId>com.theokanning.openai-gpt3-java</groupId>
<artifactId>service</artifactId>
<version>0.18.2</version>
</dependency>
Basic Usage
import com.theokanning.openai.OpenAiService;
import com.theokanning.openai.completion.chat.*;
import java.time.Duration;
import java.util.List;
public class APIYiExample {
public static void main(String[] args) {
OpenAiService service = new OpenAiService(
"YOUR_API_KEY",
Duration.ofSeconds(60),
"https://api.apiyi.com/v1/"
);
ChatCompletionRequest request = ChatCompletionRequest.builder()
.model("gpt-3.5-turbo")
.messages(List.of(
new ChatMessage(ChatMessageRole.USER, "Hello!")
))
.build();
ChatCompletionResult result = service.createChatCompletion(request);
System.out.println(result.getChoices().get(0).getMessage().getContent());
}
}
Model Switching
Using Different Models
# GPT models
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello"}]
)
# Claude models
response = client.chat.completions.create(
model="claude-3-opus-20240229",
messages=[{"role": "user", "content": "Hello"}]
)
# Gemini models
response = client.chat.completions.create(
model="gemini-pro",
messages=[{"role": "user", "content": "Hello"}]
)
Dynamic Model Selection
def chat_with_model(message: str, model: str = "gpt-3.5-turbo"):
"""Chat function supporting dynamic model switching"""
response = client.chat.completions.create(
model=model,
messages=[{"role": "user", "content": message}]
)
return response.choices[0].message.content
# Use different models
print(chat_with_model("Explain quantum computing", "gpt-4"))
print(chat_with_model("Explain quantum computing", "claude-3-opus-20240229"))
print(chat_with_model("Explain quantum computing", "gemini-pro"))
Advanced Features
Function Calling
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get weather information for a specified city",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
}
},
"required": ["location"]
}
}
}
]
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "What's the weather in Beijing?"}],
tools=tools,
tool_choice="auto"
)
if response.choices[0].message.tool_calls:
print("AI wants to call function:", response.choices[0].message.tool_calls[0].function.name)
Image Input
response = client.chat.completions.create(
model="gpt-4o",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://example.com/image.jpg"
}
}
]
}
]
)
Embeddings
response = client.embeddings.create(
model="text-embedding-3-small",
input="Text content to embed"
)
embedding = response.data[0].embedding
print(f"Vector dimension: {len(embedding)}")
Error Handling
Basic Error Handling
from openai import OpenAI, OpenAIError
try:
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello"}]
)
except OpenAIError as e:
print(f"API error: {e}")
Detailed Error Handling
from openai import (
OpenAI,
APIError,
APIConnectionError,
RateLimitError,
InternalServerError
)
try:
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello"}]
)
except RateLimitError:
print("Rate limit exceeded, please retry later")
except APIConnectionError:
print("Network connection error")
except InternalServerError:
print("Internal server error")
except APIError as e:
print(f"API error: {e}")
Best Practices
1. Configuration Management
import os
from openai import OpenAI
class APIYiClient:
def __init__(self):
self.client = OpenAI(
api_key=os.getenv("APIYI_API_KEY"),
base_url=os.getenv("APIYI_BASE_URL", "https://api.apiyi.com/v1")
)
def chat(self, message: str, model: str = "gpt-3.5-turbo"):
return self.client.chat.completions.create(
model=model,
messages=[{"role": "user", "content": message}]
)
2. Retry Mechanism
import time
import random
from openai import OpenAI, RateLimitError
def chat_with_retry(client, messages, max_retries=3):
for attempt in range(max_retries):
try:
return client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
except RateLimitError:
if attempt < max_retries - 1:
wait_time = (2 ** attempt) + random.uniform(0, 1)
time.sleep(wait_time)
else:
raise
3. Cost Control
def controlled_chat(message: str, max_tokens: int = 150):
"""Control output length to manage costs"""
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": message}],
max_tokens=max_tokens,
temperature=0.7
)
return response
Migration Guide
Migrating from OpenAI
If you’re already using OpenAI’s official service, migrating to APIYI is very simple:

- Change base_url
# Original configuration
client = OpenAI(api_key="sk-...")
# Change to APIYI
client = OpenAI(
api_key="YOUR_APIYI_KEY",
base_url="https://api.apiyi.com/v1"
)
- Update Environment Variables
# Original
export OPENAI_API_KEY="sk-..."
# Change to
export OPENAI_API_KEY="YOUR_APIYI_KEY"
export OPENAI_BASE_URL="https://api.apiyi.com/v1"
- No Code Changes Needed
All other code remains unchanged, including:
- Method calls
- Parameter formats
- Response handling
Multi-Provider Compatibility
class MultiProviderClient:
def __init__(self):
self.apiyi_client = OpenAI(
api_key="APIYI_KEY",
base_url="https://api.apiyi.com/v1"
)
self.openai_client = OpenAI(api_key="OPENAI_KEY")
def chat(self, message: str, provider: str = "apiyi"):
client = self.apiyi_client if provider == "apiyi" else self.openai_client
return client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": message}]
)