# Minimal Responses API request: text input plus system instructions.
curl https://api.apiyi.com/v1/responses \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -d '{ "model": "gpt-4.1", "input": "Hello! How can you help me today?", "instructions": "You are a helpful assistant." }'
# Responses API call with the built-in code-interpreter tool enabled,
# so the model can generate and run code to build the chart.
response = client.responses.create(
    model="gpt-4.1",
    input="Create a chart showing sales data: Jan:100, Feb:150, Mar:120",
    instructions="You are a data analyst. Use code interpreter to create visualizations.",
    tools=[{"type": "code_interpreter"}],
)
# Responses API call with the built-in file-search tool enabled,
# letting the model retrieve from previously uploaded documents.
response = client.responses.create(
    model="gpt-4.1",
    input="Search for information about quarterly reports",
    instructions="You are a document analyst.",
    tools=[{"type": "file_search"}],
)
# Turn 1: state a fact for the model to remember.
response1 = client.responses.create(
    model="gpt-4.1",
    input="My name is Alice. Please remember this.",
    instructions="You are a helpful assistant with good memory.",
)

# Turn 2: chain onto the first response via previous_response_id so the
# server-side conversation state (the name "Alice") carries over.
response2 = client.responses.create(
    model="gpt-4.1",
    input="What's my name?",
    instructions="You are a helpful assistant with good memory.",
    previous_response_id=response1.id,
)
print(response2.output[0].content[0].text)  # should answer "Alice"
def multi_turn_conversation():
    """Run a three-turn math dialogue, threading context via previous_response_id."""
    prompts = ["What's 2+2?", "Now multiply that by 3", "And divide by 2"]
    response_id = None  # no prior turn on the first iteration
    for user_input in prompts:
        reply = client.responses.create(
            model="o3",
            input=user_input,
            instructions="You are a math tutor. Show your reasoning.",
            previous_response_id=response_id,
            tools=[{"type": "code_interpreter"}],
        )
        print(f"User: {user_input}")
        print(f"Assistant: {reply.output[0].content[0].text}")
        response_id = reply.id  # carry the context into the next turn
# Complex reasoning with the o3 model.
response = client.responses.create(
    model="o3",
    input="Solve this step by step: If a train travels 120km in 2 hours, then speeds up 20% for the next hour, how far did it travel in total?",
    instructions="Think through this problem step by step, showing all reasoning.",
)

# Inspect how many tokens were spent on internal reasoning.
reasoning_tokens = response.usage.output_tokens_details.reasoning_tokens
print(f"Reasoning tokens used: {reasoning_tokens}")

# Continue the conversation; the reasoning context is preserved
# server-side via previous_response_id.
follow_up = client.responses.create(
    model="o3",
    input="Now what if the train slowed down 10% in the fourth hour?",
    previous_response_id=response.id,
)
{ "error": { "type": "invalid_request_error", "code": "model_not_supported", "message": "The model 'gpt-3.5-turbo' is not supported for the responses endpoint.", "param": "model" }}
def smart_tool_calling(user_input):
    """Select tools by keyword matching on the input, then call the Responses API.

    Returns the raw response object from client.responses.create.
    """
    # Keyword -> tool table; a tool is offered only when its keyword
    # appears (case-insensitively) in the user's input.
    keyword_tools = [
        ("weather", weather_tool),
        ("calculate", calculator_tool),
        ("search", search_tool),
    ]
    lowered = user_input.lower()
    available_tools = [tool for keyword, tool in keyword_tools if keyword in lowered]

    response = client.responses.create(
        model="gpt-4.1",
        input=user_input,
        instructions="Use the appropriate tools to help the user.",
        tools=available_tools,
        tool_choice="auto",
    )
    return response