OpenAI开发的生成式预训练Transformer(GPT)模型家族:从GPT-1到GPT-4o及o系列推理模型的演进历程。
"""Demo helpers for the OpenAI GPT model family.

Covers three API patterns: multimodal text+image input (GPT-4o),
reasoning-model calls (o3), and streaming output (GPT-4 Turbo).
Requires the ``OPENAI_API_KEY`` environment variable.
"""
import base64
import mimetypes

from openai import OpenAI

client = OpenAI()


def analyze_multimodal(text_prompt, image_path):
    """Send a text prompt plus a local image to GPT-4o; return the reply text.

    The image is embedded as a base64 data URL. Its MIME type is guessed
    from the file extension (fix: the original hard-coded ``image/jpeg``
    even for PNG/GIF/WebP files), falling back to JPEG when unknown.
    """
    with open(image_path, "rb") as image_file:
        base64_image = base64.b64encode(image_file.read()).decode("utf-8")
    mime_type = mimetypes.guess_type(image_path)[0] or "image/jpeg"
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": text_prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:{mime_type};base64,{base64_image}"
                        },
                    },
                ],
            }
        ],
        max_tokens=1000,
    )
    return response.choices[0].message.content


def complex_reasoning(problem):
    """Solve a complex problem with the o3 reasoning model; return the answer.

    Fixes vs. the original:
    - ``temperature`` removed: o-series reasoning models reject sampling
      parameters, so the API call would fail with a 400 error.
    - ``max_thinking_tokens`` is not a real Chat Completions parameter;
      the supported limit for reasoning models is ``max_completion_tokens``,
      which bounds reasoning tokens plus visible output combined.
    """
    response = client.chat.completions.create(
        model="o3",
        messages=[
            {"role": "system", "content": "你是一个专业的问题求解器,请展示完整的推理过程"},
            {"role": "user", "content": problem},
        ],
        max_completion_tokens=50000,
    )
    return response.choices[0].message.content


def stream_response(prompt):
    """Stream a GPT-4 Turbo completion, printing chunks as they arrive."""
    stream = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    for chunk in stream:
        # The final streamed chunk carries no content delta, hence the check.
        if chunk.choices[0].delta.content is not None:
            print(chunk.choices[0].delta.content, end="")