Skip to content

Commit 575fc45

Browse files
committed
update on pull request openai#494
1 parent 0eee6b8 commit 575fc45

File tree

6 files changed

+511
-17
lines changed

6 files changed

+511
-17
lines changed
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
"""
Examples demonstrating how to use models that provide reasoning content.
"""

examples/reasoning_content/main.py

Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
"""
2+
Example demonstrating how to use the reasoning content feature with models that support it.
3+
4+
Some models, like deepseek-reasoner, provide a reasoning_content field in addition to the regular content.
5+
This example shows how to access and use this reasoning content from both streaming and non-streaming responses.
6+
7+
To run this example, you need to:
8+
1. Set your OPENAI_API_KEY environment variable
9+
2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
10+
"""
11+
12+
import os
13+
import asyncio
14+
15+
from agents.models.openai_provider import OpenAIProvider
16+
from agents import ModelSettings
17+
from agents.items import ReasoningItem
18+
19+
# Replace this with a model that supports reasoning content (e.g., deepseek-reasoner)
20+
# For demonstration purposes, we'll use a placeholder model name
21+
MODEL_NAME = "deepseek-reasoner"
22+
23+
async def stream_with_reasoning_content():
    """
    Stream a response from a model that exposes reasoning content.

    Reasoning deltas and regular output deltas arrive as distinct event
    types, so the two streams can be collected and colored independently.
    """
    provider = OpenAIProvider()
    model = provider.get_model(MODEL_NAME)

    print("\n=== Streaming Example ===")
    print("Prompt: Write a haiku about recursion in programming")

    # Accumulate the two kinds of deltas separately, joined once at the end.
    reasoning_parts: list[str] = []
    content_parts: list[str] = []

    stream = model.stream_response(
        system_instructions="You are a helpful assistant that writes creative content.",
        input="Write a haiku about recursion in programming",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=None,
        previous_response_id=None,
    )
    async for event in stream:
        if event.type == "response.reasoning_summary_text.delta":
            # Yellow for reasoning content
            print(f"\033[33m{event.delta}\033[0m", end="", flush=True)
            reasoning_parts.append(event.delta)
        elif event.type == "response.output_text.delta":
            # Green for regular content
            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)
            content_parts.append(event.delta)

    print("\n\nReasoning Content:")
    print("".join(reasoning_parts))
    print("\nRegular Content:")
    print("".join(content_parts))
    print("\n")
async def get_response_with_reasoning_content():
    """
    Fetch a complete (non-streaming) response from a model that provides
    reasoning content, which arrives as a separate item in the output
    alongside the regular message.
    """
    provider = OpenAIProvider()
    model = provider.get_model(MODEL_NAME)

    print("\n=== Non-streaming Example ===")
    print("Prompt: Explain the concept of recursion in programming")

    response = await model.get_response(
        system_instructions="You are a helpful assistant that explains technical concepts clearly.",
        input="Explain the concept of recursion in programming",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=None,
        previous_response_id=None,
    )

    # Walk the output items, separating the reasoning item from the message.
    reasoning_content = None
    regular_content = None
    for item in response.output:
        item_type = getattr(item, "type", None)
        if item_type == "reasoning_item":
            reasoning_content = item.content
        elif item_type == "message" and item.content:
            regular_content = item.content[0].text

    print("\nReasoning Content:")
    print(reasoning_content or "No reasoning content provided")

    print("\nRegular Content:")
    print(regular_content or "No regular content provided")

    print("\n")
async def main():
    """Run both reasoning-content examples, printing a hint on failure."""
    try:
        await stream_with_reasoning_content()
        await get_response_with_reasoning_content()
    except Exception as e:
        # Best-effort example: surface the error plus guidance rather than crash.
        print(f"Error: {e}")
        print("\nNote: This example requires a model that supports reasoning content.")
        print("You may need to use a specific model like deepseek-reasoner or similar.")


if __name__ == "__main__":
    asyncio.run(main())
Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
"""
2+
Example demonstrating how to use the reasoning content feature with the Runner API.
3+
4+
This example shows how to extract and use reasoning content from responses when using
5+
the Runner API, which is the most common way users interact with the Agents library.
6+
7+
To run this example, you need to:
8+
1. Set your OPENAI_API_KEY environment variable
9+
2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
10+
"""
11+
12+
import os
13+
import asyncio
14+
15+
from agents import Agent, Runner, ModelSettings, trace
16+
from agents.items import ReasoningItem
17+
18+
# Replace this with a model that supports reasoning content (e.g., deepseek-reasoner)
19+
# For demonstration purposes, we'll use a placeholder model name
20+
MODEL_NAME = "deepseek-reasoner"
21+
22+
async def main():
    """
    Demonstrate reasoning content through the Runner API.

    Example 1 runs non-streaming and pulls the reasoning item out of the
    run result; example 2 streams and separates reasoning deltas from
    regular output deltas.
    """
    print(f"Using model: {MODEL_NAME}")

    # Create an agent with a model that supports reasoning content
    agent = Agent(
        name="Reasoning Agent",
        instructions="You are a helpful assistant that explains your reasoning step by step.",
        model=MODEL_NAME,
    )

    # Example 1: Non-streaming response
    with trace("Reasoning Content - Non-streaming"):
        print("\n=== Example 1: Non-streaming response ===")
        result = await Runner.run(
            agent, "What is the square root of 841? Please explain your reasoning."
        )

        # Run results expose generated items via `new_items` (not `items`).
        reasoning_content = None
        for item in result.new_items:
            if isinstance(item, ReasoningItem):
                reasoning_content = item.raw_item.content
                break

        print("\nReasoning Content:")
        print(reasoning_content or "No reasoning content provided")

        print("\nFinal Output:")
        print(result.final_output)

    # Example 2: Streaming response
    with trace("Reasoning Content - Streaming"):
        print("\n=== Example 2: Streaming response ===")
        print("\nStreaming response:")

        # Buffers to collect reasoning and regular content
        reasoning_buffer = ""
        content_buffer = ""

        # Runner.run_streamed returns a RunResultStreaming; events must be
        # consumed from its stream_events() iterator, not the result itself.
        stream_result = Runner.run_streamed(
            agent, "What is 15 × 27? Please explain your reasoning."
        )
        async for event in stream_result.stream_events():
            # Raw response events wrap the model's low-level stream deltas.
            if event.type != "raw_response_event":
                continue
            data = event.data
            if data.type == "response.reasoning_summary_text.delta":
                reasoning_buffer += data.delta
                print(f"\033[33m{data.delta}\033[0m", end="", flush=True)  # Yellow for reasoning
            elif data.type == "response.output_text.delta":
                content_buffer += data.delta
                print(f"\033[32m{data.delta}\033[0m", end="", flush=True)  # Green for regular content

        print("\n\nCollected Reasoning Content:")
        print(reasoning_buffer)

        print("\nCollected Final Answer:")
        print(content_buffer)


if __name__ == "__main__":
    asyncio.run(main())

src/agents/models/chatcmpl_converter.py

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
ResponseOutputMessageParam,
3434
ResponseOutputRefusal,
3535
ResponseOutputText,
36+
ResponseReasoningItem,
3637
)
3738
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
3839

@@ -84,7 +85,16 @@ def convert_response_format(
8485
@classmethod
8586
def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
8687
items: list[TResponseOutputItem] = []
87-
88+
89+
# Handle reasoning content if available
90+
if hasattr(message, "reasoning_content") and message.reasoning_content:
91+
items.append(
92+
ResponseReasoningItem(
93+
content=message.reasoning_content,
94+
type="reasoning_item",
95+
)
96+
)
97+
8898
message_item = ResponseOutputMessage(
8999
id=FAKE_RESPONSES_ID,
90100
content=[],

0 commit comments

Comments
 (0)