Skip to content

Commit bbf5137

Browse files
joslat and RogerBarreto
authored and committed
.Net: Sample Code Showcasing Usage of Reasoning Models in OpenAI and AzureOpenAI (microsoft#10558)
### Motivation and Context 1. Required: showing usage of reasoning effort. 2. Problem: Controlling reasoning effort - no sample 3. Scenario: Using reasoning effort to benefit from the new amazing models :) ### Description This pull request adds sample code that demonstrates how to leverage reasoning models in a ChatCompletion on AzureOpenAI and OpenAI. This implementation shows how to leverage LLM reasoning capabilities and also complements the phenomenal code from Roger Barreto (@RogerBarreto), further strengthening the overall Semantic Kernel solution. ### Contribution Checklist <!-- Before submitting this PR, please make sure: --> - [ ] The code builds clean without any errors or warnings - [ ] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations - [ ] All unit tests pass, and I have added new tests where possible - [ ] I didn't break anyone 😄 --------- Co-authored-by: Roger Barreto <[email protected]>
1 parent 093e551 commit bbf5137

File tree

4 files changed

+216
-39
lines changed

4 files changed

+216
-39
lines changed

dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion.cs

+20-39
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,14 @@ namespace ChatCompletion;
1111
// The following example shows how to use Semantic Kernel with Azure OpenAI API
1212
public class AzureOpenAI_ChatCompletion(ITestOutputHelper output) : BaseTest(output)
1313
{
14+
/// <summary>
15+
/// Sample showing how to use <see cref="Kernel"/> with chat completion and chat prompt syntax.
16+
/// </summary>
1417
[Fact]
1518
public async Task ChatPromptAsync()
1619
{
20+
Console.WriteLine("======== Azure Open AI - Chat Completion ========");
21+
1722
Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName);
1823
Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint);
1924

@@ -39,8 +44,8 @@ public async Task ChatPromptAsync()
3944
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
4045
modelId: TestConfiguration.AzureOpenAI.ChatModelId);
4146
}
42-
var kernel = kernelBuilder.Build();
4347

48+
var kernel = kernelBuilder.Build();
4449
var reply = await kernel.InvokePromptAsync(chatPrompt.ToString());
4550

4651
chatPrompt.AppendLine($"<message role=\"assistant\"><![CDATA[{reply}]]></message>");
@@ -51,54 +56,30 @@ public async Task ChatPromptAsync()
5156
Console.WriteLine(reply);
5257
}
5358

59+
/// <summary>
60+
/// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/>.
61+
/// </summary>
5462
[Fact]
5563
public async Task ServicePromptAsync()
5664
{
5765
Console.WriteLine("======== Azure Open AI - Chat Completion ========");
5866

67+
Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName);
68+
Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint);
69+
5970
AzureOpenAIChatCompletionService chatCompletionService =
60-
string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey) ?
61-
new(
71+
string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey)
72+
? new(
6273
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
6374
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
6475
credentials: new DefaultAzureCredential(),
65-
modelId: TestConfiguration.AzureOpenAI.ChatModelId) :
66-
new(
67-
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
68-
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
69-
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
70-
modelId: TestConfiguration.AzureOpenAI.ChatModelId);
71-
72-
await StartChatAsync(chatCompletionService);
73-
}
74-
75-
/// <summary>
76-
/// Sample showing how to use Azure Open AI Chat Completion with Azure Default Credential.
77-
/// If local auth is disabled in the Azure Open AI deployment, you can use Azure Default Credential to authenticate.
78-
/// </summary>
79-
[Fact]
80-
public async Task DefaultAzureCredentialSampleAsync()
81-
{
82-
Console.WriteLine("======== Azure Open AI - Chat Completion with Azure Default Credential ========");
83-
84-
AzureOpenAIChatCompletionService chatCompletionService =
85-
string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey) ?
86-
new(
76+
modelId: TestConfiguration.AzureOpenAI.ChatModelId)
77+
: new(
8778
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
8879
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
89-
credentials: new DefaultAzureCredential(),
90-
modelId: TestConfiguration.AzureOpenAI.ChatModelId) :
91-
new(
92-
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
93-
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
94-
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
95-
modelId: TestConfiguration.AzureOpenAI.ChatModelId);
96-
97-
await StartChatAsync(chatCompletionService);
98-
}
80+
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
81+
modelId: TestConfiguration.AzureOpenAI.ChatModelId);
9982

100-
private async Task StartChatAsync(IChatCompletionService chatGPT)
101-
{
10283
Console.WriteLine("Chat content:");
10384
Console.WriteLine("------------------------");
10485

@@ -109,7 +90,7 @@ private async Task StartChatAsync(IChatCompletionService chatGPT)
10990
OutputLastMessage(chatHistory);
11091

11192
// First assistant message
112-
var reply = await chatGPT.GetChatMessageContentAsync(chatHistory);
93+
var reply = await chatCompletionService.GetChatMessageContentAsync(chatHistory);
11394
chatHistory.Add(reply);
11495
OutputLastMessage(chatHistory);
11596

@@ -118,7 +99,7 @@ private async Task StartChatAsync(IChatCompletionService chatGPT)
11899
OutputLastMessage(chatHistory);
119100

120101
// Second assistant message
121-
reply = await chatGPT.GetChatMessageContentAsync(chatHistory);
102+
reply = await chatCompletionService.GetChatMessageContentAsync(chatHistory);
122103
chatHistory.Add(reply);
123104
OutputLastMessage(chatHistory);
124105
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
// Copyright (c) Microsoft. All rights reserved.

using System.Text;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.AzureOpenAI;
using OpenAI.Chat;

namespace ChatCompletion;

// The following example shows how to use Semantic Kernel with the Azure OpenAI API
// and reasoning models (e.g. o1, o3-mini), controlling the reasoning effort level.
public class AzureOpenAI_ChatCompletion_WithReasoning(ITestOutputHelper output) : BaseTest(output)
{
    /// <summary>
    /// Sample showing how to use <see cref="Kernel"/> with chat completion and chat prompt syntax,
    /// configuring the reasoning effort via <see cref="AzureOpenAIPromptExecutionSettings"/>.
    /// </summary>
    [Fact]
    public async Task ChatPromptWithReasoningAsync()
    {
        Console.WriteLine("======== Azure Open AI - Chat Completion with Reasoning ========");

        Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName);
        Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint);
        Assert.NotNull(TestConfiguration.AzureOpenAI.ApiKey);

        var kernel = Kernel.CreateBuilder()
            .AddAzureOpenAIChatCompletion(
                deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
                endpoint: TestConfiguration.AzureOpenAI.Endpoint,
                apiKey: TestConfiguration.AzureOpenAI.ApiKey,
                modelId: TestConfiguration.AzureOpenAI.ChatModelId)
            .Build();

        // Create execution settings with low reasoning effort.
        var executionSettings = new AzureOpenAIPromptExecutionSettings
        {
            // Flags the Azure SDK to send max_completion_tokens (required by reasoning models)
            // instead of the legacy max_tokens property.
            SetNewMaxCompletionTokensEnabled = true,
            MaxTokens = 2000,
            // Note: reasoning effort is only available for reasoning models (at this moment o3-mini & o1 models)
            ReasoningEffort = ChatReasoningEffortLevel.Low
        };

        // Create KernelArguments using the execution settings.
        var kernelArgs = new KernelArguments(executionSettings);

        StringBuilder chatPrompt = new("""
            <message role="developer">You are an expert software engineer, specialized in the Semantic Kernel SDK and NET framework</message>
            <message role="user">Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop .</message>
            """);

        // Invoke the prompt with the configured (low) reasoning effort.
        var reply = await kernel.InvokePromptAsync(chatPrompt.ToString(), kernelArgs);

        Console.WriteLine(reply);
    }

    /// <summary>
    /// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/>,
    /// configuring the reasoning effort via <see cref="AzureOpenAIPromptExecutionSettings"/>.
    /// </summary>
    [Fact]
    public async Task ServicePromptWithReasoningAsync()
    {
        // NOTE(review): the banner mentions "Azure Default Credential" but this sample
        // authenticates with an API key — confirm the intended wording.
        Console.WriteLine("======== Azure Open AI - Chat Completion with Azure Default Credential with Reasoning ========");

        Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName);
        Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint);
        Assert.NotNull(TestConfiguration.AzureOpenAI.ApiKey);

        IChatCompletionService chatCompletionService = new AzureOpenAIChatCompletionService(
            deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
            endpoint: TestConfiguration.AzureOpenAI.Endpoint,
            apiKey: TestConfiguration.AzureOpenAI.ApiKey,
            modelId: TestConfiguration.AzureOpenAI.ChatModelId);

        // Create execution settings with low reasoning effort.
        var executionSettings = new AzureOpenAIPromptExecutionSettings
        {
            // Flags the Azure SDK to send max_completion_tokens (required by reasoning models)
            // instead of the legacy max_tokens property.
            SetNewMaxCompletionTokensEnabled = true,
            MaxTokens = 2000,
            // Note: reasoning effort is only available for reasoning models (at this moment o3-mini & o1 models)
            ReasoningEffort = ChatReasoningEffortLevel.Low
        };

        // Create a ChatHistory and add messages.
        var chatHistory = new ChatHistory();
        chatHistory.AddDeveloperMessage(
            "You are an expert software engineer, specialized in the Semantic Kernel SDK and .NET framework.");
        chatHistory.AddUserMessage(
            "Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop.");

        // Instead of a prompt string, call GetChatMessageContentAsync with the chat history.
        var reply = await chatCompletionService.GetChatMessageContentAsync(
            chatHistory: chatHistory,
            executionSettings: executionSettings);

        Console.WriteLine(reply);
    }
}

dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs

+10
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,9 @@ namespace ChatCompletion;
1010
// The following example shows how to use Semantic Kernel with OpenAI API
1111
public class OpenAI_ChatCompletion(ITestOutputHelper output) : BaseTest(output)
1212
{
13+
/// <summary>
14+
/// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/>.
15+
/// </summary>
1316
[Fact]
1417
public async Task ServicePromptAsync()
1518
{
@@ -23,6 +26,10 @@ public async Task ServicePromptAsync()
2326
await StartChatAsync(chatCompletionService);
2427
}
2528

29+
/// <summary>
30+
/// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/> also exploring the
31+
/// breaking glass approach capturing the underlying <see cref="OpenAI.Chat.ChatCompletion"/> instance via <see cref="KernelContent.InnerContent"/>.
32+
/// </summary>
2633
[Fact]
2734
public async Task ServicePromptWithInnerContentAsync()
2835
{
@@ -51,6 +58,9 @@ public async Task ServicePromptWithInnerContentAsync()
5158
OutputInnerContent(replyInnerContent!);
5259
}
5360

61+
/// <summary>
62+
/// Sample showing how to use <see cref="Kernel"/> with chat completion and chat prompt syntax.
63+
/// </summary>
5464
[Fact]
5565
public async Task ChatPromptAsync()
5666
{
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
// Copyright (c) Microsoft. All rights reserved.

using System.Text;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.OpenAI;
using OpenAI.Chat;

namespace ChatCompletion;

// The following example shows how to use Semantic Kernel with the OpenAI API
// and reasoning models (e.g. o1, o3-mini), controlling the reasoning effort level.
public class OpenAI_ChatCompletion_WithReasoning(ITestOutputHelper output) : BaseTest(output)
{
    /// <summary>
    /// Sample showing how to use <see cref="Kernel"/> with chat completion and chat prompt syntax,
    /// configuring the reasoning effort via <see cref="OpenAIPromptExecutionSettings"/>.
    /// </summary>
    [Fact]
    public async Task ChatPromptWithReasoningAsync()
    {
        Console.WriteLine("======== Open AI - Chat Completion with Reasoning ========");

        Assert.NotNull(TestConfiguration.OpenAI.ChatModelId);
        Assert.NotNull(TestConfiguration.OpenAI.ApiKey);

        var kernel = Kernel.CreateBuilder()
            .AddOpenAIChatCompletion(
                modelId: TestConfiguration.OpenAI.ChatModelId,
                apiKey: TestConfiguration.OpenAI.ApiKey)
            .Build();

        // Create execution settings with low reasoning effort.
        var executionSettings = new OpenAIPromptExecutionSettings
        {
            MaxTokens = 2000,
            ReasoningEffort = ChatReasoningEffortLevel.Low // Only available for reasoning models (i.e: o3-mini, o1, ...)
        };

        // Create KernelArguments using the execution settings.
        var kernelArgs = new KernelArguments(executionSettings);

        StringBuilder chatPrompt = new("""
            <message role="developer">You are an expert software engineer, specialized in the Semantic Kernel SDK and NET framework</message>
            <message role="user">Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop .</message>
            """);

        // Invoke the prompt with the configured (low) reasoning effort.
        var reply = await kernel.InvokePromptAsync(chatPrompt.ToString(), kernelArgs);

        Console.WriteLine(reply);
    }

    /// <summary>
    /// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/>,
    /// configuring the reasoning effort via <see cref="OpenAIPromptExecutionSettings"/>.
    /// </summary>
    [Fact]
    public async Task ServicePromptWithReasoningAsync()
    {
        Assert.NotNull(TestConfiguration.OpenAI.ChatModelId);
        Assert.NotNull(TestConfiguration.OpenAI.ApiKey);

        Console.WriteLine("======== Open AI - Chat Completion with Reasoning ========");

        OpenAIChatCompletionService chatCompletionService = new(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey);

        // Create execution settings with low reasoning effort.
        var executionSettings = new OpenAIPromptExecutionSettings
        {
            MaxTokens = 2000,
            ReasoningEffort = ChatReasoningEffortLevel.Low // Only available for reasoning models (i.e: o3-mini, o1, ...)
        };

        // Create a ChatHistory and add messages.
        var chatHistory = new ChatHistory();
        chatHistory.AddDeveloperMessage(
            "You are an expert software engineer, specialized in the Semantic Kernel SDK and .NET framework.");
        chatHistory.AddUserMessage(
            "Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop.");

        // Instead of a prompt string, call GetChatMessageContentAsync with the chat history.
        var reply = await chatCompletionService.GetChatMessageContentAsync(
            chatHistory: chatHistory,
            executionSettings: executionSettings);

        Console.WriteLine(reply);
    }
}

0 commit comments

Comments
 (0)