diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 6198b68a6e5..4726588453b 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -18,14 +18,18 @@
| Xiaoyun Zhang | [LittleLittleCloud](https://github.com/LittleLittleCloud) | Microsoft | AutoGen.Net, group chat | Yes | [Backlog - AutoGen.Net](https://github.com/microsoft/autogen/issues) - Available most of the time (PST) |
| Yiran Wu | [yiranwu0](https://github.com/yiranwu0) | Penn State University | alt-models, group chat, logging | Yes | |
| Beibin Li | [BeibinLi](https://github.com/BeibinLi) | Microsoft Research | alt-models | Yes | |
-| Gagan Bansal | [gagb](https://github.com/gagb) | Microsoft Research | Complex Tasks | | |
+| Gagan Bansal | [gagb](https://github.com/gagb) | Microsoft Research | All | | |
| Adam Fourney | [afourney](https://github.com/afourney) | Microsoft Research | Complex Tasks | | |
| Ricky Loynd | [rickyloynd-microsoft](https://github.com/rickyloynd-microsoft) | Microsoft Research | Teachability | | |
-| Eric Zhu | [ekzhu](https://github.com/ekzhu) | Microsoft Research | Infra | | |
-| Jack Gerrits | [jackgerrits](https://github.com/jackgerrits) | Microsoft Research | Infra | | |
+| Eric Zhu | [ekzhu](https://github.com/ekzhu) | Microsoft Research | All, Infra | | |
+| Jack Gerrits | [jackgerrits](https://github.com/jackgerrits) | Microsoft Research | All, Infra | | |
| David Luong | [DavidLuong98](https://github.com/DavidLuong98) | Microsoft | AutoGen.Net | | |
| Davor Runje | [davorrunje](https://github.com/davorrunje) | airt.ai | Tool calling, IO | | Available most of the time (Central European Time) |
-
+| Friederike Niedtner | [Friederike](https://www.microsoft.com/en-us/research/people/fniedtner/) | Microsoft Research | PM | | |
+| Rafah Hosn | [Rafah](https://www.microsoft.com/en-us/research/people/raaboulh/) | Microsoft Research | PM | | |
+| Robin Moeur | [Robin](https://www.linkedin.com/in/rmoeur/) | Microsoft Research | PM | | |
+| Jingya Chen | [jingyachen](https://github.com/JingyaChen) | Microsoft | UX Design, AutoGen Studio | | |
+| Suff Syed | [suffsyed](https://github.com/suffsyed) | Microsoft | UX Design, AutoGen Studio | | |
## I would like to join this list. How can I help the project?
> We're always looking for new contributors to join our team and help improve the project. For more information, please refer to our [CONTRIBUTING](https://microsoft.github.io/autogen/docs/contributor-guide/contributing) guide.
diff --git a/TRANSPARENCY_FAQS.md b/TRANSPARENCY_FAQS.md
index 206af084748..addf29d8b8d 100644
--- a/TRANSPARENCY_FAQS.md
+++ b/TRANSPARENCY_FAQS.md
@@ -31,6 +31,8 @@ While AutoGen automates LLM workflows, decisions about how to use specific LLM o
- Current version of AutoGen was evaluated on six applications to illustrate its potential in simplifying the development of high-performance multi-agent applications. These applications are selected based on their real-world relevance, problem difficulty and problem solving capabilities enabled by AutoGen, and innovative potential.
- These applications involve using AutoGen to solve math problems, question answering, decision making in text world environments, supply chain optimization, etc. For each of these domains AutoGen was evaluated on various success based metrics (i.e., how often the AutoGen based implementation solved the task). And, in some cases, AutoGen based approach was also evaluated on implementation efficiency (e.g., to track reductions in developer effort to build). More details can be found at: https://aka.ms/AutoGen/TechReport
- The team has conducted tests where a “red” agent attempts to get the default AutoGen assistant to break from its alignment and guardrails. The team has observed that out of 70 attempts to break guardrails, only 1 was successful in producing text that would have been flagged as problematic by Azure OpenAI filters. The team has not observed any evidence that AutoGen (or GPT models as hosted by OpenAI or Azure) can produce novel code exploits or jailbreak prompts, since direct prompts to “be a hacker”, “write exploits”, or “produce a phishing email” are refused by existing filters.
+- We also evaluated [a team of AutoGen agents](https://github.com/microsoft/autogen/tree/gaia_multiagent_v01_march_1st/samples/tools/autogenbench/scenarios/GAIA/Templates/Orchestrator) on the [GAIA benchmarks](https://arxiv.org/abs/2311.12983), and got [SOTA results](https://huggingface.co/spaces/gaia-benchmark/leaderboard) as of
+ March 1, 2024.
## What are the limitations of AutoGen? How can users minimize the impact of AutoGen’s limitations when using the system?
AutoGen relies on existing LLMs. Experimenting with AutoGen would retain common limitations of large language models; including:
diff --git a/dotnet/AutoGen.sln b/dotnet/AutoGen.sln
index db0b2cbb54c..78d18527b62 100644
--- a/dotnet/AutoGen.sln
+++ b/dotnet/AutoGen.sln
@@ -64,7 +64,7 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.Gemini.Sample", "sa
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.AotCompatibility.Tests", "test\AutoGen.AotCompatibility.Tests\AutoGen.AotCompatibility.Tests.csproj", "{6B82F26D-5040-4453-B21B-C8D1F913CE4C}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.OpenAI.V1.Sample", "sample\AutoGen.OpenAI.Sample\AutoGen.OpenAI.V1.Sample.csproj", "{0E635268-351C-4A6B-A28D-593D868C2CA4}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.OpenAI.Sample", "sample\AutoGen.OpenAI.Sample\AutoGen.OpenAI.Sample.csproj", "{0E635268-351C-4A6B-A28D-593D868C2CA4}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.WebAPI.Sample", "sample\AutoGen.WebAPI.Sample\AutoGen.WebAPI.Sample.csproj", "{12079C18-A519-403F-BBFD-200A36A0C083}"
EndProject
@@ -74,6 +74,10 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.AzureAIInference.Te
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.Tests.Share", "test\AutoGen.Test.Share\AutoGen.Tests.Share.csproj", "{143725E2-206C-4D37-93E4-9EDF699826B2}"
EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.OpenAI", "src\AutoGen.OpenAI\AutoGen.OpenAI.csproj", "{3AF1CBEC-2877-41E9-92AE-3A391B2AA9E8}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.OpenAI.Tests", "test\AutoGen.OpenAI.Tests\AutoGen.OpenAI.Tests.csproj", "{42A8251C-E7B3-47BB-A82E-459952EBE132}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -212,6 +216,14 @@ Global
{143725E2-206C-4D37-93E4-9EDF699826B2}.Debug|Any CPU.Build.0 = Debug|Any CPU
{143725E2-206C-4D37-93E4-9EDF699826B2}.Release|Any CPU.ActiveCfg = Release|Any CPU
{143725E2-206C-4D37-93E4-9EDF699826B2}.Release|Any CPU.Build.0 = Release|Any CPU
+ {3AF1CBEC-2877-41E9-92AE-3A391B2AA9E8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {3AF1CBEC-2877-41E9-92AE-3A391B2AA9E8}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {3AF1CBEC-2877-41E9-92AE-3A391B2AA9E8}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {3AF1CBEC-2877-41E9-92AE-3A391B2AA9E8}.Release|Any CPU.Build.0 = Release|Any CPU
+ {42A8251C-E7B3-47BB-A82E-459952EBE132}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {42A8251C-E7B3-47BB-A82E-459952EBE132}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {42A8251C-E7B3-47BB-A82E-459952EBE132}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {42A8251C-E7B3-47BB-A82E-459952EBE132}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -250,6 +262,8 @@ Global
{5C45981D-1319-4C25-935C-83D411CB28DF} = {18BF8DD7-0585-48BF-8F97-AD333080CE06}
{5970868F-831E-418F-89A9-4EC599563E16} = {F823671B-3ECA-4AE6-86DA-25E920D3FE64}
{143725E2-206C-4D37-93E4-9EDF699826B2} = {F823671B-3ECA-4AE6-86DA-25E920D3FE64}
+ {3AF1CBEC-2877-41E9-92AE-3A391B2AA9E8} = {18BF8DD7-0585-48BF-8F97-AD333080CE06}
+ {42A8251C-E7B3-47BB-A82E-459952EBE132} = {F823671B-3ECA-4AE6-86DA-25E920D3FE64}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {93384647-528D-46C8-922C-8DB36A382F0B}
diff --git a/dotnet/eng/Version.props b/dotnet/eng/Version.props
index d90e8bc76c8..36cfd917c2c 100644
--- a/dotnet/eng/Version.props
+++ b/dotnet/eng/Version.props
@@ -2,8 +2,9 @@
1.0.0-beta.17
- 1.15.1
- 1.15.1-alpha
+ 2.0.0-beta.3
+ 1.18.1-rc
+ 1.18.1-alpha
5.0.0
4.3.0
6.0.0
@@ -16,6 +17,7 @@
3.0.0
4.3.0.2
1.0.0-beta.1
+ 2.0.0-beta.10
7.4.4
\ No newline at end of file
diff --git a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/CreateAnAgent.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/CreateAnAgent.cs
index 45be312cbd5..f6805322466 100644
--- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/CreateAnAgent.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/CreateAnAgent.cs
@@ -3,8 +3,10 @@
using AutoGen;
using AutoGen.Core;
-using AutoGen.OpenAI.V1;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
using FluentAssertions;
+using OpenAI;
public partial class AssistantCodeSnippet
{
@@ -32,23 +34,18 @@ public void CodeSnippet2()
{
#region code_snippet_2
// get OpenAI Key and create config
- var apiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY");
- string endPoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); // change to your endpoint
+ var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY");
+ var model = "gpt-4o-mini";
- var llmConfig = new AzureOpenAIConfig(
- endpoint: endPoint,
- deploymentName: "gpt-3.5-turbo-16k", // change to your deployment name
- apiKey: apiKey);
+ var openAIClient = new OpenAIClient(apiKey);
// create assistant agent
- var assistantAgent = new AssistantAgent(
+ var assistantAgent = new OpenAIChatAgent(
name: "assistant",
systemMessage: "You are an assistant that help user to do some tasks.",
- llmConfig: new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = new[] { llmConfig },
- });
+ chatClient: openAIClient.GetChatClient(model))
+ .RegisterMessageConnector()
+ .RegisterPrintMessage();
#endregion code_snippet_2
}
@@ -71,27 +68,21 @@ public async Task CodeSnippet4()
// get OpenAI Key and create config
var apiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY");
string endPoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); // change to your endpoint
-
- var llmConfig = new AzureOpenAIConfig(
- endpoint: endPoint,
- deploymentName: "gpt-3.5-turbo-16k", // change to your deployment name
- apiKey: apiKey);
+ var model = "gpt-4o-mini";
+ var openAIClient = new OpenAIClient(new System.ClientModel.ApiKeyCredential(apiKey), new OpenAIClientOptions
+ {
+ Endpoint = new Uri(endPoint),
+ });
#region code_snippet_4
- var assistantAgent = new AssistantAgent(
+ var assistantAgent = new OpenAIChatAgent(
+ chatClient: openAIClient.GetChatClient(model),
name: "assistant",
systemMessage: "You are an assistant that convert user input to upper case.",
- llmConfig: new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = new[]
- {
- llmConfig
- },
- FunctionContracts = new[]
- {
- this.UpperCaseFunctionContract, // The FunctionDefinition object for the UpperCase function
- },
- });
+ functions: [
+ this.UpperCaseFunctionContract.ToChatTool(), // The FunctionDefinition object for the UpperCase function
+ ])
+ .RegisterMessageConnector()
+ .RegisterPrintMessage();
var response = await assistantAgent.SendAsync("hello");
response.Should().BeOfType();
@@ -106,31 +97,24 @@ public async Task CodeSnippet5()
// get OpenAI Key and create config
var apiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY");
string endPoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); // change to your endpoint
-
- var llmConfig = new AzureOpenAIConfig(
- endpoint: endPoint,
- deploymentName: "gpt-3.5-turbo-16k", // change to your deployment name
- apiKey: apiKey);
+ var model = "gpt-4o-mini";
+ var openAIClient = new OpenAIClient(new System.ClientModel.ApiKeyCredential(apiKey), new OpenAIClientOptions
+ {
+ Endpoint = new Uri(endPoint),
+ });
#region code_snippet_5
- var assistantAgent = new AssistantAgent(
- name: "assistant",
- systemMessage: "You are an assistant that convert user input to upper case.",
- llmConfig: new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = new[]
- {
- llmConfig
- },
- FunctionContracts = new[]
- {
- this.UpperCaseFunctionContract, // The FunctionDefinition object for the UpperCase function
- },
- },
- functionMap: new Dictionary>>
+ var functionCallMiddleware = new FunctionCallMiddleware(
+ functions: [this.UpperCaseFunctionContract],
+ functionMap: new Dictionary>>()
{
- { this.UpperCaseFunctionContract.Name, this.UpperCaseWrapper }, // The wrapper function for the UpperCase function
+ { this.UpperCaseFunctionContract.Name, this.UpperCase },
});
+ var assistantAgent = new OpenAIChatAgent(
+ name: "assistant",
+ systemMessage: "You are an assistant that convert user input to upper case.",
+ chatClient: openAIClient.GetChatClient(model))
+ .RegisterMessageConnector()
+ .RegisterStreamingMiddleware(functionCallMiddleware);
var response = await assistantAgent.SendAsync("hello");
response.Should().BeOfType();
diff --git a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/FunctionCallCodeSnippet.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/FunctionCallCodeSnippet.cs
index 567476ba21c..854a385dc34 100644
--- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/FunctionCallCodeSnippet.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/FunctionCallCodeSnippet.cs
@@ -3,7 +3,6 @@
using AutoGen;
using AutoGen.Core;
-using AutoGen.OpenAI.V1;
using FluentAssertions;
public partial class FunctionCallCodeSnippet
diff --git a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/GetStartCodeSnippet.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/GetStartCodeSnippet.cs
index c5cdb35f49b..c5ff7b77033 100644
--- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/GetStartCodeSnippet.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/GetStartCodeSnippet.cs
@@ -4,7 +4,9 @@
#region snippet_GetStartCodeSnippet
using AutoGen;
using AutoGen.Core;
-using AutoGen.OpenAI.V1;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
+using OpenAI;
#endregion snippet_GetStartCodeSnippet
public class GetStartCodeSnippet
@@ -13,16 +15,14 @@ public async Task CodeSnippet1()
{
#region code_snippet_1
var openAIKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
- var gpt35Config = new OpenAIConfig(openAIKey, "gpt-3.5-turbo");
+ var openAIClient = new OpenAIClient(openAIKey);
+ var model = "gpt-4o-mini";
- var assistantAgent = new AssistantAgent(
+ var assistantAgent = new OpenAIChatAgent(
name: "assistant",
systemMessage: "You are an assistant that help user to do some tasks.",
- llmConfig: new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = [gpt35Config],
- })
+ chatClient: openAIClient.GetChatClient(model))
+ .RegisterMessageConnector()
.RegisterPrintMessage(); // register a hook to print message nicely to console
// set human input mode to ALWAYS so that user always provide input
diff --git a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MiddlewareAgentCodeSnippet.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MiddlewareAgentCodeSnippet.cs
index 9ad252c1ebe..1b5a9a90320 100644
--- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MiddlewareAgentCodeSnippet.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MiddlewareAgentCodeSnippet.cs
@@ -3,7 +3,7 @@
using System.Text.Json;
using AutoGen.Core;
-using AutoGen.OpenAI.V1;
+using AutoGen.OpenAI;
using FluentAssertions;
namespace AutoGen.BasicSample.CodeSnippet;
diff --git a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/OpenAICodeSnippet.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/OpenAICodeSnippet.cs
index b7b5104e990..60520078e72 100644
--- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/OpenAICodeSnippet.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/OpenAICodeSnippet.cs
@@ -3,11 +3,12 @@
#region using_statement
using AutoGen.Core;
-using AutoGen.OpenAI.V1;
-using AutoGen.OpenAI.V1.Extension;
-using Azure.AI.OpenAI;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
#endregion using_statement
using FluentAssertions;
+using OpenAI;
+using OpenAI.Chat;
namespace AutoGen.BasicSample.CodeSnippet;
#region weather_function
@@ -32,31 +33,30 @@ public async Task CreateOpenAIChatAgentAsync()
{
#region create_openai_chat_agent
var openAIKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
- var modelId = "gpt-3.5-turbo";
+ var modelId = "gpt-4o-mini";
var openAIClient = new OpenAIClient(openAIKey);
// create an open ai chat agent
var openAIChatAgent = new OpenAIChatAgent(
- openAIClient: openAIClient,
+ chatClient: openAIClient.GetChatClient(modelId),
name: "assistant",
- modelName: modelId,
systemMessage: "You are an assistant that help user to do some tasks.");
// OpenAIChatAgent supports the following message types:
// - IMessage where ChatRequestMessage is from Azure.AI.OpenAI
- var helloMessage = new ChatRequestUserMessage("Hello");
+ var helloMessage = new UserChatMessage("Hello");
// Use MessageEnvelope.Create to create an IMessage
var chatMessageContent = MessageEnvelope.Create(helloMessage);
var reply = await openAIChatAgent.SendAsync(chatMessageContent);
- // The type of reply is MessageEnvelope where ChatResponseMessage is from Azure.AI.OpenAI
- reply.Should().BeOfType>();
+ // The type of reply is MessageEnvelope where ChatResponseMessage is from Azure.AI.OpenAI
+ reply.Should().BeOfType>();
// You can un-envelop the reply to get the ChatResponseMessage
- ChatResponseMessage response = reply.As>().Content;
- response.Role.Should().Be(ChatRole.Assistant);
+ ChatCompletion response = reply.As>().Content;
+ response.Role.Should().Be(ChatMessageRole.Assistant);
#endregion create_openai_chat_agent
#region create_openai_chat_agent_streaming
@@ -64,8 +64,8 @@ public async Task CreateOpenAIChatAgentAsync()
await foreach (var streamingMessage in streamingReply)
{
- streamingMessage.Should().BeOfType>();
- streamingMessage.As>().Content.Role.Should().Be(ChatRole.Assistant);
+ streamingMessage.Should().BeOfType>();
+ streamingMessage.As>().Content.Role.Should().Be(ChatMessageRole.Assistant);
}
#endregion create_openai_chat_agent_streaming
@@ -77,7 +77,7 @@ public async Task CreateOpenAIChatAgentAsync()
// now the agentWithConnector supports more message types
var messages = new IMessage[]
{
- MessageEnvelope.Create(new ChatRequestUserMessage("Hello")),
+ MessageEnvelope.Create(new UserChatMessage("Hello")),
new TextMessage(Role.Assistant, "Hello", from: "user"),
new MultiModalMessage(Role.Assistant,
[
@@ -106,9 +106,8 @@ public async Task OpenAIChatAgentGetWeatherFunctionCallAsync()
// create an open ai chat agent
var openAIChatAgent = new OpenAIChatAgent(
- openAIClient: openAIClient,
+ chatClient: openAIClient.GetChatClient(modelId),
name: "assistant",
- modelName: modelId,
systemMessage: "You are an assistant that help user to do some tasks.")
.RegisterMessageConnector();
diff --git a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/PrintMessageMiddlewareCodeSnippet.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/PrintMessageMiddlewareCodeSnippet.cs
index be0329b7fd5..0ac7f71a3ca 100644
--- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/PrintMessageMiddlewareCodeSnippet.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/PrintMessageMiddlewareCodeSnippet.cs
@@ -2,10 +2,8 @@
// PrintMessageMiddlewareCodeSnippet.cs
using AutoGen.Core;
-using AutoGen.OpenAI.V1;
-using AutoGen.OpenAI.V1.Extension;
-using Azure;
-using Azure.AI.OpenAI;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
namespace AutoGen.BasicSample.CodeSnippet;
@@ -15,8 +13,8 @@ public async Task PrintMessageMiddlewareAsync()
{
var config = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
var endpoint = new Uri(config.Endpoint);
- var openaiClient = new OpenAIClient(endpoint, new AzureKeyCredential(config.ApiKey));
- var agent = new OpenAIChatAgent(openaiClient, "assistant", config.DeploymentName)
+ var gpt4o = LLMConfiguration.GetOpenAIGPT4o_mini();
+ var agent = new OpenAIChatAgent(gpt4o, "assistant", config.DeploymentName)
.RegisterMessageConnector();
#region PrintMessageMiddleware
@@ -31,10 +29,10 @@ public async Task PrintMessageStreamingMiddlewareAsync()
{
var config = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
var endpoint = new Uri(config.Endpoint);
- var openaiClient = new OpenAIClient(endpoint, new AzureKeyCredential(config.ApiKey));
+ var gpt4o = LLMConfiguration.GetOpenAIGPT4o_mini();
#region print_message_streaming
- var streamingAgent = new OpenAIChatAgent(openaiClient, "assistant", config.DeploymentName)
+ var streamingAgent = new OpenAIChatAgent(gpt4o, "assistant")
.RegisterMessageConnector()
.RegisterPrintMessage();
diff --git a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/TypeSafeFunctionCallCodeSnippet.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/TypeSafeFunctionCallCodeSnippet.cs
index cf3e25eeee4..667705835eb 100644
--- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/TypeSafeFunctionCallCodeSnippet.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/TypeSafeFunctionCallCodeSnippet.cs
@@ -2,8 +2,7 @@
// TypeSafeFunctionCallCodeSnippet.cs
using System.Text.Json;
-using AutoGen.OpenAI.V1.Extension;
-using Azure.AI.OpenAI;
+using AutoGen.OpenAI.Extension;
#region weather_report_using_statement
using AutoGen.Core;
#endregion weather_report_using_statement
@@ -32,7 +31,7 @@ public async Task Consume()
var functionInstance = new TypeSafeFunctionCall();
// Get the generated function definition
- FunctionDefinition functionDefiniton = functionInstance.WeatherReportFunctionContract.ToOpenAIFunctionDefinition();
+ var functionDefiniton = functionInstance.WeatherReportFunctionContract.ToChatTool();
// Get the generated function wrapper
Func> functionWrapper = functionInstance.WeatherReportWrapper;
@@ -69,32 +68,31 @@ public async Task UpperCase(string input)
#region code_snippet_1
// file: FunctionDefinition.generated.cs
- public FunctionDefinition UpperCaseFunction
+ public FunctionContract WeatherReportFunctionContract
{
- get => new FunctionDefinition
+ get => new FunctionContract
{
- Name = @"UpperCase",
- Description = "convert input to upper case",
- Parameters = BinaryData.FromObjectAsJson(new
+ ClassName = @"TypeSafeFunctionCall",
+ Name = @"WeatherReport",
+ Description = @"Get weather report",
+ ReturnType = typeof(Task),
+ Parameters = new global::AutoGen.Core.FunctionParameterContract[]
{
- Type = "object",
- Properties = new
- {
- input = new
+ new FunctionParameterContract
{
- Type = @"string",
- Description = @"input",
+ Name = @"city",
+ Description = @"city",
+ ParameterType = typeof(string),
+ IsRequired = true,
},
- },
- Required = new[]
- {
- "input",
+ new FunctionParameterContract
+ {
+ Name = @"date",
+ Description = @"date",
+ ParameterType = typeof(string),
+ IsRequired = true,
},
},
- new JsonSerializerOptions
- {
- PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
- })
};
}
#endregion code_snippet_1
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example01_AssistantAgent.cs b/dotnet/sample/AutoGen.BasicSamples/Example01_AssistantAgent.cs
index 3ee363bfc06..40c88102588 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example01_AssistantAgent.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example01_AssistantAgent.cs
@@ -4,6 +4,8 @@
using AutoGen;
using AutoGen.BasicSample;
using AutoGen.Core;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
using FluentAssertions;
///
@@ -13,18 +15,12 @@ public static class Example01_AssistantAgent
{
public static async Task RunAsync()
{
- var gpt35 = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
- var config = new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = [gpt35],
- };
-
- // create assistant agent
- var assistantAgent = new AssistantAgent(
+ var gpt4oMini = LLMConfiguration.GetOpenAIGPT4o_mini();
+ var assistantAgent = new OpenAIChatAgent(
+ chatClient: gpt4oMini,
name: "assistant",
- systemMessage: "You convert what user said to all uppercase.",
- llmConfig: config)
+ systemMessage: "You convert what user said to all uppercase.")
+ .RegisterMessageConnector()
.RegisterPrintMessage();
// talk to the assistant agent
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example02_TwoAgent_MathChat.cs b/dotnet/sample/AutoGen.BasicSamples/Example02_TwoAgent_MathChat.cs
index c2957f32da7..b2dd9726b4b 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example02_TwoAgent_MathChat.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example02_TwoAgent_MathChat.cs
@@ -1,30 +1,28 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Example02_TwoAgent_MathChat.cs
-using AutoGen;
using AutoGen.BasicSample;
using AutoGen.Core;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
using FluentAssertions;
public static class Example02_TwoAgent_MathChat
{
public static async Task RunAsync()
{
#region code_snippet_1
- // get gpt-3.5-turbo config
- var gpt35 = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
+ var gpt4oMini = LLMConfiguration.GetOpenAIGPT4o_mini();
+
// create teacher agent
// teacher agent will create math questions
- var teacher = new AssistantAgent(
+ var teacher = new OpenAIChatAgent(
+ chatClient: gpt4oMini,
name: "teacher",
systemMessage: @"You are a teacher that create pre-school math question for student and check answer.
If the answer is correct, you stop the conversation by saying [COMPLETE].
- If the answer is wrong, you ask student to fix it.",
- llmConfig: new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = [gpt35],
- })
+ If the answer is wrong, you ask student to fix it.")
+ .RegisterMessageConnector()
.RegisterMiddleware(async (msgs, option, agent, _) =>
{
var reply = await agent.GenerateReplyAsync(msgs, option);
@@ -39,14 +37,11 @@ public static async Task RunAsync()
// create student agent
// student agent will answer the math questions
- var student = new AssistantAgent(
+ var student = new OpenAIChatAgent(
+ chatClient: gpt4oMini,
name: "student",
- systemMessage: "You are a student that answer question from teacher",
- llmConfig: new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = [gpt35],
- })
+ systemMessage: "You are a student that answer question from teacher")
+ .RegisterMessageConnector()
.RegisterPrintMessage();
// start the conversation
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example03_Agent_FunctionCall.cs b/dotnet/sample/AutoGen.BasicSamples/Example03_Agent_FunctionCall.cs
index 0ef8eaa48ae..94b67a94b14 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example03_Agent_FunctionCall.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example03_Agent_FunctionCall.cs
@@ -1,9 +1,10 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Example03_Agent_FunctionCall.cs
-using AutoGen;
using AutoGen.BasicSample;
using AutoGen.Core;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
using FluentAssertions;
///
@@ -45,33 +46,30 @@ public async Task CalculateTax(int price, float taxRate)
public static async Task RunAsync()
{
var instance = new Example03_Agent_FunctionCall();
- var gpt35 = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
+ var gpt4o = LLMConfiguration.GetOpenAIGPT4o_mini();
// AutoGen makes use of AutoGen.SourceGenerator to automatically generate FunctionDefinition and FunctionCallWrapper for you.
// The FunctionDefinition will be created based on function signature and XML documentation.
// The return type of type-safe function needs to be Task. And to get the best performance, please try only use primitive types and arrays of primitive types as parameters.
- var config = new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = [gpt35],
- FunctionContracts = new[]
- {
+ var toolCallMiddleware = new FunctionCallMiddleware(
+ functions: [
instance.ConcatStringFunctionContract,
instance.UpperCaseFunctionContract,
instance.CalculateTaxFunctionContract,
- },
- };
-
- var agent = new AssistantAgent(
- name: "agent",
- systemMessage: "You are a helpful AI assistant",
- llmConfig: config,
+ ],
functionMap: new Dictionary>>
{
- { nameof(ConcatString), instance.ConcatStringWrapper },
- { nameof(UpperCase), instance.UpperCaseWrapper },
- { nameof(CalculateTax), instance.CalculateTaxWrapper },
- })
+ { nameof(instance.ConcatString), instance.ConcatStringWrapper },
+ { nameof(instance.UpperCase), instance.UpperCaseWrapper },
+ { nameof(instance.CalculateTax), instance.CalculateTaxWrapper },
+ });
+
+ var agent = new OpenAIChatAgent(
+ chatClient: gpt4o,
+ name: "agent",
+ systemMessage: "You are a helpful AI assistant")
+ .RegisterMessageConnector()
+ .RegisterStreamingMiddleware(toolCallMiddleware)
.RegisterPrintMessage();
// talk to the assistant agent
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example04_Dynamic_GroupChat_Coding_Task.cs b/dotnet/sample/AutoGen.BasicSamples/Example04_Dynamic_GroupChat_Coding_Task.cs
index 32f06136a96..f90816d890e 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example04_Dynamic_GroupChat_Coding_Task.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example04_Dynamic_GroupChat_Coding_Task.cs
@@ -1,12 +1,12 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Example04_Dynamic_GroupChat_Coding_Task.cs
-using AutoGen;
using AutoGen.BasicSample;
using AutoGen.Core;
using AutoGen.DotnetInteractive;
using AutoGen.DotnetInteractive.Extension;
-using AutoGen.OpenAI.V1;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
using FluentAssertions;
public partial class Example04_Dynamic_GroupChat_Coding_Task
@@ -20,20 +20,21 @@ public static async Task RunAsync()
.AddPythonKernel("python3")
.Build();
- var gptConfig = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
+ var gpt4o = LLMConfiguration.GetOpenAIGPT4o_mini();
- var groupAdmin = new GPTAgent(
+ var groupAdmin = new OpenAIChatAgent(
+ chatClient: gpt4o,
name: "groupAdmin",
- systemMessage: "You are the admin of the group chat",
- temperature: 0f,
- config: gptConfig)
+ systemMessage: "You are the admin of the group chat")
+ .RegisterMessageConnector()
.RegisterPrintMessage();
- var userProxy = new UserProxyAgent(name: "user", defaultReply: GroupChatExtension.TERMINATE, humanInputMode: HumanInputMode.NEVER)
+ var userProxy = new DefaultReplyAgent(name: "user", defaultReply: GroupChatExtension.TERMINATE)
.RegisterPrintMessage();
// Create admin agent
- var admin = new AssistantAgent(
+ var admin = new OpenAIChatAgent(
+ chatClient: gpt4o,
name: "admin",
systemMessage: """
You are a manager who takes coding problem from user and resolve problem by splitting them into small tasks and assign each task to the most appropriate agent.
@@ -69,12 +70,8 @@ You are a manager who takes coding problem from user and resolve problem by spli
```
Your reply must contain one of [task|ask|summary] to indicate the type of your message.
- """,
- llmConfig: new ConversableAgentConfig
- {
- Temperature = 0,
- ConfigList = [gptConfig],
- })
+ """)
+ .RegisterMessageConnector()
.RegisterPrintMessage();
// create coder agent
@@ -82,8 +79,9 @@ Your reply must contain one of [task|ask|summary] to indicate the type of your m
// The dotnet coder write dotnet code to resolve the task.
// The code reviewer review the code block from coder's reply.
// The nuget agent install nuget packages if there's any.
- var coderAgent = new GPTAgent(
+ var coderAgent = new OpenAIChatAgent(
name: "coder",
+ chatClient: gpt4o,
systemMessage: @"You act as python coder, you write python code to resolve task. Once you finish writing code, ask runner to run the code for you.
Here're some rules to follow on writing dotnet code:
@@ -100,9 +98,8 @@ Your reply must contain one of [task|ask|summary] to indicate the type of your m
Here's some externel information
- The link to mlnet repo is: https://github.com/dotnet/machinelearning. you don't need a token to use github pr api. Make sure to include a User-Agent header, otherwise github will reject it.
-",
- config: gptConfig,
- temperature: 0.4f)
+")
+ .RegisterMessageConnector()
.RegisterPrintMessage();
// code reviewer agent will review if code block from coder's reply satisfy the following conditions:
@@ -110,7 +107,8 @@ Your reply must contain one of [task|ask|summary] to indicate the type of your m
// - The code block is csharp code block
// - The code block is top level statement
// - The code block is not using declaration
- var codeReviewAgent = new GPTAgent(
+ var codeReviewAgent = new OpenAIChatAgent(
+ chatClient: gpt4o,
name: "reviewer",
systemMessage: """
You are a code reviewer who reviews code from coder. You need to check if the code satisfy the following conditions:
@@ -133,9 +131,8 @@ Your reply must contain one of [task|ask|summary] to indicate the type of your m
result: REJECTED
```
- """,
- config: gptConfig,
- temperature: 0f)
+ """)
+ .RegisterMessageConnector()
.RegisterPrintMessage();
// create runner agent
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example05_Dalle_And_GPT4V.cs b/dotnet/sample/AutoGen.BasicSamples/Example05_Dalle_And_GPT4V.cs
index 863f477630d..e8dd86474e7 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example05_Dalle_And_GPT4V.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example05_Dalle_And_GPT4V.cs
@@ -2,11 +2,11 @@
// Example05_Dalle_And_GPT4V.cs
using AutoGen.Core;
-using AutoGen.OpenAI.V1;
-using AutoGen.OpenAI.V1.Extension;
-using Azure.AI.OpenAI;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
using FluentAssertions;
-using autogen = AutoGen.LLMConfigAPI;
+using OpenAI;
+using OpenAI.Images;
public partial class Example05_Dalle_And_GPT4V
{
@@ -30,16 +30,12 @@ public async Task GenerateImage(string prompt)
// and return url.
var option = new ImageGenerationOptions
{
- Size = ImageSize.Size1024x1024,
- Style = ImageGenerationStyle.Vivid,
- ImageCount = 1,
- Prompt = prompt,
- Quality = ImageGenerationQuality.Standard,
- DeploymentName = "dall-e-3",
+ Size = GeneratedImageSize.W1024xH1024,
+ Style = GeneratedImageStyle.Vivid,
};
- var imageResponse = await openAIClient.GetImageGenerationsAsync(option);
- var imageUrl = imageResponse.Value.Data.First().Url.OriginalString;
+ var imageResponse = await openAIClient.GetImageClient("dall-e-3").GenerateImageAsync(prompt, option);
+ var imageUrl = imageResponse.Value.ImageUri.OriginalString;
return $@"// ignore this line [IMAGE_GENERATION]
The image is generated from prompt {prompt}
@@ -57,8 +53,6 @@ public static async Task RunAsync()
// get OpenAI Key and create config
var openAIKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
- var gpt35Config = autogen.GetOpenAIConfigList(openAIKey, new[] { "gpt-3.5-turbo" });
- var gpt4vConfig = autogen.GetOpenAIConfigList(openAIKey, new[] { "gpt-4-vision-preview" });
var openAIClient = new OpenAIClient(openAIKey);
var instance = new Example05_Dalle_And_GPT4V(openAIClient);
var imagePath = Path.Combine("resource", "images", "background.png");
@@ -74,8 +68,7 @@ public static async Task RunAsync()
{ nameof(GenerateImage), instance.GenerateImageWrapper },
});
var dalleAgent = new OpenAIChatAgent(
- openAIClient: openAIClient,
- modelName: "gpt-3.5-turbo",
+ chatClient: openAIClient.GetChatClient("gpt-4o-mini"),
name: "dalle",
systemMessage: "You are a DALL-E agent that generate image from prompt, when conversation is terminated, return the most recent image url")
.RegisterMessageConnector()
@@ -110,9 +103,8 @@ public static async Task RunAsync()
.RegisterPrintMessage();
var gpt4VAgent = new OpenAIChatAgent(
- openAIClient: openAIClient,
- name: "gpt4v",
- modelName: "gpt-4-vision-preview",
+ chatClient: openAIClient.GetChatClient("gpt-4o-mini"),
+ name: "gpt-4o-mini",
systemMessage: @"You are a critism that provide feedback to DALL-E agent.
Carefully check the image generated by DALL-E agent and provide feedback.
If the image satisfies the condition, then say [APPROVE].
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example06_UserProxyAgent.cs b/dotnet/sample/AutoGen.BasicSamples/Example06_UserProxyAgent.cs
index 9e1cf42b48f..e1349cb32a9 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example06_UserProxyAgent.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example06_UserProxyAgent.cs
@@ -1,7 +1,8 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Example06_UserProxyAgent.cs
using AutoGen.Core;
-using AutoGen.OpenAI.V1;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
namespace AutoGen.BasicSample;
@@ -9,12 +10,13 @@ public static class Example06_UserProxyAgent
{
public static async Task RunAsync()
{
- var gpt35 = LLMConfiguration.GetOpenAIGPT3_5_Turbo();
+ var gpt4o = LLMConfiguration.GetOpenAIGPT4o_mini();
- var assistantAgent = new GPTAgent(
+ var assistantAgent = new OpenAIChatAgent(
+ chatClient: gpt4o,
name: "assistant",
- systemMessage: "You are an assistant that help user to do some tasks.",
- config: gpt35)
+ systemMessage: "You are an assistant that help user to do some tasks.")
+ .RegisterMessageConnector()
.RegisterPrintMessage();
// set human input mode to ALWAYS so that user always provide input
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example07_Dynamic_GroupChat_Calculate_Fibonacci.cs b/dotnet/sample/AutoGen.BasicSamples/Example07_Dynamic_GroupChat_Calculate_Fibonacci.cs
index f4fd98c3d03..1f1315586a2 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example07_Dynamic_GroupChat_Calculate_Fibonacci.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example07_Dynamic_GroupChat_Calculate_Fibonacci.cs
@@ -7,10 +7,10 @@
using AutoGen.Core;
using AutoGen.DotnetInteractive;
using AutoGen.DotnetInteractive.Extension;
-using AutoGen.OpenAI.V1;
-using AutoGen.OpenAI.V1.Extension;
-using Azure.AI.OpenAI;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
using Microsoft.DotNet.Interactive;
+using OpenAI.Chat;
public partial class Example07_Dynamic_GroupChat_Calculate_Fibonacci
{
@@ -50,11 +50,10 @@ public async Task ReviewCodeBlock(
#endregion reviewer_function
#region create_coder
- public static async Task CreateCoderAgentAsync(OpenAIClient client, string deployModel)
+ public static async Task CreateCoderAgentAsync(ChatClient client)
{
var coder = new OpenAIChatAgent(
- openAIClient: client,
- modelName: deployModel,
+ chatClient: client,
name: "coder",
systemMessage: @"You act as dotnet coder, you write dotnet code to resolve task. Once you finish writing code, ask runner to run the code for you.
@@ -122,11 +121,10 @@ public static async Task CreateRunnerAgentAsync(Kernel kernel)
#endregion create_runner
#region create_admin
- public static async Task CreateAdminAsync(OpenAIClient client, string deployModel)
+ public static async Task CreateAdminAsync(ChatClient client)
{
var admin = new OpenAIChatAgent(
- openAIClient: client,
- modelName: deployModel,
+ chatClient: client,
name: "admin",
temperature: 0)
.RegisterMessageConnector()
@@ -137,9 +135,8 @@ public static async Task CreateAdminAsync(OpenAIClient client, string de
#endregion create_admin
#region create_reviewer
- public static async Task CreateReviewerAgentAsync(OpenAIClient openAIClient, string deployModel)
+ public static async Task CreateReviewerAgentAsync(ChatClient chatClient)
{
- var gpt3Config = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
var functions = new Example07_Dynamic_GroupChat_Calculate_Fibonacci();
var functionCallMiddleware = new FunctionCallMiddleware(
functions: [functions.ReviewCodeBlockFunctionContract],
@@ -148,10 +145,9 @@ public static async Task CreateReviewerAgentAsync(OpenAIClient openAICli
{ nameof(functions.ReviewCodeBlock), functions.ReviewCodeBlockWrapper },
});
var reviewer = new OpenAIChatAgent(
- openAIClient: openAIClient,
+ chatClient: chatClient,
name: "code_reviewer",
- systemMessage: @"You review code block from coder",
- modelName: deployModel)
+ systemMessage: @"You review code block from coder")
.RegisterMessageConnector()
.RegisterStreamingMiddleware(functionCallMiddleware)
.RegisterMiddleware(async (msgs, option, innerAgent, ct) =>
@@ -237,14 +233,13 @@ public static async Task RunWorkflowAsync()
.CreateDefaultInProcessKernelBuilder()
.Build();
- var config = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
- var openaiClient = new OpenAIClient(new Uri(config.Endpoint), new Azure.AzureKeyCredential(config.ApiKey));
+ var gpt4o = LLMConfiguration.GetOpenAIGPT4o_mini();
#region create_workflow
- var reviewer = await CreateReviewerAgentAsync(openaiClient, config.DeploymentName);
- var coder = await CreateCoderAgentAsync(openaiClient, config.DeploymentName);
+ var reviewer = await CreateReviewerAgentAsync(gpt4o);
+ var coder = await CreateCoderAgentAsync(gpt4o);
var runner = await CreateRunnerAgentAsync(kernel);
- var admin = await CreateAdminAsync(openaiClient, config.DeploymentName);
+ var admin = await CreateAdminAsync(gpt4o);
var admin2CoderTransition = Transition.Create(admin, coder);
var coder2ReviewerTransition = Transition.Create(coder, reviewer);
@@ -343,17 +338,16 @@ public static async Task RunAsync()
Directory.CreateDirectory(workDir);
}
- var config = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo();
- var openaiClient = new OpenAIClient(new Uri(config.Endpoint), new Azure.AzureKeyCredential(config.ApiKey));
+ var gpt4o = LLMConfiguration.GetOpenAIGPT4o_mini();
var kernel = DotnetInteractiveKernelBuilder
.CreateDefaultInProcessKernelBuilder()
.Build();
#region create_group_chat
- var reviewer = await CreateReviewerAgentAsync(openaiClient, config.DeploymentName);
- var coder = await CreateCoderAgentAsync(openaiClient, config.DeploymentName);
+ var reviewer = await CreateReviewerAgentAsync(gpt4o);
+ var coder = await CreateCoderAgentAsync(gpt4o);
var runner = await CreateRunnerAgentAsync(kernel);
- var admin = await CreateAdminAsync(openaiClient, config.DeploymentName);
+ var admin = await CreateAdminAsync(gpt4o);
var groupChat = new GroupChat(
admin: admin,
members:
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example08_LMStudio.cs b/dotnet/sample/AutoGen.BasicSamples/Example08_LMStudio.cs
index cce33011762..e58454fdb5f 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example08_LMStudio.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example08_LMStudio.cs
@@ -3,7 +3,9 @@
#region lmstudio_using_statements
using AutoGen.Core;
-using AutoGen.LMStudio;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
+using OpenAI;
#endregion lmstudio_using_statements
namespace AutoGen.BasicSample;
@@ -13,8 +15,16 @@ public class Example08_LMStudio
public static async Task RunAsync()
{
#region lmstudio_example_1
- var config = new LMStudioConfig("localhost", 1234);
- var lmAgent = new LMStudioAgent("asssistant", config: config)
+ var endpoint = "http://localhost:1234";
+ var openaiClient = new OpenAIClient("api-key", new OpenAIClientOptions
+ {
+ Endpoint = new Uri(endpoint),
+ });
+
+ var lmAgent = new OpenAIChatAgent(
+ chatClient: openaiClient.GetChatClient(""),
+ name: "assistant")
+ .RegisterMessageConnector()
.RegisterPrintMessage();
await lmAgent.SendAsync("Can you write a piece of C# code to calculate 100th of fibonacci?");
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example09_LMStudio_FunctionCall.cs b/dotnet/sample/AutoGen.BasicSamples/Example09_LMStudio_FunctionCall.cs
deleted file mode 100644
index afa7d43b975..00000000000
--- a/dotnet/sample/AutoGen.BasicSamples/Example09_LMStudio_FunctionCall.cs
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Example09_LMStudio_FunctionCall.cs
-
-using System.Text.Json;
-using System.Text.Json.Serialization;
-using AutoGen.Core;
-using AutoGen.LMStudio;
-using AutoGen.OpenAI.V1.Extension;
-using Azure.AI.OpenAI;
-
-namespace AutoGen.BasicSample;
-
-public class LLaMAFunctionCall
-{
- [JsonPropertyName("name")]
- public string Name { get; set; }
-
- [JsonPropertyName("arguments")]
- public JsonElement Arguments { get; set; }
-}
-
-public partial class Example09_LMStudio_FunctionCall
-{
- ///
- /// Get weather from location.
- ///
- /// location
- /// date. type is string
- [Function]
- public async Task GetWeather(string location, string date)
- {
- return $"[Function] The weather on {date} in {location} is sunny.";
- }
-
-
- ///
- /// Search query on Google and return the results.
- ///
- /// search query
- [Function]
- public async Task GoogleSearch(string query)
- {
- return $"[Function] Here are the search results for {query}.";
- }
-
- private static object SerializeFunctionDefinition(FunctionDefinition functionDefinition)
- {
- return new
- {
- type = "function",
- function = new
- {
- name = functionDefinition.Name,
- description = functionDefinition.Description,
- parameters = functionDefinition.Parameters.ToObjectFromJson
+[Obsolete("Use OpenAIChatAgent instead")]
public class GPTAgent : IStreamingAgent
{
private readonly OpenAIClient openAIClient;
diff --git a/dotnet/src/AutoGen.OpenAI.V1/AutoGen.OpenAI.V1.csproj b/dotnet/src/AutoGen.OpenAI.V1/AutoGen.OpenAI.V1.csproj
index e3a2f41c8f7..21951cb32fb 100644
--- a/dotnet/src/AutoGen.OpenAI.V1/AutoGen.OpenAI.V1.csproj
+++ b/dotnet/src/AutoGen.OpenAI.V1/AutoGen.OpenAI.V1.csproj
@@ -8,9 +8,11 @@
- AutoGen.OpenAI
+ AutoGen.OpenAI.V1
OpenAI Intergration for AutoGen.
+ This package connects to OpenAI using the Azure.AI.OpenAI v1 package. It is reserved to keep compatibility with projects that still depend on that v1 package.
+ To use the latest version of OpenAI SDK, please use AutoGen.OpenAI package.
diff --git a/dotnet/src/AutoGen.OpenAI/Agent/OpenAIChatAgent.cs b/dotnet/src/AutoGen.OpenAI/Agent/OpenAIChatAgent.cs
new file mode 100644
index 00000000000..1ae1e45db15
--- /dev/null
+++ b/dotnet/src/AutoGen.OpenAI/Agent/OpenAIChatAgent.cs
@@ -0,0 +1,210 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// OpenAIChatAgent.cs
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Threading;
+using System.Threading.Tasks;
+using AutoGen.OpenAI.Extension;
+using global::OpenAI;
+using global::OpenAI.Chat;
+
+namespace AutoGen.OpenAI;
+
+///
+/// OpenAI client agent. This agent is a thin wrapper around to provide a simple interface for chat completions.
+/// supports the following message types:
+///
+/// -
+/// where T is : chat message.
+///
+///
+/// returns the following message types:
+///
+/// -
+/// where T is : chat response message.
+/// where T is : streaming chat completions update.
+///
+///
+///
+public class OpenAIChatAgent : IStreamingAgent
+{
+ private readonly ChatClient chatClient;
+ private readonly ChatCompletionOptions options;
+ private readonly string systemMessage;
+
+ ///
+ /// Create a new instance of .
+ ///
+ /// openai client
+ /// agent name
+ /// system message
+ /// temperature
+ /// max tokens to generated
+ /// response format, set it to to enable json mode.
+ /// seed to use, set it to enable deterministic output
+ /// functions
+ public OpenAIChatAgent(
+ ChatClient chatClient,
+ string name,
+ string systemMessage = "You are a helpful AI assistant",
+ float temperature = 0.7f,
+ int maxTokens = 1024,
+ int? seed = null,
+ ChatResponseFormat? responseFormat = null,
+ IEnumerable? functions = null)
+ : this(
+ chatClient: chatClient,
+ name: name,
+ options: CreateChatCompletionOptions(temperature, maxTokens, seed, responseFormat, functions),
+ systemMessage: systemMessage)
+ {
+ }
+
+ ///
+ /// Create a new instance of .
+ ///
+ /// openai chat client
+ /// agent name
+ /// system message
+ /// chat completion option. The option can't contain messages
+ public OpenAIChatAgent(
+ ChatClient chatClient,
+ string name,
+ ChatCompletionOptions options,
+ string systemMessage = "You are a helpful AI assistant")
+ {
+ this.chatClient = chatClient;
+ this.Name = name;
+ this.options = options;
+ this.systemMessage = systemMessage;
+ }
+
+ public string Name { get; }
+
+ public async Task GenerateReplyAsync(
+ IEnumerable messages,
+ GenerateReplyOptions? options = null,
+ CancellationToken cancellationToken = default)
+ {
+ var chatHistory = this.CreateChatMessages(messages);
+ var settings = this.CreateChatCompletionsOptions(options);
+ var reply = await this.chatClient.CompleteChatAsync(chatHistory, settings, cancellationToken);
+ return new MessageEnvelope(reply.Value, from: this.Name);
+ }
+
+ public async IAsyncEnumerable GenerateStreamingReplyAsync(
+ IEnumerable messages,
+ GenerateReplyOptions? options = null,
+ [EnumeratorCancellation] CancellationToken cancellationToken = default)
+ {
+ var chatHistory = this.CreateChatMessages(messages);
+ var settings = this.CreateChatCompletionsOptions(options);
+ var response = this.chatClient.CompleteChatStreamingAsync(chatHistory, settings, cancellationToken);
+ await foreach (var update in response.WithCancellation(cancellationToken))
+ {
+ if (update.ContentUpdate.Count > 1)
+ {
+ throw new InvalidOperationException("Only one choice is supported in streaming response");
+ }
+
+ yield return new MessageEnvelope(update, from: this.Name);
+ }
+ }
+
+ private IEnumerable CreateChatMessages(IEnumerable messages)
+ {
+ var oaiMessages = messages.Select(m => m switch
+ {
+ IMessage chatMessage => chatMessage.Content,
+ _ => throw new ArgumentException("Invalid message type")
+ });
+
+ // add system message if there's no system message in messages
+ if (!oaiMessages.Any(m => m is SystemChatMessage))
+ {
+ oaiMessages = new[] { new SystemChatMessage(systemMessage) }.Concat(oaiMessages);
+ }
+
+ return oaiMessages;
+ }
+
+ private ChatCompletionOptions CreateChatCompletionsOptions(GenerateReplyOptions? options)
+ {
+ var option = new ChatCompletionOptions()
+ {
+ Seed = this.options.Seed,
+ Temperature = options?.Temperature ?? this.options.Temperature,
+ MaxTokens = options?.MaxToken ?? this.options.MaxTokens,
+ ResponseFormat = this.options.ResponseFormat,
+ FrequencyPenalty = this.options.FrequencyPenalty,
+ FunctionChoice = this.options.FunctionChoice,
+ IncludeLogProbabilities = this.options.IncludeLogProbabilities,
+ ParallelToolCallsEnabled = this.options.ParallelToolCallsEnabled,
+ PresencePenalty = this.options.PresencePenalty,
+ ToolChoice = this.options.ToolChoice,
+ TopLogProbabilityCount = this.options.TopLogProbabilityCount,
+ TopP = this.options.TopP,
+ EndUserId = this.options.EndUserId,
+ };
+
+ // add tools from this.options to option
+ foreach (var tool in this.options.Tools)
+ {
+ option.Tools.Add(tool);
+ }
+
+ // add stop sequences from this.options to option
+ foreach (var seq in this.options.StopSequences)
+ {
+ option.StopSequences.Add(seq);
+ }
+
+ var openAIFunctionDefinitions = options?.Functions?.Select(f => f.ToChatTool()).ToList();
+ if (openAIFunctionDefinitions is { Count: > 0 })
+ {
+ foreach (var f in openAIFunctionDefinitions)
+ {
+ option.Tools.Add(f);
+ }
+ }
+
+ if (options?.StopSequence is var sequence && sequence is { Length: > 0 })
+ {
+ foreach (var seq in sequence)
+ {
+ option.StopSequences.Add(seq);
+ }
+ }
+
+ return option;
+ }
+
+ private static ChatCompletionOptions CreateChatCompletionOptions(
+ float temperature = 0.7f,
+ int maxTokens = 1024,
+ int? seed = null,
+ ChatResponseFormat? responseFormat = null,
+ IEnumerable? functions = null)
+ {
+ var options = new ChatCompletionOptions
+ {
+ Temperature = temperature,
+ MaxTokens = maxTokens,
+ Seed = seed,
+ ResponseFormat = responseFormat,
+ };
+
+ if (functions is not null)
+ {
+ foreach (var f in functions)
+ {
+ options.Tools.Add(f);
+ }
+ }
+
+ return options;
+ }
+}
diff --git a/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj b/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj
new file mode 100644
index 00000000000..f93fdd4bc5e
--- /dev/null
+++ b/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj
@@ -0,0 +1,26 @@
+
+
+ $(PackageTargetFrameworks)
+ AutoGen.OpenAI
+
+
+
+
+
+
+ AutoGen.OpenAI
+
+ OpenAI Integration for AutoGen.
+ If your project still depends on Azure.AI.OpenAI v1, please use AutoGen.OpenAI.V1 package instead.
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/dotnet/src/AutoGen.OpenAI/Extension/FunctionContractExtension.cs b/dotnet/src/AutoGen.OpenAI/Extension/FunctionContractExtension.cs
new file mode 100644
index 00000000000..dd1c1125aec
--- /dev/null
+++ b/dotnet/src/AutoGen.OpenAI/Extension/FunctionContractExtension.cs
@@ -0,0 +1,72 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// FunctionContractExtension.cs
+
+using System;
+using System.Collections.Generic;
+using Json.Schema;
+using Json.Schema.Generation;
+using OpenAI.Chat;
+
+namespace AutoGen.OpenAI.Extension;
+
+public static class FunctionContractExtension
+{
+ ///
+ /// Convert a to a that can be used in gpt function call.
+ ///
+ /// function contract
+ ///
+ public static ChatTool ToChatTool(this FunctionContract functionContract)
+ {
+ var requiredParameterNames = new List();
+ var propertiesSchemas = new Dictionary();
+ var propertySchemaBuilder = new JsonSchemaBuilder().Type(SchemaValueType.Object);
+ foreach (var param in functionContract.Parameters ?? [])
+ {
+ if (param.Name is null)
+ {
+ throw new InvalidOperationException("Parameter name cannot be null");
+ }
+
+ var schemaBuilder = new JsonSchemaBuilder().FromType(param.ParameterType ?? throw new ArgumentNullException(nameof(param.ParameterType)));
+ if (param.Description != null)
+ {
+ schemaBuilder = schemaBuilder.Description(param.Description);
+ }
+
+ if (param.IsRequired)
+ {
+ requiredParameterNames.Add(param.Name);
+ }
+
+ var schema = schemaBuilder.Build();
+ propertiesSchemas[param.Name] = schema;
+
+ }
+ propertySchemaBuilder = propertySchemaBuilder.Properties(propertiesSchemas);
+ propertySchemaBuilder = propertySchemaBuilder.Required(requiredParameterNames);
+
+ var option = new System.Text.Json.JsonSerializerOptions()
+ {
+ PropertyNamingPolicy = System.Text.Json.JsonNamingPolicy.CamelCase
+ };
+
+ var functionDefinition = ChatTool.CreateFunctionTool(
+ functionContract.Name ?? throw new ArgumentNullException(nameof(functionContract.Name)),
+ functionContract.Description,
+ BinaryData.FromObjectAsJson(propertySchemaBuilder.Build(), option));
+
+ return functionDefinition;
+ }
+
+ ///
+ /// Convert a to a that can be used in gpt function call.
+ ///
+ /// function contract
+ ///
+ [Obsolete("Use ToChatTool instead")]
+ public static ChatTool ToOpenAIFunctionDefinition(this FunctionContract functionContract)
+ {
+ return functionContract.ToChatTool();
+ }
+}
diff --git a/dotnet/src/AutoGen.OpenAI/Extension/OpenAIAgentExtension.cs b/dotnet/src/AutoGen.OpenAI/Extension/OpenAIAgentExtension.cs
new file mode 100644
index 00000000000..1e8ae58954e
--- /dev/null
+++ b/dotnet/src/AutoGen.OpenAI/Extension/OpenAIAgentExtension.cs
@@ -0,0 +1,37 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// OpenAIAgentExtension.cs
+
+namespace AutoGen.OpenAI.Extension;
+
+public static class OpenAIAgentExtension
+{
+ ///
+ /// Register an to the
+ ///
+ /// the connector to use. If null, a new instance of will be created.
+ public static MiddlewareStreamingAgent RegisterMessageConnector(
+ this OpenAIChatAgent agent, OpenAIChatRequestMessageConnector? connector = null)
+ {
+ if (connector == null)
+ {
+ connector = new OpenAIChatRequestMessageConnector();
+ }
+
+ return agent.RegisterStreamingMiddleware(connector);
+ }
+
+ ///
+ /// Register an to the where T is
+ ///
+ /// the connector to use. If null, a new instance of will be created.
+ public static MiddlewareStreamingAgent RegisterMessageConnector(
+ this MiddlewareStreamingAgent agent, OpenAIChatRequestMessageConnector? connector = null)
+ {
+ if (connector == null)
+ {
+ connector = new OpenAIChatRequestMessageConnector();
+ }
+
+ return agent.RegisterStreamingMiddleware(connector);
+ }
+}
diff --git a/dotnet/src/AutoGen.OpenAI/GlobalUsing.cs b/dotnet/src/AutoGen.OpenAI/GlobalUsing.cs
new file mode 100644
index 00000000000..d66bf001ed5
--- /dev/null
+++ b/dotnet/src/AutoGen.OpenAI/GlobalUsing.cs
@@ -0,0 +1,4 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// GlobalUsing.cs
+
+global using AutoGen.Core;
diff --git a/dotnet/src/AutoGen.OpenAI/Middleware/OpenAIChatRequestMessageConnector.cs b/dotnet/src/AutoGen.OpenAI/Middleware/OpenAIChatRequestMessageConnector.cs
new file mode 100644
index 00000000000..2297d123bf8
--- /dev/null
+++ b/dotnet/src/AutoGen.OpenAI/Middleware/OpenAIChatRequestMessageConnector.cs
@@ -0,0 +1,358 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// OpenAIChatRequestMessageConnector.cs
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Threading;
+using System.Threading.Tasks;
+using OpenAI.Chat;
+
+namespace AutoGen.OpenAI;
+
+///
+/// This middleware converts the incoming to where T is before sending to agent. And converts the output to after receiving from agent.
+/// Supported are
+/// -
+/// -
+/// -
+/// -
+/// -
+/// - where T is
+/// - where TMessage1 is and TMessage2 is
+///
+public class OpenAIChatRequestMessageConnector : IMiddleware, IStreamingMiddleware
+{
+ private bool strictMode = false;
+
+ ///
+ /// Create a new instance of .
+ ///
+ /// If true, will throw an
+ /// When the message type is not supported. If false, it will ignore the unsupported message type.
+ public OpenAIChatRequestMessageConnector(bool strictMode = false)
+ {
+ this.strictMode = strictMode;
+ }
+
+ public string? Name => nameof(OpenAIChatRequestMessageConnector);
+
+ public async Task InvokeAsync(MiddlewareContext context, IAgent agent, CancellationToken cancellationToken = default)
+ {
+ var chatMessages = ProcessIncomingMessages(agent, context.Messages);
+
+ var reply = await agent.GenerateReplyAsync(chatMessages, context.Options, cancellationToken);
+
+ return PostProcessMessage(reply);
+ }
+
+ public async IAsyncEnumerable InvokeAsync(
+ MiddlewareContext context,
+ IStreamingAgent agent,
+ [EnumeratorCancellation] CancellationToken cancellationToken = default)
+ {
+ var chatMessages = ProcessIncomingMessages(agent, context.Messages);
+ var streamingReply = agent.GenerateStreamingReplyAsync(chatMessages, context.Options, cancellationToken);
+ var chunks = new List();
+
+ // only streaming the text content
+ await foreach (var reply in streamingReply)
+ {
+ if (reply is IMessage update)
+ {
+ if (update.Content.ContentUpdate.Count == 1 && update.Content.ContentUpdate[0].Kind == ChatMessageContentPartKind.Text)
+ {
+ yield return new TextMessageUpdate(Role.Assistant, update.Content.ContentUpdate[0].Text, from: update.From);
+ }
+
+ chunks.Add(update.Content);
+ }
+ else
+ {
+ if (this.strictMode)
+ {
+ throw new InvalidOperationException($"Invalid streaming message type {reply.GetType().Name}");
+ }
+ else
+ {
+ yield return reply;
+ }
+ }
+ }
+
+ // process the tool call
+ var streamingChatToolCallUpdates = chunks.Where(c => c.ToolCallUpdates.Count > 0)
+ .SelectMany(c => c.ToolCallUpdates)
+ .ToList();
+
+ // collect all text parts
+ var textParts = chunks.SelectMany(c => c.ContentUpdate)
+ .Where(c => c.Kind == ChatMessageContentPartKind.Text)
+ .Select(c => c.Text)
+ .ToList();
+
+ // combine the tool call and function call into one ToolCallMessages
+ var text = string.Join(string.Empty, textParts);
+ var toolCalls = new List();
+ var currentToolName = string.Empty;
+ var currentToolArguments = string.Empty;
+ var currentToolId = string.Empty;
+ int? currentIndex = null;
+ foreach (var toolCall in streamingChatToolCallUpdates)
+ {
+ if (currentIndex is null)
+ {
+ currentIndex = toolCall.Index;
+ }
+
+ if (toolCall.Index == currentIndex)
+ {
+ currentToolName += toolCall.FunctionName;
+ currentToolArguments += toolCall.FunctionArgumentsUpdate;
+ currentToolId += toolCall.Id;
+
+ yield return new ToolCallMessageUpdate(currentToolName, currentToolArguments, from: agent.Name);
+ }
+ else
+ {
+ toolCalls.Add(new ToolCall(currentToolName, currentToolArguments) { ToolCallId = currentToolId });
+ currentToolName = toolCall.FunctionName;
+ currentToolArguments = toolCall.FunctionArgumentsUpdate;
+ currentToolId = toolCall.Id;
+ currentIndex = toolCall.Index;
+
+ yield return new ToolCallMessageUpdate(currentToolName, currentToolArguments, from: agent.Name);
+ }
+ }
+
+ if (string.IsNullOrEmpty(currentToolName) is false)
+ {
+ toolCalls.Add(new ToolCall(currentToolName, currentToolArguments) { ToolCallId = currentToolId });
+ }
+
+ if (toolCalls.Any())
+ {
+ yield return new ToolCallMessage(toolCalls, from: agent.Name)
+ {
+ Content = text,
+ };
+ }
+ }
+
+ public IMessage PostProcessMessage(IMessage message)
+ {
+ return message switch
+ {
+ IMessage m => PostProcessChatCompletions(m),
+ _ when strictMode is false => message,
+ _ => throw new InvalidOperationException($"Invalid return message type {message.GetType().Name}"),
+ };
+ }
+
+ private IMessage PostProcessChatCompletions(IMessage message)
+ {
+ // throw an exception if the response was blocked by the content filter
+ if (message.Content.FinishReason == ChatFinishReason.ContentFilter)
+ {
+ throw new InvalidOperationException("The content is filtered because its potential risk. Please try another input.");
+ }
+
+ // throw an exception if there is more than one choice
+ if (message.Content.Content.Count > 1)
+ {
+ throw new InvalidOperationException("The content has more than one choice. Please try another input.");
+ }
+
+ return PostProcessChatResponseMessage(message.Content, message.From);
+ }
+
+ private IMessage PostProcessChatResponseMessage(ChatCompletion chatCompletion, string? from)
+ {
+ // throw an exception if the response was blocked by the content filter
+ if (chatCompletion.FinishReason == ChatFinishReason.ContentFilter)
+ {
+ throw new InvalidOperationException("The content is filtered because its potential risk. Please try another input.");
+ }
+
+ // throw an exception if there is more than one choice
+ if (chatCompletion.Content.Count > 1)
+ {
+ throw new InvalidOperationException("The content has more than one choice. Please try another input.");
+ }
+ var textContent = chatCompletion.Content.FirstOrDefault();
+
+ // if tool calls is not empty, return ToolCallMessage
+ if (chatCompletion.ToolCalls is { Count: > 0 })
+ {
+ var toolCalls = chatCompletion.ToolCalls.Select(tc => new ToolCall(tc.FunctionName, tc.FunctionArguments) { ToolCallId = tc.Id });
+ return new ToolCallMessage(toolCalls, from)
+ {
+ Content = textContent?.Kind switch
+ {
+ _ when textContent?.Kind == ChatMessageContentPartKind.Text => textContent.Text,
+ _ => null,
+ },
+ };
+ }
+
+ // else, process function call.
+ // This is deprecated and will be removed in the future.
+ if (chatCompletion.FunctionCall is ChatFunctionCall fc)
+ {
+ return new ToolCallMessage(fc.FunctionName, fc.FunctionArguments, from)
+ {
+ Content = textContent?.Kind switch
+ {
+ _ when textContent?.Kind == ChatMessageContentPartKind.Text => textContent.Text,
+ _ => null,
+ },
+ };
+ }
+
+ // if the content is text, return TextMessage
+ if (textContent?.Kind == ChatMessageContentPartKind.Text)
+ {
+ return new TextMessage(Role.Assistant, textContent.Text, from);
+ }
+
+ throw new InvalidOperationException("Invalid ChatResponseMessage");
+ }
+
+ public IEnumerable ProcessIncomingMessages(IAgent agent, IEnumerable messages)
+ {
+ return messages.SelectMany(m =>
+ {
+ if (m is IMessage crm)
+ {
+ return [crm];
+ }
+ else
+ {
+ var chatRequestMessages = m switch
+ {
+ TextMessage textMessage => ProcessTextMessage(agent, textMessage),
+ ImageMessage imageMessage when (imageMessage.From is null || imageMessage.From != agent.Name) => ProcessImageMessage(agent, imageMessage),
+ MultiModalMessage multiModalMessage when (multiModalMessage.From is null || multiModalMessage.From != agent.Name) => ProcessMultiModalMessage(agent, multiModalMessage),
+ ToolCallMessage toolCallMessage when (toolCallMessage.From is null || toolCallMessage.From == agent.Name) => ProcessToolCallMessage(agent, toolCallMessage),
+ ToolCallResultMessage toolCallResultMessage => ProcessToolCallResultMessage(toolCallResultMessage),
+ AggregateMessage aggregateMessage => ProcessFunctionCallMiddlewareMessage(agent, aggregateMessage),
+ _ when strictMode is false => [],
+ _ => throw new InvalidOperationException($"Invalid message type: {m.GetType().Name}"),
+ };
+
+ if (chatRequestMessages.Any())
+ {
+ return chatRequestMessages.Select(cm => MessageEnvelope.Create(cm, m.From));
+ }
+ else
+ {
+ return [m];
+ }
+ }
+ });
+ }
+
+ private IEnumerable ProcessTextMessage(IAgent agent, TextMessage message)
+ {
+ if (message.Role == Role.System)
+ {
+ return [new SystemChatMessage(message.Content) { ParticipantName = message.From }];
+ }
+
+ if (agent.Name == message.From)
+ {
+ return [new AssistantChatMessage(message.Content) { ParticipantName = agent.Name }];
+ }
+ else
+ {
+ return message.From switch
+ {
+ null when message.Role == Role.User => [new UserChatMessage(message.Content)],
+ null when message.Role == Role.Assistant => [new AssistantChatMessage(message.Content)],
+ null => throw new InvalidOperationException("Invalid Role"),
+ _ => [new UserChatMessage(message.Content) { ParticipantName = message.From }]
+ };
+ }
+ }
+
+ private IEnumerable ProcessImageMessage(IAgent agent, ImageMessage message)
+ {
+ if (agent.Name == message.From)
+ {
+ // image message from assistant is not supported
+ throw new ArgumentException("ImageMessage is not supported when message.From is the same with agent");
+ }
+
+ var imageContentItem = this.CreateChatMessageImageContentItemFromImageMessage(message);
+ return [new UserChatMessage([imageContentItem]) { ParticipantName = message.From }];
+ }
+
+ private IEnumerable ProcessMultiModalMessage(IAgent agent, MultiModalMessage message)
+ {
+ if (agent.Name == message.From)
+ {
+ // multi-modal message from assistant is not supported
+ throw new ArgumentException("MultiModalMessage is not supported when message.From is the same with agent");
+ }
+
+ IEnumerable items = message.Content.Select(ci => ci switch
+ {
+ TextMessage text => ChatMessageContentPart.CreateTextMessageContentPart(text.Content),
+ ImageMessage image => this.CreateChatMessageImageContentItemFromImageMessage(image),
+ _ => throw new NotImplementedException(),
+ });
+
+ return [new UserChatMessage(items) { ParticipantName = message.From }];
+ }
+
+ private ChatMessageContentPart CreateChatMessageImageContentItemFromImageMessage(ImageMessage message)
+ {
+ return message.Data is null && message.Url is not null
+ ? ChatMessageContentPart.CreateImageMessageContentPart(new Uri(message.Url))
+ : ChatMessageContentPart.CreateImageMessageContentPart(message.Data, message.Data?.MediaType);
+ }
+
+ private IEnumerable ProcessToolCallMessage(IAgent agent, ToolCallMessage message)
+ {
+ if (message.From is not null && message.From != agent.Name)
+ {
+ throw new ArgumentException("ToolCallMessage is not supported when message.From is not the same with agent");
+ }
+
+ var toolCallParts = message.ToolCalls.Select((tc, i) => ChatToolCall.CreateFunctionToolCall(tc.ToolCallId ?? $"{tc.FunctionName}_{i}", tc.FunctionName, tc.FunctionArguments));
+ var textContent = message.GetContent() ?? null;
+ var chatRequestMessage = new AssistantChatMessage(toolCallParts, textContent) { ParticipantName = message.From };
+
+ return [chatRequestMessage];
+ }
+
+ private IEnumerable ProcessToolCallResultMessage(ToolCallResultMessage message)
+ {
+ return message.ToolCalls
+ .Where(tc => tc.Result is not null)
+ .Select((tc, i) => new ToolChatMessage(tc.ToolCallId ?? $"{tc.FunctionName}_{i}", tc.Result));
+ }
+
+
+ private IEnumerable ProcessFunctionCallMiddlewareMessage(IAgent agent, AggregateMessage aggregateMessage)
+ {
+ if (aggregateMessage.From is not null && aggregateMessage.From != agent.Name)
+ {
+ // convert as user message
+ var resultMessage = aggregateMessage.Message2;
+
+ return resultMessage.ToolCalls.Select(tc => new UserChatMessage(tc.Result) { ParticipantName = aggregateMessage.From });
+ }
+ else
+ {
+ var toolCallMessage1 = aggregateMessage.Message1;
+ var toolCallResultMessage = aggregateMessage.Message2;
+
+ var assistantMessage = this.ProcessToolCallMessage(agent, toolCallMessage1);
+ var toolCallResults = this.ProcessToolCallResultMessage(toolCallResultMessage);
+
+ return assistantMessage.Concat(toolCallResults);
+ }
+ }
+}
diff --git a/dotnet/src/AutoGen.SemanticKernel/AutoGen.SemanticKernel.csproj b/dotnet/src/AutoGen.SemanticKernel/AutoGen.SemanticKernel.csproj
index 8769c3ac487..b89626c01a0 100644
--- a/dotnet/src/AutoGen.SemanticKernel/AutoGen.SemanticKernel.csproj
+++ b/dotnet/src/AutoGen.SemanticKernel/AutoGen.SemanticKernel.csproj
@@ -19,6 +19,7 @@
+
diff --git a/dotnet/src/AutoGen.SemanticKernel/SemanticKernelAgent.cs b/dotnet/src/AutoGen.SemanticKernel/SemanticKernelAgent.cs
index d12c54c1b3b..e10f5b043f2 100644
--- a/dotnet/src/AutoGen.SemanticKernel/SemanticKernelAgent.cs
+++ b/dotnet/src/AutoGen.SemanticKernel/SemanticKernelAgent.cs
@@ -106,7 +106,6 @@ private PromptExecutionSettings BuildOption(GenerateReplyOptions? options)
MaxTokens = options?.MaxToken ?? 1024,
StopSequences = options?.StopSequence,
ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions,
- ResultsPerPrompt = 1,
};
}
diff --git a/dotnet/src/AutoGen.SemanticKernel/SemanticKernelChatCompletionAgent.cs b/dotnet/src/AutoGen.SemanticKernel/SemanticKernelChatCompletionAgent.cs
index 82d83a9e855..1354996430b 100644
--- a/dotnet/src/AutoGen.SemanticKernel/SemanticKernelChatCompletionAgent.cs
+++ b/dotnet/src/AutoGen.SemanticKernel/SemanticKernelChatCompletionAgent.cs
@@ -27,7 +27,7 @@ public async Task GenerateReplyAsync(IEnumerable messages, G
CancellationToken cancellationToken = default)
{
ChatMessageContent[] reply = await _chatCompletionAgent
- .InvokeAsync(BuildChatHistory(messages), cancellationToken)
+ .InvokeAsync(BuildChatHistory(messages), cancellationToken: cancellationToken)
.ToArrayAsync(cancellationToken: cancellationToken);
return reply.Length > 1
diff --git a/dotnet/src/AutoGen.WebAPI/OpenAI/Service/OpenAIChatCompletionService.cs b/dotnet/src/AutoGen.WebAPI/OpenAI/Service/OpenAIChatCompletionService.cs
index 27481da006a..80d49050ee4 100644
--- a/dotnet/src/AutoGen.WebAPI/OpenAI/Service/OpenAIChatCompletionService.cs
+++ b/dotnet/src/AutoGen.WebAPI/OpenAI/Service/OpenAIChatCompletionService.cs
@@ -7,7 +7,6 @@
using System.Threading.Tasks;
using AutoGen.Core;
using AutoGen.WebAPI.OpenAI.DTO;
-
namespace AutoGen.Server;
internal class OpenAIChatCompletionService
@@ -44,7 +43,7 @@ public async Task GetChatCompletionAsync(OpenAIChatComplet
{
Message = message,
Index = 0,
- FinishReason = "completed",
+ FinishReason = "stop",
};
openAIChatCompletion.Choices = [choice];
diff --git a/dotnet/src/AutoGen/API/LLMConfigAPI.cs b/dotnet/src/AutoGen/API/LLMConfigAPI.cs
index fcc74435173..28b5ad44312 100644
--- a/dotnet/src/AutoGen/API/LLMConfigAPI.cs
+++ b/dotnet/src/AutoGen/API/LLMConfigAPI.cs
@@ -4,7 +4,6 @@
using System;
using System.Collections.Generic;
using System.Linq;
-using AutoGen.OpenAI.V1;
namespace AutoGen
{
diff --git a/dotnet/src/AutoGen/Agent/ConversableAgent.cs b/dotnet/src/AutoGen/Agent/ConversableAgent.cs
index b60d2eba099..da61c812f46 100644
--- a/dotnet/src/AutoGen/Agent/ConversableAgent.cs
+++ b/dotnet/src/AutoGen/Agent/ConversableAgent.cs
@@ -6,9 +6,8 @@
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
-using AutoGen.LMStudio;
-using AutoGen.OpenAI.V1;
-
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
namespace AutoGen;
public enum HumanInputMode
@@ -87,13 +86,21 @@ public ConversableAgent(
{
IAgent nextAgent = llmConfig switch
{
- AzureOpenAIConfig azureConfig => new GPTAgent(this.Name!, this.systemMessage, azureConfig, temperature: config.Temperature ?? 0),
- OpenAIConfig openAIConfig => new GPTAgent(this.Name!, this.systemMessage, openAIConfig, temperature: config.Temperature ?? 0),
- LMStudioConfig lmStudioConfig => new LMStudioAgent(
- name: this.Name,
- config: lmStudioConfig,
- systemMessage: this.systemMessage,
- temperature: config.Temperature ?? 0),
+ AzureOpenAIConfig azureConfig => new OpenAIChatAgent(
+ chatClient: azureConfig.CreateChatClient(),
+ name: this.Name!,
+ systemMessage: this.systemMessage)
+ .RegisterMessageConnector(),
+ OpenAIConfig openAIConfig => new OpenAIChatAgent(
+ chatClient: openAIConfig.CreateChatClient(),
+ name: this.Name!,
+ systemMessage: this.systemMessage)
+ .RegisterMessageConnector(),
+ LMStudioConfig lmStudioConfig => new OpenAIChatAgent(
+ chatClient: lmStudioConfig.CreateChatClient(),
+ name: this.Name!,
+ systemMessage: this.systemMessage)
+ .RegisterMessageConnector(),
_ => throw new ArgumentException($"Unsupported config type {llmConfig.GetType()}"),
};
diff --git a/dotnet/src/AutoGen/AutoGen.csproj b/dotnet/src/AutoGen/AutoGen.csproj
index 4c3b2a5ab81..fe4431a3573 100644
--- a/dotnet/src/AutoGen/AutoGen.csproj
+++ b/dotnet/src/AutoGen/AutoGen.csproj
@@ -17,7 +17,6 @@
-
@@ -27,7 +26,8 @@
-
+
+
diff --git a/dotnet/src/AutoGen/AzureOpenAIConfig.cs b/dotnet/src/AutoGen/AzureOpenAIConfig.cs
new file mode 100644
index 00000000000..6112a3815d5
--- /dev/null
+++ b/dotnet/src/AutoGen/AzureOpenAIConfig.cs
@@ -0,0 +1,30 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// AzureOpenAIConfig.cs
+
+using Azure.AI.OpenAI;
+using OpenAI.Chat;
+
+namespace AutoGen;
+
+public class AzureOpenAIConfig : ILLMConfig
+{
+ public AzureOpenAIConfig(string endpoint, string deploymentName, string apiKey)
+ {
+ this.Endpoint = endpoint;
+ this.DeploymentName = deploymentName;
+ this.ApiKey = apiKey;
+ }
+
+ public string Endpoint { get; }
+
+ public string DeploymentName { get; }
+
+ public string ApiKey { get; }
+
+ internal ChatClient CreateChatClient()
+ {
+ var client = new AzureOpenAIClient(new System.Uri(this.Endpoint), this.ApiKey);
+
+ return client.GetChatClient(DeploymentName);
+ }
+}
diff --git a/dotnet/src/AutoGen/LMStudioConfig.cs b/dotnet/src/AutoGen/LMStudioConfig.cs
new file mode 100644
index 00000000000..5fd9edc7080
--- /dev/null
+++ b/dotnet/src/AutoGen/LMStudioConfig.cs
@@ -0,0 +1,45 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// LMStudioConfig.cs
+using System;
+using OpenAI;
+using OpenAI.Chat;
+
+namespace AutoGen;
+
+///
+/// Add support for consuming openai-like API from LM Studio
+///
+public class LMStudioConfig : ILLMConfig
+{
+ public LMStudioConfig(string host, int port)
+ {
+ this.Host = host;
+ this.Port = port;
+ this.Uri = new Uri($"http://{host}:{port}");
+ }
+
+ public LMStudioConfig(Uri uri)
+ {
+ this.Uri = uri;
+ this.Host = uri.Host;
+ this.Port = uri.Port;
+ }
+
+ public string Host { get; }
+
+ public int Port { get; }
+
+ public Uri Uri { get; }
+
+ internal ChatClient CreateChatClient()
+ {
+ var client = new OpenAIClient("api-key", new OpenAIClientOptions
+ {
+ Endpoint = this.Uri,
+ });
+
+ // model name doesn't matter for LM Studio
+
+ return client.GetChatClient("model-name");
+ }
+}
diff --git a/dotnet/src/AutoGen/OpenAIConfig.cs b/dotnet/src/AutoGen/OpenAIConfig.cs
new file mode 100644
index 00000000000..ea50fa085f1
--- /dev/null
+++ b/dotnet/src/AutoGen/OpenAIConfig.cs
@@ -0,0 +1,27 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// OpenAIConfig.cs
+
+using OpenAI;
+using OpenAI.Chat;
+
+namespace AutoGen;
+
+public class OpenAIConfig : ILLMConfig
+{
+ public OpenAIConfig(string apiKey, string modelId)
+ {
+ this.ApiKey = apiKey;
+ this.ModelId = modelId;
+ }
+
+ public string ApiKey { get; }
+
+ public string ModelId { get; }
+
+ internal ChatClient CreateChatClient()
+ {
+ var client = new OpenAIClient(this.ApiKey);
+
+ return client.GetChatClient(this.ModelId);
+ }
+}
diff --git a/dotnet/test/AutoGen.OpenAI.Tests/ApprovalTests/OpenAIMessageTests.BasicMessageTest.approved.txt b/dotnet/test/AutoGen.OpenAI.Tests/ApprovalTests/OpenAIMessageTests.BasicMessageTest.approved.txt
new file mode 100644
index 00000000000..3574e593d8d
--- /dev/null
+++ b/dotnet/test/AutoGen.OpenAI.Tests/ApprovalTests/OpenAIMessageTests.BasicMessageTest.approved.txt
@@ -0,0 +1,232 @@
+[
+ {
+ "OriginalMessage": "TextMessage(system, You are a helpful AI assistant, )",
+ "ConvertedMessages": [
+ {
+ "Name": null,
+ "Role": "system",
+ "Content": [
+ {
+ "Kind": {},
+ "Text": "You are a helpful AI assistant",
+ "Refusal": null,
+ "ImageUri": null,
+ "ImageBytes": null,
+ "ImageBytesMediaType": null,
+ "ImageDetail": null
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "TextMessage(user, Hello, user)",
+ "ConvertedMessages": [
+ {
+ "Role": "user",
+ "Content": [
+ {
+ "Kind": {},
+ "Text": "Hello",
+ "Refusal": null,
+ "ImageUri": null,
+ "ImageBytes": null,
+ "ImageBytesMediaType": null,
+ "ImageDetail": null
+ }
+ ],
+ "Name": "user",
+ "MultiModaItem": [
+ {
+ "Type": "Text",
+ "Text": "Hello"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "TextMessage(assistant, How can I help you?, assistant)",
+ "ConvertedMessages": [
+ {
+ "Role": "assistant",
+ "Content": [
+ {
+ "Kind": {},
+ "Text": "How can I help you?",
+ "Refusal": null,
+ "ImageUri": null,
+ "ImageBytes": null,
+ "ImageBytesMediaType": null,
+ "ImageDetail": null
+ }
+ ],
+ "Name": "assistant",
+ "TooCall": [],
+ "FunctionCallName": null,
+ "FunctionCallArguments": null
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "ImageMessage(user, https://example.com/image.png, user)",
+ "ConvertedMessages": [
+ {
+ "Role": "user",
+ "Content": [
+ {
+ "Kind": {},
+ "Text": null,
+ "Refusal": null,
+ "ImageUri": "https://example.com/image.png",
+ "ImageBytes": null,
+ "ImageBytesMediaType": null,
+ "ImageDetail": null
+ }
+ ],
+ "Name": "user",
+ "MultiModaItem": [
+ {
+ "Type": "Image",
+ "ImageUrl": "https://example.com/image.png"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "MultiModalMessage(assistant, user)\n\tTextMessage(user, Hello, user)\n\tImageMessage(user, https://example.com/image.png, user)",
+ "ConvertedMessages": [
+ {
+ "Role": "user",
+ "Content": [
+ {
+ "Kind": {},
+ "Text": "Hello",
+ "Refusal": null,
+ "ImageUri": null,
+ "ImageBytes": null,
+ "ImageBytesMediaType": null,
+ "ImageDetail": null
+ },
+ {
+ "Kind": {},
+ "Text": null,
+ "Refusal": null,
+ "ImageUri": "https://example.com/image.png",
+ "ImageBytes": null,
+ "ImageBytesMediaType": null,
+ "ImageDetail": null
+ }
+ ],
+ "Name": "user",
+ "MultiModaItem": [
+ {
+ "Type": "Text",
+ "Text": "Hello"
+ },
+ {
+ "Type": "Image",
+ "ImageUrl": "https://example.com/image.png"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "ToolCallMessage(assistant)\n\tToolCall(test, test, )",
+ "ConvertedMessages": [
+ {
+ "Role": "assistant",
+ "Content": [],
+ "Name": "assistant",
+ "TooCall": [
+ {
+ "Type": "Function",
+ "Name": "test",
+ "Arguments": "test",
+ "Id": "test"
+ }
+ ],
+ "FunctionCallName": null,
+ "FunctionCallArguments": null
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "ToolCallResultMessage(user)\n\tToolCall(test, test, result)",
+ "ConvertedMessages": [
+ {
+ "Role": "tool",
+ "Content": "result",
+ "ToolCallId": "test"
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "ToolCallResultMessage(user)\n\tToolCall(result, test, test)\n\tToolCall(result, test, test)",
+ "ConvertedMessages": [
+ {
+ "Role": "tool",
+ "Content": "test",
+ "ToolCallId": "result_0"
+ },
+ {
+ "Role": "tool",
+ "Content": "test",
+ "ToolCallId": "result_1"
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "ToolCallMessage(assistant)\n\tToolCall(test, test, )\n\tToolCall(test, test, )",
+ "ConvertedMessages": [
+ {
+ "Role": "assistant",
+ "Content": [],
+ "Name": "assistant",
+ "TooCall": [
+ {
+ "Type": "Function",
+ "Name": "test",
+ "Arguments": "test",
+ "Id": "test_0"
+ },
+ {
+ "Type": "Function",
+ "Name": "test",
+ "Arguments": "test",
+ "Id": "test_1"
+ }
+ ],
+ "FunctionCallName": null,
+ "FunctionCallArguments": null
+ }
+ ]
+ },
+ {
+ "OriginalMessage": "AggregateMessage(assistant)\n\tToolCallMessage(assistant)\n\tToolCall(test, test, )\n\tToolCallResultMessage(assistant)\n\tToolCall(test, test, result)",
+ "ConvertedMessages": [
+ {
+ "Role": "assistant",
+ "Content": [],
+ "Name": "assistant",
+ "TooCall": [
+ {
+ "Type": "Function",
+ "Name": "test",
+ "Arguments": "test",
+ "Id": "test"
+ }
+ ],
+ "FunctionCallName": null,
+ "FunctionCallArguments": null
+ },
+ {
+ "Role": "tool",
+ "Content": "result",
+ "ToolCallId": "test"
+ }
+ ]
+ }
+]
\ No newline at end of file
diff --git a/dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj b/dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj
new file mode 100644
index 00000000000..a6495fc4487
--- /dev/null
+++ b/dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj
@@ -0,0 +1,19 @@
+
+
+
+ $(TestTargetFrameworks)
+ false
+ True
+ True
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/dotnet/test/AutoGen.OpenAI.Tests/GlobalUsing.cs b/dotnet/test/AutoGen.OpenAI.Tests/GlobalUsing.cs
new file mode 100644
index 00000000000..d66bf001ed5
--- /dev/null
+++ b/dotnet/test/AutoGen.OpenAI.Tests/GlobalUsing.cs
@@ -0,0 +1,4 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// GlobalUsing.cs
+
+global using AutoGen.Core;
diff --git a/dotnet/test/AutoGen.OpenAI.Tests/MathClassTest.cs b/dotnet/test/AutoGen.OpenAI.Tests/MathClassTest.cs
new file mode 100644
index 00000000000..be1c38ad0a3
--- /dev/null
+++ b/dotnet/test/AutoGen.OpenAI.Tests/MathClassTest.cs
@@ -0,0 +1,219 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// MathClassTest.cs
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using AutoGen.OpenAI.Extension;
+using AutoGen.Tests;
+using Azure.AI.OpenAI;
+using FluentAssertions;
+using OpenAI;
+using Xunit.Abstractions;
+
+namespace AutoGen.OpenAI.Tests
+{
+ public partial class MathClassTest
+ {
+ private readonly ITestOutputHelper _output;
+
+ // as of 2024-05-20, aoai return 500 error when round > 1
+ // I'm pretty sure that round > 5 was supported before
+ // So this is probably some weird regression on aoai side
+ // I'll keep this test case here for now, plus setting round to 1
+ // so the test can still pass.
+ // In the future, we should rewind this test case to round > 1 (previously was 5)
+ private int round = 1;
+ public MathClassTest(ITestOutputHelper output)
+ {
+ _output = output;
+ }
+
+ private Task Print(IEnumerable messages, GenerateReplyOptions? option, IAgent agent, CancellationToken ct)
+ {
+ try
+ {
+ var reply = agent.GenerateReplyAsync(messages, option, ct).Result;
+
+ _output.WriteLine(reply.FormatMessage());
+ return Task.FromResult(reply);
+ }
+ catch (Exception)
+ {
+ _output.WriteLine("Request failed");
+ _output.WriteLine($"agent name: {agent.Name}");
+ foreach (var message in messages)
+ {
+ _output.WriteLine(message.FormatMessage());
+ }
+
+ throw;
+ }
+
+ }
+
+ [FunctionAttribute]
+ public async Task CreateMathQuestion(string question, int question_index)
+ {
+ return $@"[MATH_QUESTION]
+Question {question_index}:
+{question}
+
+Student, please answer";
+ }
+
+ [FunctionAttribute]
+ public async Task AnswerQuestion(string answer)
+ {
+ return $@"[MATH_ANSWER]
+The answer is {answer}
+teacher please check answer";
+ }
+
+ [FunctionAttribute]
+ public async Task AnswerIsCorrect(string message)
+ {
+ return $@"[ANSWER_IS_CORRECT]
+{message}
+please update progress";
+ }
+
+ [FunctionAttribute]
+ public async Task UpdateProgress(int correctAnswerCount)
+ {
+ if (correctAnswerCount >= this.round)
+ {
+ return $@"[UPDATE_PROGRESS]
+{GroupChatExtension.TERMINATE}";
+ }
+ else
+ {
+ return $@"[UPDATE_PROGRESS]
+the number of resolved question is {correctAnswerCount}
+teacher, please create the next math question";
+ }
+ }
+
+
+ [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
+ public async Task OpenAIAgentMathChatTestAsync()
+ {
+ var key = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? throw new ArgumentException("AZURE_OPENAI_API_KEY is not set");
+ var endPoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new ArgumentException("AZURE_OPENAI_ENDPOINT is not set");
+ var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new ArgumentException("AZURE_OPENAI_DEPLOY_NAME is not set");
+ var openaiClient = new AzureOpenAIClient(new Uri(endPoint), new Azure.AzureKeyCredential(key));
+ var teacher = await CreateTeacherAgentAsync(openaiClient, deployName);
+ var student = await CreateStudentAssistantAgentAsync(openaiClient, deployName);
+
+ var adminFunctionMiddleware = new FunctionCallMiddleware(
+ functions: [this.UpdateProgressFunctionContract],
+ functionMap: new Dictionary>>
+ {
+ { this.UpdateProgressFunctionContract.Name, this.UpdateProgressWrapper },
+ });
+ var admin = new OpenAIChatAgent(
+ chatClient: openaiClient.GetChatClient(deployName),
+ name: "Admin",
+ systemMessage: $@"You are admin. You update progress after each question is answered.")
+ .RegisterMessageConnector()
+ .RegisterStreamingMiddleware(adminFunctionMiddleware)
+ .RegisterMiddleware(Print);
+
+ var groupAdmin = new OpenAIChatAgent(
+ chatClient: openaiClient.GetChatClient(deployName),
+ name: "GroupAdmin",
+ systemMessage: "You are group admin. You manage the group chat.")
+ .RegisterMessageConnector()
+ .RegisterMiddleware(Print);
+ await RunMathChatAsync(teacher, student, admin, groupAdmin);
+ }
+
+ private async Task CreateTeacherAgentAsync(OpenAIClient client, string model)
+ {
+ var functionCallMiddleware = new FunctionCallMiddleware(
+ functions: [this.CreateMathQuestionFunctionContract, this.AnswerIsCorrectFunctionContract],
+ functionMap: new Dictionary>>
+ {
+ { this.CreateMathQuestionFunctionContract.Name!, this.CreateMathQuestionWrapper },
+ { this.AnswerIsCorrectFunctionContract.Name!, this.AnswerIsCorrectWrapper },
+ });
+
+ var teacher = new OpenAIChatAgent(
+ chatClient: client.GetChatClient(model),
+ name: "Teacher",
+ systemMessage: @"You are a preschool math teacher.
+You create math question and ask student to answer it.
+Then you check if the answer is correct.
+If the answer is wrong, you ask student to fix it")
+ .RegisterMessageConnector()
+ .RegisterStreamingMiddleware(functionCallMiddleware)
+ .RegisterMiddleware(Print);
+
+ return teacher;
+ }
+
+ private async Task CreateStudentAssistantAgentAsync(OpenAIClient client, string model)
+ {
+ var functionCallMiddleware = new FunctionCallMiddleware(
+ functions: [this.AnswerQuestionFunctionContract],
+ functionMap: new Dictionary>>
+ {
+ { this.AnswerQuestionFunctionContract.Name!, this.AnswerQuestionWrapper },
+ });
+ var student = new OpenAIChatAgent(
+ chatClient: client.GetChatClient(model),
+ name: "Student",
+ systemMessage: @"You are a student. You answer math question from teacher.")
+ .RegisterMessageConnector()
+ .RegisterStreamingMiddleware(functionCallMiddleware)
+ .RegisterMiddleware(Print);
+
+ return student;
+ }
+
+ private async Task RunMathChatAsync(IAgent teacher, IAgent student, IAgent admin, IAgent groupAdmin)
+ {
+ var teacher2Student = Transition.Create(teacher, student);
+ var student2Teacher = Transition.Create(student, teacher);
+ var teacher2Admin = Transition.Create(teacher, admin);
+ var admin2Teacher = Transition.Create(admin, teacher);
+ var workflow = new Graph(
+ [
+ teacher2Student,
+ student2Teacher,
+ teacher2Admin,
+ admin2Teacher,
+ ]);
+ var group = new GroupChat(
+ workflow: workflow,
+ members: [
+ admin,
+ teacher,
+ student,
+ ],
+ admin: groupAdmin);
+
+ var groupChatManager = new GroupChatManager(group);
+ var chatHistory = await admin.InitiateChatAsync(groupChatManager, "teacher, create question", maxRound: 50);
+
+ chatHistory.Where(msg => msg.From == teacher.Name && msg.GetContent()?.Contains("[MATH_QUESTION]") is true)
+ .Count()
+ .Should().BeGreaterThanOrEqualTo(this.round);
+
+ chatHistory.Where(msg => msg.From == student.Name && msg.GetContent()?.Contains("[MATH_ANSWER]") is true)
+ .Count()
+ .Should().BeGreaterThanOrEqualTo(this.round);
+
+ chatHistory.Where(msg => msg.From == teacher.Name && msg.GetContent()?.Contains("[ANSWER_IS_CORRECT]") is true)
+ .Count()
+ .Should().BeGreaterThanOrEqualTo(this.round);
+
+ // check if there's terminate chat message from admin
+ chatHistory.Where(msg => msg.From == admin.Name && msg.IsGroupChatTerminateMessage())
+ .Count()
+ .Should().Be(1);
+ }
+ }
+}
diff --git a/dotnet/test/AutoGen.OpenAI.Tests/OpenAIChatAgentTest.cs b/dotnet/test/AutoGen.OpenAI.Tests/OpenAIChatAgentTest.cs
new file mode 100644
index 00000000000..bcbfee6e208
--- /dev/null
+++ b/dotnet/test/AutoGen.OpenAI.Tests/OpenAIChatAgentTest.cs
@@ -0,0 +1,256 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// OpenAIChatAgentTest.cs
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+using AutoGen.OpenAI.Extension;
+using AutoGen.Tests;
+using Azure.AI.OpenAI;
+using FluentAssertions;
+using OpenAI;
+using OpenAI.Chat;
+
+namespace AutoGen.OpenAI.Tests;
+
+public partial class OpenAIChatAgentTest
+{
+ ///
+ /// Get the weather for a location.
+ ///
+ /// location
+ ///
+ [Function]
+ public async Task GetWeatherAsync(string location)
+ {
+ return $"The weather in {location} is sunny.";
+ }
+
+ [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
+ public async Task BasicConversationTestAsync()
+ {
+ var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable.");
+ var openaiClient = CreateOpenAIClientFromAzureOpenAI();
+ var openAIChatAgent = new OpenAIChatAgent(
+ chatClient: openaiClient.GetChatClient(deployName),
+ name: "assistant");
+
+ // By default, OpenAIChatClient supports the following message types
+ // - IMessage
+ var chatMessageContent = MessageEnvelope.Create(new UserChatMessage("Hello"));
+ var reply = await openAIChatAgent.SendAsync(chatMessageContent);
+
+ reply.Should().BeOfType>();
+ reply.As>().From.Should().Be("assistant");
+ reply.As>().Content.Role.Should().Be(ChatMessageRole.Assistant);
+ reply.As>().Content.Usage.TotalTokens.Should().BeGreaterThan(0);
+
+ // test streaming
+ var streamingReply = openAIChatAgent.GenerateStreamingReplyAsync(new[] { chatMessageContent });
+
+ await foreach (var streamingMessage in streamingReply)
+ {
+ streamingMessage.Should().BeOfType>();
+ streamingMessage.As>().From.Should().Be("assistant");
+ }
+ }
+
+ [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
+ public async Task OpenAIChatMessageContentConnectorTestAsync()
+ {
+ var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable.");
+ var openaiClient = CreateOpenAIClientFromAzureOpenAI();
+ var openAIChatAgent = new OpenAIChatAgent(
+ chatClient: openaiClient.GetChatClient(deployName),
+ name: "assistant");
+
+ MiddlewareStreamingAgent assistant = openAIChatAgent
+ .RegisterMessageConnector();
+
+ var messages = new IMessage[]
+ {
+ MessageEnvelope.Create(new UserChatMessage("Hello")),
+ new TextMessage(Role.Assistant, "Hello", from: "user"),
+ new MultiModalMessage(Role.Assistant,
+ [
+ new TextMessage(Role.Assistant, "Hello", from: "user"),
+ ],
+ from: "user"),
+ };
+
+ foreach (var message in messages)
+ {
+ var reply = await assistant.SendAsync(message);
+
+ reply.Should().BeOfType();
+ reply.As().From.Should().Be("assistant");
+ }
+
+ // test streaming
+ foreach (var message in messages)
+ {
+ var reply = assistant.GenerateStreamingReplyAsync([message]);
+
+ await foreach (var streamingMessage in reply)
+ {
+ streamingMessage.Should().BeOfType();
+ streamingMessage.As().From.Should().Be("assistant");
+ }
+ }
+ }
+
+ [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
+ public async Task OpenAIChatAgentToolCallTestAsync()
+ {
+ var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable.");
+ var openaiClient = CreateOpenAIClientFromAzureOpenAI();
+ var openAIChatAgent = new OpenAIChatAgent(
+ chatClient: openaiClient.GetChatClient(deployName),
+ name: "assistant");
+
+ var functionCallMiddleware = new FunctionCallMiddleware(
+ functions: [this.GetWeatherAsyncFunctionContract]);
+ MiddlewareStreamingAgent assistant = openAIChatAgent
+ .RegisterMessageConnector();
+
+ assistant.StreamingMiddlewares.Count().Should().Be(1);
+ var functionCallAgent = assistant
+ .RegisterStreamingMiddleware(functionCallMiddleware);
+
+ var question = "What's the weather in Seattle";
+ var messages = new IMessage[]
+ {
+ MessageEnvelope.Create(new UserChatMessage(question)),
+ new TextMessage(Role.Assistant, question, from: "user"),
+ new MultiModalMessage(Role.Assistant,
+ [
+ new TextMessage(Role.Assistant, question, from: "user"),
+ ],
+ from: "user"),
+ };
+
+ foreach (var message in messages)
+ {
+ var reply = await functionCallAgent.SendAsync(message);
+
+ reply.Should().BeOfType();
+ reply.As().From.Should().Be("assistant");
+ reply.As().ToolCalls.Count().Should().Be(1);
+ reply.As().ToolCalls.First().FunctionName.Should().Be(this.GetWeatherAsyncFunctionContract.Name);
+ }
+
+ // test streaming
+ foreach (var message in messages)
+ {
+ var reply = functionCallAgent.GenerateStreamingReplyAsync([message]);
+ ToolCallMessage? toolCallMessage = null;
+ await foreach (var streamingMessage in reply)
+ {
+ if (streamingMessage is ToolCallMessage finalMessage)
+ {
+ toolCallMessage = finalMessage;
+ break;
+ }
+
+ streamingMessage.Should().BeOfType();
+ streamingMessage.As().From.Should().Be("assistant");
+ }
+
+ toolCallMessage.Should().NotBeNull();
+ toolCallMessage!.From.Should().Be("assistant");
+ toolCallMessage.ToolCalls.Count().Should().Be(1);
+ toolCallMessage.ToolCalls.First().FunctionName.Should().Be(this.GetWeatherAsyncFunctionContract.Name);
+ }
+ }
+
+ [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
+ public async Task OpenAIChatAgentToolCallInvokingTestAsync()
+ {
+ var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable.");
+ var openaiClient = CreateOpenAIClientFromAzureOpenAI();
+ var openAIChatAgent = new OpenAIChatAgent(
+ chatClient: openaiClient.GetChatClient(deployName),
+ name: "assistant");
+
+ var functionCallMiddleware = new FunctionCallMiddleware(
+ functions: [this.GetWeatherAsyncFunctionContract],
+ functionMap: new Dictionary>> { { this.GetWeatherAsyncFunctionContract.Name!, this.GetWeatherAsyncWrapper } });
+ MiddlewareStreamingAgent assistant = openAIChatAgent
+ .RegisterMessageConnector();
+
+ var functionCallAgent = assistant
+ .RegisterStreamingMiddleware(functionCallMiddleware);
+
+ var question = "What's the weather in Seattle";
+ var messages = new IMessage[]
+ {
+ MessageEnvelope.Create(new UserChatMessage(question)),
+ new TextMessage(Role.Assistant, question, from: "user"),
+ new MultiModalMessage(Role.Assistant,
+ [
+ new TextMessage(Role.Assistant, question, from: "user"),
+ ],
+ from: "user"),
+ };
+
+ foreach (var message in messages)
+ {
+ var reply = await functionCallAgent.SendAsync(message);
+
+ reply.Should().BeOfType();
+ reply.From.Should().Be("assistant");
+ reply.GetToolCalls()!.Count().Should().Be(1);
+ reply.GetToolCalls()!.First().FunctionName.Should().Be(this.GetWeatherAsyncFunctionContract.Name);
+ reply.GetContent()!.ToLower().Should().Contain("seattle");
+ }
+
+ // test streaming
+ foreach (var message in messages)
+ {
+ var reply = functionCallAgent.GenerateStreamingReplyAsync([message]);
+ await foreach (var streamingMessage in reply)
+ {
+ if (streamingMessage is not IMessage)
+ {
+ streamingMessage.Should().BeOfType();
+ streamingMessage.As().From.Should().Be("assistant");
+ }
+ else
+ {
+ streamingMessage.Should().BeOfType();
+ streamingMessage.As().GetContent()!.ToLower().Should().Contain("seattle");
+ }
+ }
+ }
+ }
+
+ [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
+ public async Task ItCreateOpenAIChatAgentWithChatCompletionOptionAsync()
+ {
+ var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable.");
+ var openaiClient = CreateOpenAIClientFromAzureOpenAI();
+ var options = new ChatCompletionOptions()
+ {
+ Temperature = 0.7f,
+ MaxTokens = 1,
+ };
+
+ var openAIChatAgent = new OpenAIChatAgent(
+ chatClient: openaiClient.GetChatClient(deployName),
+ name: "assistant",
+ options: options)
+ .RegisterMessageConnector();
+
+ var respond = await openAIChatAgent.SendAsync("hello");
+ respond.GetContent()?.Should().NotBeNullOrEmpty();
+ }
+
+
+ private OpenAIClient CreateOpenAIClientFromAzureOpenAI()
+ {
+ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new Exception("Please set AZURE_OPENAI_ENDPOINT environment variable.");
+ var key = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? throw new Exception("Please set AZURE_OPENAI_API_KEY environment variable.");
+ return new AzureOpenAIClient(new Uri(endpoint), new Azure.AzureKeyCredential(key));
+ }
+}
diff --git a/dotnet/test/AutoGen.OpenAI.Tests/OpenAIMessageTests.cs b/dotnet/test/AutoGen.OpenAI.Tests/OpenAIMessageTests.cs
new file mode 100644
index 00000000000..a05f440a17b
--- /dev/null
+++ b/dotnet/test/AutoGen.OpenAI.Tests/OpenAIMessageTests.cs
@@ -0,0 +1,692 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// OpenAIMessageTests.cs
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Reflection;
+using System.Text.Json;
+using System.Threading.Tasks;
+using ApprovalTests;
+using ApprovalTests.Namers;
+using ApprovalTests.Reporters;
+using AutoGen.Tests;
+using FluentAssertions;
+using OpenAI.Chat;
+using Xunit;
+
+namespace AutoGen.OpenAI.Tests;
+
+public class OpenAIMessageTests
+{
+ private readonly JsonSerializerOptions jsonSerializerOptions = new JsonSerializerOptions
+ {
+ WriteIndented = true,
+ IgnoreReadOnlyProperties = false,
+ };
+
+ [Fact]
+ [UseReporter(typeof(DiffReporter))]
+ [UseApprovalSubdirectory("ApprovalTests")]
+ public void BasicMessageTest()
+ {
+ IMessage[] messages = [
+ new TextMessage(Role.System, "You are a helpful AI assistant"),
+ new TextMessage(Role.User, "Hello", "user"),
+ new TextMessage(Role.Assistant, "How can I help you?", from: "assistant"),
+ new ImageMessage(Role.User, "https://example.com/image.png", "user"),
+ new MultiModalMessage(Role.Assistant,
+ [
+ new TextMessage(Role.User, "Hello", "user"),
+ new ImageMessage(Role.User, "https://example.com/image.png", "user"),
+ ], "user"),
+ new ToolCallMessage("test", "test", "assistant"),
+ new ToolCallResultMessage("result", "test", "test", "user"),
+ new ToolCallResultMessage(
+ [
+ new ToolCall("result", "test", "test"),
+ new ToolCall("result", "test", "test"),
+ ], "user"),
+ new ToolCallMessage(
+ [
+ new ToolCall("test", "test"),
+ new ToolCall("test", "test"),
+ ], "assistant"),
+            new AggregateMessage<ToolCallMessage, ToolCallResultMessage>(
+                message1: new ToolCallMessage("test", "test", "assistant"),
+                message2: new ToolCallResultMessage("result", "test", "test", "assistant"), "assistant"),
+ ];
+ var openaiMessageConnectorMiddleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant");
+
+ var oaiMessages = messages.Select(m => (m, openaiMessageConnectorMiddleware.ProcessIncomingMessages(agent, [m])));
+ VerifyOAIMessages(oaiMessages);
+ }
+
+ [Fact]
+ public async Task ItProcessUserTextMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (UserChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+ chatRequestMessage.Content.First().Text.Should().Be("Hello");
+ chatRequestMessage.ParticipantName.Should().Be("user");
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ IMessage message = new TextMessage(Role.User, "Hello", "user");
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItShortcutChatRequestMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<UserChatMessage>>();
+
+                var chatRequestMessage = (UserChatMessage)((MessageEnvelope<UserChatMessage>)innerMessage!).Content;
+ chatRequestMessage.Content.First().Text.Should().Be("hello");
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ var userMessage = new UserChatMessage("hello");
+ var chatRequestMessage = MessageEnvelope.Create(userMessage);
+ await agent.GenerateReplyAsync([chatRequestMessage]);
+ }
+
+ [Fact]
+ public async Task ItShortcutMessageWhenStrictModelIsFalseAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<string>>();
+
+                var chatRequestMessage = ((MessageEnvelope<string>)innerMessage!).Content;
+ chatRequestMessage.Should().Be("hello");
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ var userMessage = "hello";
+ var chatRequestMessage = MessageEnvelope.Create(userMessage);
+ await agent.GenerateReplyAsync([chatRequestMessage]);
+ }
+
+ [Fact]
+ public async Task ItThrowExceptionWhenStrictModeIsTrueAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector(true);
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(middleware);
+
+ // user message
+ var userMessage = "hello";
+ var chatRequestMessage = MessageEnvelope.Create(userMessage);
+        Func<Task> action = async () => await agent.GenerateReplyAsync([chatRequestMessage]);
+
+        await action.Should().ThrowAsync<InvalidOperationException>().WithMessage("Invalid message type: MessageEnvelope`1");
+ }
+
+ [Fact]
+ public async Task ItProcessAssistantTextMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (AssistantChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+ chatRequestMessage.Content.First().Text.Should().Be("How can I help you?");
+ chatRequestMessage.ParticipantName.Should().Be("assistant");
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // assistant message
+ IMessage message = new TextMessage(Role.Assistant, "How can I help you?", "assistant");
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItProcessSystemTextMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (SystemChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+ chatRequestMessage.Content.First().Text.Should().Be("You are a helpful AI assistant");
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // system message
+ IMessage message = new TextMessage(Role.System, "You are a helpful AI assistant");
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItProcessImageMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (UserChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+ chatRequestMessage.ParticipantName.Should().Be("user");
+ chatRequestMessage.Content.Count().Should().Be(1);
+ chatRequestMessage.Content.First().Kind.Should().Be(ChatMessageContentPartKind.Image);
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ IMessage message = new ImageMessage(Role.User, "https://example.com/image.png", "user");
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItThrowExceptionWhenProcessingImageMessageFromSelfAndStrictModeIsTrueAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector(true);
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(middleware);
+
+ var imageMessage = new ImageMessage(Role.Assistant, "https://example.com/image.png", "assistant");
+        Func<Task> action = async () => await agent.GenerateReplyAsync([imageMessage]);
+
+        await action.Should().ThrowAsync<InvalidOperationException>().WithMessage("Invalid message type: ImageMessage");
+ }
+
+ [Fact]
+ public async Task ItProcessMultiModalMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (UserChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+ chatRequestMessage.ParticipantName.Should().Be("user");
+ chatRequestMessage.Content.Count().Should().Be(2);
+ chatRequestMessage.Content.First().Kind.Should().Be(ChatMessageContentPartKind.Text);
+ chatRequestMessage.Content.Last().Kind.Should().Be(ChatMessageContentPartKind.Image);
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ IMessage message = new MultiModalMessage(
+ Role.User,
+ [
+ new TextMessage(Role.User, "Hello", "user"),
+ new ImageMessage(Role.User, "https://example.com/image.png", "user"),
+ ], "user");
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItThrowExceptionWhenProcessingMultiModalMessageFromSelfAndStrictModeIsTrueAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector(true);
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(middleware);
+
+ var multiModalMessage = new MultiModalMessage(
+ Role.Assistant,
+ [
+ new TextMessage(Role.User, "Hello", "assistant"),
+ new ImageMessage(Role.User, "https://example.com/image.png", "assistant"),
+ ], "assistant");
+
+        Func<Task> action = async () => await agent.GenerateReplyAsync([multiModalMessage]);
+
+        await action.Should().ThrowAsync<InvalidOperationException>().WithMessage("Invalid message type: MultiModalMessage");
+ }
+
+ [Fact]
+ public async Task ItProcessToolCallMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (AssistantChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+                chatRequestMessage.ParticipantName.Should().Be("assistant");
+                chatRequestMessage.ToolCalls.Count().Should().Be(1);
+                chatRequestMessage.Content.First().Text.Should().Be("textContent");
+                chatRequestMessage.ToolCalls.First().Should().BeOfType<ChatToolCall>();
+                var functionToolCall = (ChatToolCall)chatRequestMessage.ToolCalls.First();
+ functionToolCall.FunctionName.Should().Be("test");
+ functionToolCall.Id.Should().Be("test");
+ functionToolCall.FunctionArguments.Should().Be("test");
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ IMessage message = new ToolCallMessage("test", "test", "assistant")
+ {
+ Content = "textContent",
+ };
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItProcessParallelToolCallMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (AssistantChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+                chatRequestMessage.Content.Should().BeNullOrEmpty();
+                chatRequestMessage.ParticipantName.Should().Be("assistant");
+                chatRequestMessage.ToolCalls.Count().Should().Be(2);
+                for (int i = 0; i < chatRequestMessage.ToolCalls.Count(); i++)
+                {
+                    chatRequestMessage.ToolCalls.ElementAt(i).Should().BeOfType<ChatToolCall>();
+                    var functionToolCall = (ChatToolCall)chatRequestMessage.ToolCalls.ElementAt(i);
+ functionToolCall.FunctionName.Should().Be("test");
+ functionToolCall.Id.Should().Be($"test_{i}");
+ functionToolCall.FunctionArguments.Should().Be("test");
+ }
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ var toolCalls = new[]
+ {
+ new ToolCall("test", "test"),
+ new ToolCall("test", "test"),
+ };
+ IMessage message = new ToolCallMessage(toolCalls, "assistant");
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItThrowExceptionWhenProcessingToolCallMessageFromUserAndStrictModeIsTrueAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector(strictMode: true);
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(middleware);
+
+        var toolCallMessage = new ToolCallMessage("test", "test", "user");
+        Func<Task> action = async () => await agent.GenerateReplyAsync([toolCallMessage]);
+        await action.Should().ThrowAsync<InvalidOperationException>().WithMessage("Invalid message type: ToolCallMessage");
+ }
+
+ [Fact]
+ public async Task ItProcessToolCallResultMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (ToolChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+ chatRequestMessage.Content.First().Text.Should().Be("result");
+ chatRequestMessage.ToolCallId.Should().Be("test");
+
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ IMessage message = new ToolCallResultMessage("result", "test", "test", "user");
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItProcessParallelToolCallResultMessageAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ msgs.Count().Should().Be(2);
+
+ for (int i = 0; i < msgs.Count(); i++)
+ {
+ var innerMessage = msgs.ElementAt(i);
+                    innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                    var chatRequestMessage = (ToolChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+ chatRequestMessage.Content.First().Text.Should().Be("result");
+ chatRequestMessage.ToolCallId.Should().Be($"test_{i}");
+ }
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ var toolCalls = new[]
+ {
+ new ToolCall("test", "test", "result"),
+ new ToolCall("test", "test", "result"),
+ };
+ IMessage message = new ToolCallResultMessage(toolCalls, "user");
+ await agent.GenerateReplyAsync([message]);
+ }
+
+ [Fact]
+ public async Task ItProcessFunctionCallMiddlewareMessageFromUserAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ msgs.Count().Should().Be(1);
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (UserChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+ chatRequestMessage.Content.First().Text.Should().Be("result");
+ chatRequestMessage.ParticipantName.Should().Be("user");
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ var toolCallMessage = new ToolCallMessage("test", "test", "user");
+ var toolCallResultMessage = new ToolCallResultMessage("result", "test", "test", "user");
+        var aggregateMessage = new AggregateMessage<ToolCallMessage, ToolCallResultMessage>(toolCallMessage, toolCallResultMessage, "user");
+ await agent.GenerateReplyAsync([aggregateMessage]);
+ }
+
+ [Fact]
+ public async Task ItProcessFunctionCallMiddlewareMessageFromAssistantAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ msgs.Count().Should().Be(2);
+ var innerMessage = msgs.Last();
+                innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var chatRequestMessage = (ToolChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
+                chatRequestMessage.Content.First().Text.Should().Be("result");
+                chatRequestMessage.ToolCallId.Should().Be("test");
+
+                var toolCallMessage = msgs.First();
+                toolCallMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var toolCallRequestMessage = (AssistantChatMessage)((MessageEnvelope<ChatMessage>)toolCallMessage!).Content;
+                toolCallRequestMessage.Content.Should().BeNullOrEmpty();
+                toolCallRequestMessage.ToolCalls.Count().Should().Be(1);
+                toolCallRequestMessage.ToolCalls.First().Should().BeOfType<ChatToolCall>();
+                var functionToolCall = (ChatToolCall)toolCallRequestMessage.ToolCalls.First();
+ functionToolCall.FunctionName.Should().Be("test");
+ functionToolCall.Id.Should().Be("test");
+ functionToolCall.FunctionArguments.Should().Be("test");
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ var toolCallMessage = new ToolCallMessage("test", "test", "assistant");
+ var toolCallResultMessage = new ToolCallResultMessage("result", "test", "test", "assistant");
+ var aggregateMessage = new ToolCallAggregateMessage(toolCallMessage, toolCallResultMessage, "assistant");
+ await agent.GenerateReplyAsync([aggregateMessage]);
+ }
+
+ [Fact]
+ public async Task ItProcessParallelFunctionCallMiddlewareMessageFromAssistantAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(async (msgs, _, innerAgent, _) =>
+ {
+ msgs.Count().Should().Be(3);
+ var toolCallMessage = msgs.First();
+                toolCallMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                var toolCallRequestMessage = (AssistantChatMessage)((MessageEnvelope<ChatMessage>)toolCallMessage!).Content;
+                toolCallRequestMessage.Content.Should().BeNullOrEmpty();
+                toolCallRequestMessage.ToolCalls.Count().Should().Be(2);
+
+                for (int i = 0; i < toolCallRequestMessage.ToolCalls.Count(); i++)
+                {
+                    toolCallRequestMessage.ToolCalls.ElementAt(i).Should().BeOfType<ChatToolCall>();
+                    var functionToolCall = (ChatToolCall)toolCallRequestMessage.ToolCalls.ElementAt(i);
+                    functionToolCall.FunctionName.Should().Be("test");
+                    functionToolCall.Id.Should().Be($"test_{i}");
+                    functionToolCall.FunctionArguments.Should().Be("test");
+                }
+
+                for (int i = 1; i < msgs.Count(); i++)
+                {
+                    var toolCallResultMessage = msgs.ElementAt(i);
+                    toolCallResultMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
+                    var toolCallResultRequestMessage = (ToolChatMessage)((MessageEnvelope<ChatMessage>)toolCallResultMessage!).Content;
+ toolCallResultRequestMessage.Content.First().Text.Should().Be("result");
+ toolCallResultRequestMessage.ToolCallId.Should().Be($"test_{i - 1}");
+ }
+
+ return await innerAgent.GenerateReplyAsync(msgs);
+ })
+ .RegisterMiddleware(middleware);
+
+ // user message
+ var toolCalls = new[]
+ {
+ new ToolCall("test", "test", "result"),
+ new ToolCall("test", "test", "result"),
+ };
+ var toolCallMessage = new ToolCallMessage(toolCalls, "assistant");
+ var toolCallResultMessage = new ToolCallResultMessage(toolCalls, "assistant");
+        var aggregateMessage = new AggregateMessage<ToolCallMessage, ToolCallResultMessage>(toolCallMessage, toolCallResultMessage, "assistant");
+ await agent.GenerateReplyAsync([aggregateMessage]);
+ }
+
+ [Fact]
+ public async Task ItReturnOriginalMessageWhenStrictModeIsFalseAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector();
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(middleware);
+
+ // text message
+ var textMessage = "hello";
+ var messageToSend = MessageEnvelope.Create(textMessage);
+
+ var message = await agent.GenerateReplyAsync([messageToSend]);
+        message.Should().BeOfType<MessageEnvelope<string>>();
+ }
+
+ [Fact]
+ public async Task ItThrowInvalidOperationExceptionWhenStrictModeIsTrueAsync()
+ {
+ var middleware = new OpenAIChatRequestMessageConnector(true);
+ var agent = new EchoAgent("assistant")
+ .RegisterMiddleware(middleware);
+
+ // text message
+ var textMessage = new UserChatMessage("hello");
+ var messageToSend = MessageEnvelope.Create(textMessage);
+        Func<Task> action = async () => await agent.GenerateReplyAsync([messageToSend]);
+
+        await action.Should().ThrowAsync<InvalidOperationException>().WithMessage("Invalid return message type MessageEnvelope`1");
+ }
+
+ [Fact]
+ public void ToOpenAIChatRequestMessageShortCircuitTest()
+ {
+ var agent = new EchoAgent("assistant");
+ var middleware = new OpenAIChatRequestMessageConnector();
+#pragma warning disable CS0618 // Type or member is obsolete
+ ChatMessage[] messages =
+ [
+ new UserChatMessage("Hello"),
+ new AssistantChatMessage("How can I help you?"),
+ new SystemChatMessage("You are a helpful AI assistant"),
+ new FunctionChatMessage("functionName", "result"),
+ new ToolChatMessage("test", "test"),
+ ];
+#pragma warning restore CS0618 // Type or member is obsolete
+
+ foreach (var oaiMessage in messages)
+ {
+            IMessage message = new MessageEnvelope<ChatMessage>(oaiMessage);
+            var oaiMessages = middleware.ProcessIncomingMessages(agent, [message]);
+            oaiMessages.Count().Should().Be(1);
+            //oaiMessages.First().Should().BeOfType<IMessage<ChatMessage>>();
+            if (oaiMessages.First() is IMessage<ChatMessage> chatRequestMessage)
+            {
+                chatRequestMessage.Content.Should().Be(oaiMessage);
+            }
+ else
+ {
+ // fail the test
+ Assert.True(false);
+ }
+ }
+ }
+ private void VerifyOAIMessages(IEnumerable<(IMessage, IEnumerable)> messages)
+ {
+ var jsonObjects = messages.Select(pair =>
+ {
+ var (originalMessage, ms) = pair;
+ var objs = new List