Skip to content

Commit

Permalink
Align config files schema across tests, service and examples + Config…
Browse files Browse the repository at this point in the history
… fixes (#309)

## Motivation and Context (Why the change? What's the scenario?)

Some functional tests' config files used a different schema, making it harder
to copy settings between the service, the examples, and the tests.
Redis config needs an update given the new tag for partition number.
Redis functional tests that check for error messages were not aligned with
the latest code.
Elasticsearch config settings were missing from the Service's appsettings.json.

## High level description (Approach, Design)

* Add "KernelMemory" config prefix where missing.
* Update Redis config and Redis settings.
* Add ES default settings.
  • Loading branch information
dluc authored Feb 13, 2024
1 parent f0dd6c8 commit d767d17
Show file tree
Hide file tree
Showing 18 changed files with 524 additions and 461 deletions.
1 change: 1 addition & 0 deletions KernelMemory.sln.DotSettings
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,7 @@ public void It$SOMENAME$()
<s:Boolean x:Key="/Default/UserDictionary/Words/=greaterthan/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/UserDictionary/Words/=hhmmssfffffff/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/UserDictionary/Words/=Hmmss/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/UserDictionary/Words/=HNSW/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/UserDictionary/Words/=inheritdoc/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/UserDictionary/Words/=INPROCESS/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/UserDictionary/Words/=Joinable/@EntryIndexedValue">True</s:Boolean>
Expand Down
Original file line number Diff line number Diff line change
@@ -1,34 +1,36 @@
{
"ServiceAuthorization": {
"Endpoint": "http://127.0.0.1:9001/",
"AccessKey": "",
},
"Services": {
"AzureAISearch": {
// "ApiKey" or "AzureIdentity". For other options see <AzureAISearchConfig>.
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>",
"APIKey": "",
},
"OpenAI": {
// Name of the model used to generate text (text completion or chat completion)
"TextModel": "gpt-3.5-turbo-16k",
// The max number of tokens supported by the text model.
"TextModelMaxTokenTotal": 16384,
// Name of the model used to generate text embeddings
"EmbeddingModel": "text-embedding-ada-002",
// The max number of tokens supported by the embedding model
// See https://platform.openai.com/docs/guides/embeddings/what-are-embeddings
"EmbeddingModelMaxTokenTotal": 8191,
// OpenAI API Key
"APIKey": "",
// OpenAI Organization ID (usually empty, unless you have multiple accounts on different orgs)
"OrgId": "",
// How many times to retry in case of throttling
"MaxRetries": 10
"KernelMemory": {
"ServiceAuthorization": {
"Endpoint": "http://127.0.0.1:9001/",
"AccessKey": ""
},
"Services": {
"AzureAISearch": {
// "ApiKey" or "AzureIdentity". For other options see <AzureAISearchConfig>.
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>",
"APIKey": ""
},
"OpenAI": {
// Name of the model used to generate text (text completion or chat completion)
"TextModel": "gpt-3.5-turbo-16k",
// The max number of tokens supported by the text model.
"TextModelMaxTokenTotal": 16384,
// Name of the model used to generate text embeddings
"EmbeddingModel": "text-embedding-ada-002",
// The max number of tokens supported by the embedding model
// See https://platform.openai.com/docs/guides/embeddings/what-are-embeddings
"EmbeddingModelMaxTokenTotal": 8191,
// OpenAI API Key
"APIKey": "",
// OpenAI Organization ID (usually empty, unless you have multiple accounts on different orgs)
"OrgId": "",
// How many times to retry in case of throttling
"MaxRetries": 10
}
}
},
"Logging": {
"LogLevel": {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ public AzureAISearchFilteringTest(ITestOutputHelper output) : base(output)
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItRendersEmptyFilters()
{
// Arrange
Expand All @@ -32,6 +34,8 @@ public void ItRendersEmptyFilters()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItRendersSimpleFilter()
{
// Arrange
Expand All @@ -46,6 +50,8 @@ public void ItRendersSimpleFilter()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItRendersSimpleFilters()
{
// Arrange
Expand All @@ -64,6 +70,8 @@ public void ItRendersSimpleFilters()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItRendersUsingSearchIn()
{
// Arrange
Expand All @@ -82,6 +90,8 @@ public void ItRendersUsingSearchIn()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItUsesSearchInWithAlternativeSeparators()
{
// Arrange
Expand All @@ -108,6 +118,8 @@ public void ItUsesSearchInWithAlternativeSeparators()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItHandlesComplexFilters()
{
// Arrange
Expand All @@ -128,6 +140,8 @@ public void ItHandlesComplexFilters()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItHandlesEdgeCase0()
{
// Arrange
Expand All @@ -147,6 +161,8 @@ public void ItHandlesEdgeCase0()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItHandlesEdgeCase1()
{
// Arrange
Expand All @@ -169,6 +185,8 @@ public void ItHandlesEdgeCase1()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItHandlesEdgeCase2()
{
// Arrange
Expand All @@ -191,6 +209,8 @@ public void ItHandlesEdgeCase2()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItHandlesEdgeCase3()
{
// Arrange
Expand All @@ -209,6 +229,8 @@ public void ItHandlesEdgeCase3()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItHandlesEdgeCase4()
{
// Arrange
Expand All @@ -228,6 +250,8 @@ public void ItHandlesEdgeCase4()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItHandlesEdgeCase5()
{
// Arrange
Expand All @@ -246,6 +270,8 @@ public void ItHandlesEdgeCase5()
}

[Fact]
[Trait("Category", "UnitTest")]
[Trait("Category", "AzAISearch")]
public void ItHandlesEdgeCase6()
{
// Arrange
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ public DefaultTests(IConfiguration cfg, ITestOutputHelper output) : base(cfg, ou
{
Assert.False(string.IsNullOrEmpty(this.OpenAiConfig.APIKey));

this._elasticsearchConfig = cfg.GetSection("Services:Elasticsearch").Get<ElasticsearchConfig>()!;
this._elasticsearchConfig = cfg.GetSection("KernelMemory:Services:Elasticsearch").Get<ElasticsearchConfig>()!;

this._memory = new KernelMemoryBuilder()
.WithSearchClientConfig(new SearchClientConfig { EmptyAnswer = NotFound })
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,56 +4,65 @@
"Default": "Information"
}
},
"Services": {
"Elasticsearch": {
"CertificateFingerPrint": "",
"Endpoint": "",
"UserName": "",
"Password": "",
},
"AzureOpenAIText": {
// "ApiKey" or "AzureIdentity"
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>.openai.azure.com/",
"APIKey": "",
"Deployment": "",
// The max number of tokens supported by model deployed
// See https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models
"MaxTokenTotal": 16384,
// "ChatCompletion" or "TextCompletion"
"APIType": "ChatCompletion",
"MaxRetries": 10
},
"AzureOpenAIEmbedding": {
// "ApiKey" or "AzureIdentity"
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>.openai.azure.com/",
"APIKey": "",
"Deployment": "",
// The max number of tokens supported by model deployed
// See https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models
"MaxTokenTotal": 8191,
},
"OpenAI": {
// Name of the model used to generate text (text completion or chat completion)
"TextModel": "gpt-3.5-turbo-16k",
// The max number of tokens supported by the text model.
"TextModelMaxTokenTotal": 16384,
// Name of the model used to generate text embeddings
"EmbeddingModel": "text-embedding-ada-002",
// The max number of tokens supported by the embedding model
// See https://platform.openai.com/docs/guides/embeddings/what-are-embeddings
"EmbeddingModelMaxTokenTotal": 8191,
// OpenAI API Key
"APIKey": "",
// OpenAI Organization ID (usually empty, unless you have multiple accounts on different orgs)
"OrgId": "",
// How many times to retry in case of throttling
"MaxRetries": 10
},
"KernelMemory": {
"Services": {
"Elasticsearch": {
// SHA-256 fingerprint. When running the docker image this is printed after starting the server
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-stack-security.html#_use_the_ca_fingerprint_5
"CertificateFingerPrint": "",
// e.g. https://localhost:9200
"Endpoint": "",
// e.g. "elastic"
"UserName": "",
"Password": "",
"IndexPrefix": "",
"ShardCount": 1,
"Replicas": 0
},
"AzureOpenAIText": {
// "ApiKey" or "AzureIdentity"
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>.openai.azure.com/",
"APIKey": "",
"Deployment": "",
// The max number of tokens supported by model deployed
// See https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models
"MaxTokenTotal": 16384,
// "ChatCompletion" or "TextCompletion"
"APIType": "ChatCompletion",
"MaxRetries": 10
},
"AzureOpenAIEmbedding": {
// "ApiKey" or "AzureIdentity"
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>.openai.azure.com/",
"APIKey": "",
"Deployment": "",
// The max number of tokens supported by model deployed
// See https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models
"MaxTokenTotal": 8191
},
"OpenAI": {
// Name of the model used to generate text (text completion or chat completion)
"TextModel": "gpt-3.5-turbo-16k",
// The max number of tokens supported by the text model.
"TextModelMaxTokenTotal": 16384,
// Name of the model used to generate text embeddings
"EmbeddingModel": "text-embedding-ada-002",
// The max number of tokens supported by the embedding model
// See https://platform.openai.com/docs/guides/embeddings/what-are-embeddings
"EmbeddingModelMaxTokenTotal": 8191,
// OpenAI API Key
"APIKey": "",
// OpenAI Organization ID (usually empty, unless you have multiple accounts on different orgs)
"OrgId": "",
// How many times to retry in case of throttling
"MaxRetries": 10
}
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -76,18 +76,13 @@ public async Task ItGeneratesText()
{
// Arrange
var prompt = """
Facts:
The public Kernel Memory project kicked off around May 2023.
Now, in December 2023, we are integrating LLama compatibility
into KM, following the steady addition of numerous features.
By January, we anticipate to complete this update and potentially
introduce more models by February.
Instructions: Reply in JSON.
Question: What's the current month?
# Current date: 12/12/2024.
# Instructions: use JSON syntax.
# Deduction: { "DayOfWeek": "Monday", "MonthName":
""";
var options = new TextGenerationOptions
{
MaxTokens = 30,
MaxTokens = 60,
Temperature = 0,
StopSequences = new List<string> { "Question" }
};
Expand All @@ -106,7 +101,7 @@ introduce more models by February.
var answer = result.ToString();

// Assert
Console.WriteLine($"=============================\n{answer}\n=============================");
Console.WriteLine($"Model Output:\n=============================\n{answer}\n=============================");
Console.WriteLine($"Time: {this._timer.ElapsedMilliseconds / 1000} secs");
Assert.Contains("december", answer, StringComparison.OrdinalIgnoreCase);
}
Expand Down
Loading

0 comments on commit d767d17

Please sign in to comment.