diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/AI.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/AI.cs
index 96a9fdd98..e87693f25 100644
--- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/AI.cs
+++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/AI.cs
@@ -14,7 +14,7 @@ namespace Microsoft.Teams.AI.AI
///
///
/// The AI system is responsible for generating plans, moderating input and output, and
- /// generating prompts. It can be used free standing or routed to by the Application object.
+ /// generating prompts. It can be used free-standing or routed to by the <see cref="Application{TState}"/> object.
///
/// Optional. Type of the turn state.
public class AI<TState> where TState : TurnState
@@ -65,7 +65,7 @@ public AI(AIOptions<TState> options, ILoggerFactory? loggerFactory = null)
/// Registers a handler for a named action.
///
///
- /// The AI systems planner returns plans that are made up of a series of commands or actions
+ /// The AI system's planner returns plans that are made up of a series of commands or actions
/// that should be performed. Registering a handler lets you provide code that should be run in
/// response to one of the predicted actions.
///
@@ -112,7 +112,7 @@ public AI<TState> RegisterAction(string name, IActionHandler<TState> handler)
/// Registers the default handler for a named action.
///
///
- /// Default handlers can be replaced by calling the RegisterAction() method with the same name.
+ /// Default handlers can be replaced by calling the <see cref="RegisterAction(string, IActionHandler{TState})"/> method with the same name.
///
/// The name of the action.
/// The action handler function.
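
Reviewer note: the RegisterAction remarks above call for handler code, so here is a minimal registration sketch. IActionHandler<TState> and RegisterAction appear in this diff's hunk headers, but the exact PerformActionAsync signature, the namespaces in the usings, the "LightsOn" action name, and the `app` instance are illustrative assumptions rather than verified API.

    using System.Threading;
    using System.Threading.Tasks;
    using Microsoft.Bot.Builder;
    using Microsoft.Teams.AI.AI.Action; // namespace assumed
    using Microsoft.Teams.AI.State;

    // Hypothetical handler for a planner-predicted "LightsOn" action.
    // The method shape below is sketched from IActionHandler<TState>; check
    // the interface definition before copying.
    public class LightsOnHandler : IActionHandler<TurnState>
    {
        public async Task<string> PerformActionAsync(
            ITurnContext turnContext,
            TurnState turnState,
            object? entities = null,
            string? action = null,
            CancellationToken cancellationToken = default)
        {
            // Do the app-specific work for the predicted action.
            await turnContext.SendActivityAsync("The lights are now on.", cancellationToken: cancellationToken);

            // An empty result signals completion without feeding extra text
            // back into the plan (assumed convention).
            return string.Empty;
        }
    }

    // Given an existing Application instance `app` (assumed), registering
    // under an existing name replaces the default handler, per the remarks:
    app.AI.RegisterAction("LightsOn", new LightsOnHandler());
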
diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClient.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClient.cs
index f633cc48b..9ce0444d2 100644
--- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClient.cs
+++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClient.cs
@@ -13,35 +13,35 @@ namespace Microsoft.Teams.AI.AI.Clients
/// LLMClient class that's used to complete prompts.
///
///
- /// Each wave, at a minimum needs to be configured with a `client`, `prompt`, and `prompt_options`.
+ /// Each LLMClient, at a minimum, needs to be configured with a <see cref="LLMClientOptions{TContent}.Model"/> and <see cref="LLMClientOptions{TContent}.Template"/>.
///
- /// Configuring the wave to use a `validator` is optional but recommended. The primary benefit to
+ /// Configuring the LLMClient to use a <see cref="LLMClientOptions{TContent}.Validator"/> is optional but recommended. The primary benefit of
/// using LLMClient is its response validation and automatic response repair features. The
/// validator acts as a guard and guarantees that you never get a malformed response back from the
/// model. At least not without it being flagged as an `invalid_response`.
///
- /// Using the `JsonResponseValidator`, for example, guarantees that you only ever get a valid
- /// object back from `CompletePromptAsync()`. In fact, you'll get back a fully parsed object and any
- /// additional response text from the model will be dropped. If you give the `JsonResponseValidator`
+ /// Using the <see cref="JsonResponseValidator"/>, for example, guarantees that you only ever get a valid
+ /// object back from <see cref="CompletePromptAsync(ITurnContext, IMemory, IPromptFunctions{List{string}}, CancellationToken)"/>. In fact, you'll get back a fully parsed object and any
+ /// additional response text from the model will be dropped. If you give the <see cref="JsonResponseValidator"/>
/// a JSON Schema, you will get back a strongly typed and validated instance of an object in
/// the returned `response.message.content`.
///
/// When a validator detects a bad response from the model, it gives the model "feedback" as to the
/// problem it detected with its response and, more importantly, an instruction that tells the model
- /// how it should repair the problem. This puts the wave into a special repair mode where it first
+ /// how it should repair the problem. This puts the LLMClient into a special repair mode where it first
/// forks the memory for the conversation and then has a side conversation with the model in an
/// effort to get it to repair its response. Forking the conversation isolates the bad
/// response and prevents it from contaminating the main conversation history. If the response can
- /// be repaired, the wave will un-fork the memory and use the repaired response in place of the
+ /// be repaired, the LLMClient will un-fork the memory and use the repaired response in place of the
/// original bad response. To the model it's as if it never made a mistake, which is important for
/// future turns with the model. If the response can't be repaired, a response status of
/// `invalid_response` will be returned.
///
- /// When using a well designed validator, like the `JsonResponseValidator`, the wave can typically
+ /// When using a well-designed validator, like the <see cref="JsonResponseValidator"/>, the LLMClient can typically
/// repair a bad response in a single additional model call. Sometimes it takes a couple of calls
/// to effect a repair and occasionally it won't be able to repair it at all. If your prompt is
/// well designed and you only occasionally see failed repair attempts, I'd recommend just calling
- /// the wave a second time. Given the stochastic nature of these models, there's a decent chance
+ /// the LLMClient a second time. Given the stochastic nature of these models, there's a decent chance
/// it won't make the same mistake on the second call. A well-designed prompt coupled with a
/// well-designed validator should make calling these models somewhere close to 99%
/// reliable.
@@ -49,13 +49,6 @@ namespace Microsoft.Teams.AI.AI.Clients
/// This "feedback" technique works with all the GPT-3 generation of models and I've tested it with
/// `text-davinci-003`, `gpt-3.5-turbo`, and `gpt-4`. There's a good chance it will work with other
/// open source models like `LLaMA` and Google's `Bard`, but I have yet to test it with those models.
- ///
- /// LLMClient supports OpenAI's functions feature and can validate the models response against the
- /// schema for the supported functions. When an LLMClient is configured with both a `OpenAIModel`
- /// and a `FunctionResponseValidator`, the model will be cloned and configured to send the
- /// validators configured list of functions with the request. There's no need to separately
- /// configure the models `functions` list, but if you do, the models functions list will be sent
- /// instead.
///
///
/// Type of message content returned for a 'success' response. The `response.message.content` field will be of type TContent.
@@ -120,7 +113,7 @@ public void AddFunctionResultToHistory(IMemory memory, string name, object resul
/// conversation history and formatted like `{ role: 'user', content: input }`.
///
/// It's important to note that if you want the user's input sent to the model as part of the
- /// prompt, you will need to add a `UserMessageSection` to your prompt. The wave does not do
+ /// prompt, you will need to add a `UserMessageSection` to your prompt. The LLMClient does not do
/// anything to modify your prompt, except when performing repairs and those changes are
/// temporary.
///
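
Reviewer note: since response validation and repair are the heart of these remarks, a compact usage sketch follows. LLMClient, LLMClientOptions, and JsonResponseValidator appear in this diff; the constructor shapes, the Validator property, the PromptResponseStatus values, and the parameter types are assumptions drawn from the doc text, not verified signatures.

    using Microsoft.Teams.AI.AI.Clients;    // namespaces assumed
    using Microsoft.Teams.AI.AI.Validators;

    // Sketch only, under the assumptions stated above.
    static async Task<object?> CompleteWithValidationAsync(
        IPromptCompletionModel model,
        PromptTemplate promptTemplate,
        ITurnContext turnContext,
        IMemory memory,
        IPromptFunctions<List<string>> promptFunctions,
        CancellationToken cancellationToken)
    {
        LLMClientOptions<object> options = new(model, promptTemplate)
        {
            // With a validator set, a malformed reply is either repaired in a
            // forked history or surfaced as `invalid_response`; it is never
            // returned silently.
            Validator = new JsonResponseValidator(),
        };
        LLMClient<object> client = new(options);

        PromptResponse response = await client.CompletePromptAsync(
            turnContext, memory, promptFunctions, cancellationToken);

        if (response.Status == PromptResponseStatus.Success)
        {
            // Fully parsed content; extra response text has been dropped.
            return response.Message?.Content;
        }

        // Per the remarks, calling CompletePromptAsync a second time is a
        // reasonable fallback for the occasional failed repair.
        return null;
    }
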
diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlanner.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlanner.cs
index 85acae3e6..a95cd6727 100644
--- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlanner.cs
+++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlanner.cs
@@ -14,15 +14,15 @@ namespace Microsoft.Teams.AI.AI.Planners
/// The ActionPlanner is a powerful planner that uses an LLM to generate plans. The planner can
/// trigger parameterized actions and send text-based responses to the user. The ActionPlanner
/// supports the following advanced features:
- /// - //////Augmentations:////// Augmentations virtually eliminate the need for prompt engineering. Prompts
+ /// - Augmentations: Augmentations virtually eliminate the need for prompt engineering. Prompts
/// can be configured to use a named augmentation which will be automatically appended to the outgoing
/// prompt. Augmentations let the developer specify whether they want to support multi-step plans (sequence),
/// use OpenAI's functions support (functions), or create an AutoGPT-style agent (monologue).
- /// - //////Validations:////// Validators are used to validate the response returned by the LLM and can guarantee
+ /// - Validations: Validators are used to validate the response returned by the LLM and can guarantee
/// that the parameters passed to an action match a supplied schema. The validator used is automatically
/// selected based on the augmentation being used. Validators also prevent hallucinated action names,
/// making it impossible for the LLM to trigger an action that doesn't exist.
- /// - //////Repair:////// The ActionPlanner will automatically attempt to repair invalid responses returned by the
+ /// - Repair: The ActionPlanner will automatically attempt to repair invalid responses returned by the
/// LLM using a feedback loop. When a validation fails, the ActionPlanner sends the error back to the
/// model, along with an instruction asking it to fix its mistake. This feedback technique leads to a
/// dramatic reduction in the number of invalid responses returned by the model.
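
Reviewer note: as a rough picture of how the bullets above fit together, this sketch builds an ActionPlanner whose augmentation is chosen by the prompt it loads. The type names come from this diff and the SDK, but every constructor shape, the PromptFolder path, and the "sequence" prompt name are assumptions.

    // Wiring sketch under the assumptions stated above.
    static ActionPlanner<TurnState> CreatePlanner()
    {
        OpenAIModel model = new(new OpenAIModelOptions("<api-key>", "gpt-4"));
        PromptManager prompts = new(new PromptManagerOptions { PromptFolder = "./Prompts" });

        // The selected prompt's configuration names the augmentation
        // (sequence, monologue, or functions); the matching validator is then
        // picked automatically, as the Validations bullet describes.
        return new ActionPlanner<TurnState>(
            new ActionPlannerOptions<TurnState>(
                model,
                prompts,
                (context, state, planner) => Task.FromResult(prompts.GetPrompt("sequence"))));
    }
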
diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlannerOptions.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlannerOptions.cs
index 5783451d2..042258b29 100644
--- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlannerOptions.cs
+++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlannerOptions.cs
@@ -44,7 +44,7 @@ public class ActionPlannerOptions<TState> where TState : TurnState, IMemory
/// tokenizer to use.
///
///
- /// If not specified, a new `GPTTokenizer` instance will be created.
+ /// If not specified, a new <see cref="GPTTokenizer"/> instance will be created.
///
public ITokenizer Tokenizer { get; set; } = new GPTTokenizer();
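
Reviewer note: a brief usage point on the default above. Tokenizer is an ordinary settable property, so overriding the GPTTokenizer default is a single assignment; the helper below is purely illustrative.

    // Only needed when a different ITokenizer implementation is required;
    // otherwise the GPTTokenizer default shown above applies.
    static void UseCustomTokenizer(ActionPlannerOptions<TurnState> options, ITokenizer tokenizer)
    {
        options.Tokenizer = tokenizer;
    }
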
diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/ApplicationOptions.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/ApplicationOptions.cs
index a88e8437f..39395883c 100644
--- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/ApplicationOptions.cs
+++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/ApplicationOptions.cs
@@ -16,7 +16,7 @@ public class ApplicationOptions<TState>
/// Optional. Bot adapter being used.
///
///
- /// If using the LongRunningMessages option or calling the ContinueConversationAsync method, this property is required.
+ /// If using the <see cref="LongRunningMessages"/> option, calling the <see cref="Application{TState}.ContinueConversationAsync"/> method, or configuring user authentication, this property is required.
///
public BotAdapter? Adapter { get; set; }
@@ -24,7 +24,7 @@ public class ApplicationOptions<TState>
/// Optional. Application ID of the bot.
///
///
- /// If using the <see cref="LongRunningMessages"/> option or calling the <see cref="Application{TState}.ContinueConversationAsync"/> method, this property is required.
+ /// If using the <see cref="LongRunningMessages"/> option, calling the <see cref="Application{TState}.ContinueConversationAsync"/> method, or configuring user authentication, this property is required.
///
public string? BotAppId { get; set; }
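
Reviewer note: the two remarks just updated tie Adapter and BotAppId to the same scenarios, so a sketch of options that satisfy both may help. The property names come from this diff; the generic TurnState parameter, the Application constructor shape, and the configuration key are assumptions.

    // Sketch under the assumptions stated above.
    static Application<TurnState> CreateApp(BotAdapter adapter, IConfiguration configuration)
    {
        ApplicationOptions<TurnState> appOptions = new()
        {
            Adapter = adapter,                          // e.g. a CloudAdapter (assumed)
            BotAppId = configuration["MicrosoftAppId"], // required alongside Adapter here
            LongRunningMessages = true,                 // one of the features requiring both
        };
        return new Application<TurnState>(appOptions);
    }
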
diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Microsoft.Teams.AI.csproj b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Microsoft.Teams.AI.csproj
index ab2a8be0a..d25e91320 100644
--- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Microsoft.Teams.AI.csproj
+++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Microsoft.Teams.AI.csproj
@@ -7,7 +7,7 @@
enable
Microsoft.Teams.AI
Microsoft Teams AI SDK
- <Version>1.0.0</Version>
+ <Version>1.0.1</Version>
Microsoft
Microsoft
© Microsoft Corporation. All rights reserved.