Skip to content

Commit

Permalink
OpenAI-DotNet 8.4.1 (#383)
Browse files Browse the repository at this point in the history
- Fix ChatRequest serialization for Azure OpenAI
  • Loading branch information
StephenHodgson authored Nov 15, 2024
1 parent 0d4ee48 commit cabbf88
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 7 deletions.
28 changes: 22 additions & 6 deletions OpenAI-DotNet/Chat/ChatRequest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,7 @@ public ChatRequest(
}
else
{
Modalities = Modality.Text;
Modalities = Modality.Text | Modality.Audio;
}

FrequencyPenalty = frequencyPenalty;
Expand Down Expand Up @@ -238,12 +238,14 @@ public ChatRequest(
/// Whether or not to store the output of this chat completion request for use in our model distillation or evals products.
/// </summary>
[JsonPropertyName("store")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public bool? Store { get; set; }

/// <summary>
/// Developer-defined tags and values used for filtering completions in the dashboard.
/// </summary>
[JsonPropertyName("metadata")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public IReadOnlyDictionary<string, object> Metadata { get; set; }

/// <summary>
Expand All @@ -253,6 +255,7 @@ public ChatRequest(
/// Defaults to 0
/// </summary>
[JsonPropertyName("frequency_penalty")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public double? FrequencyPenalty { get; }

/// <summary>
Expand All @@ -275,7 +278,7 @@ public ChatRequest(
/// This option is currently not available on the gpt-4-vision-preview model.
/// </remarks>
[JsonPropertyName("logprobs")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public bool? LogProbs { get; }

/// <summary>
Expand All @@ -286,21 +289,21 @@ public ChatRequest(
/// <see cref="LogProbs"/> must be set to true if this parameter is used.
/// </remarks>
[JsonPropertyName("top_logprobs")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public int? TopLogProbs { get; }

/// <summary>
/// The maximum number of tokens allowed for the generated answer.
/// By default, the number of tokens the model can return will be (4096 - prompt tokens).
/// </summary>
[JsonPropertyName("max_tokens")]
[Obsolete("Use MaxCompletionTokens instead")]
public int? MaxTokens { get; }
[JsonIgnore]
public int? MaxTokens => MaxCompletionTokens;

/// <summary>
/// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
/// </summary>
[JsonPropertyName("max_completion_tokens")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public int? MaxCompletionTokens { get; }

/// <summary>
Expand All @@ -312,20 +315,23 @@ public ChatRequest(

[JsonPropertyName("modalities")]
[JsonConverter(typeof(ModalityConverter))]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public Modality Modalities { get; }

/// <summary>
/// Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time.
/// This is most common when you are regenerating a file with only minor changes to most of the content.
/// </summary>
[JsonPropertyName("prediction")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public object Prediction { get; set; }

/// <summary>
/// Parameters for audio output.
/// Required when audio output is requested with modalities: ["audio"].
/// </summary>
[JsonPropertyName("audio")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public AudioConfig AudioConfig { get; }

/// <summary>
Expand All @@ -335,6 +341,7 @@ public ChatRequest(
/// Defaults to 0
/// </summary>
[JsonPropertyName("presence_penalty")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public double? PresencePenalty { get; }

[JsonPropertyName("response_format")]
Expand Down Expand Up @@ -363,6 +370,7 @@ public ChatRequest(
/// monitor changes in the backend.
/// </summary>
[JsonPropertyName("seed")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public int? Seed { get; }

/// <summary>
Expand All @@ -374,12 +382,14 @@ public ChatRequest(
/// When this parameter is set, the response body will include the service_tier utilized.
/// </summary>
[JsonPropertyName("service_tier")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public string ServiceTier { get; set; }

/// <summary>
/// Up to 4 sequences where the API will stop generating further tokens.
/// </summary>
[JsonPropertyName("stop")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public string[] Stops { get; }

/// <summary>
Expand All @@ -403,6 +413,7 @@ public ChatRequest(
/// Defaults to 1
/// </summary>
[JsonPropertyName("temperature")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public double? Temperature { get; }

/// <summary>
Expand All @@ -413,13 +424,15 @@ public ChatRequest(
/// Defaults to 1
/// </summary>
[JsonPropertyName("top_p")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public double? TopP { get; }

/// <summary>
/// A list of tools the model may call. Currently, only functions are supported as a tool.
/// Use this to provide a list of functions the model may generate JSON inputs for.
/// </summary>
[JsonPropertyName("tools")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public IReadOnlyList<Tool> Tools { get; }

/// <summary>
Expand All @@ -432,18 +445,21 @@ public ChatRequest(
/// 'auto' is the default if functions are present.<br/>
/// </summary>
[JsonPropertyName("tool_choice")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public dynamic ToolChoice { get; }

/// <summary>
/// Whether to enable parallel function calling during tool use.
/// </summary>
[JsonPropertyName("parallel_tool_calls")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public bool? ParallelToolCalls { get; }

/// <summary>
/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// </summary>
[JsonPropertyName("user")]
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public string User { get; }

/// <inheritdoc />
Expand Down
4 changes: 3 additions & 1 deletion OpenAI-DotNet/OpenAI-DotNet.csproj
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,10 @@ More context [on Roger Pincombe's blog](https://rogerpincombe.com/openai-dotnet-
<AssemblyOriginatorKeyFile>OpenAI-DotNet.pfx</AssemblyOriginatorKeyFile>
<IncludeSymbols>true</IncludeSymbols>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<Version>8.4.0</Version>
<Version>8.4.1</Version>
<PackageReleaseNotes>
Version 8.4.1
- Fix ChatRequest serialization for Azure OpenAI
Version 8.4.0
- Add realtime support
- Added o1, o1-mini, gpt-4o-mini, and gpt-4o-realtime, gpt-4o-audio model convenience properties
Expand Down

0 comments on commit cabbf88

Please sign in to comment.