diff --git a/async-openai/src/config.rs b/async-openai/src/config.rs
index e654f02a..5dd5a982 100644
--- a/async-openai/src/config.rs
+++ b/async-openai/src/config.rs
@@ -335,7 +335,7 @@ mod test {
     }
 
     async fn dynamic_dispatch_compiles(client: &Client<Box<dyn Config>>) {
-        let _ = client.chat().create(CreateChatCompletionRequest {
+        std::mem::drop(client.chat().create(CreateChatCompletionRequest {
             model: "gpt-4o".to_string(),
             messages: vec![ChatCompletionRequestMessage::User(
                 ChatCompletionRequestUserMessage {
@@ -344,7 +344,7 @@ mod test {
                 },
             )],
             ..Default::default()
-        });
+        }));
     }
 
     #[tokio::test]
@@ -358,7 +358,11 @@ mod test {
         let _ = dynamic_dispatch_compiles(&azure_client).await;
         let _ = dynamic_dispatch_compiles(&oai_client).await;
 
-        let _ = tokio::spawn(async move { dynamic_dispatch_compiles(&azure_client).await });
-        let _ = tokio::spawn(async move { dynamic_dispatch_compiles(&oai_client).await });
+        std::mem::drop(tokio::spawn(async move {
+            dynamic_dispatch_compiles(&azure_client).await
+        }));
+        std::mem::drop(tokio::spawn(async move {
+            dynamic_dispatch_compiles(&oai_client).await
+        }));
     }
 }
diff --git a/async-openai/src/types/chat/chat_.rs b/async-openai/src/types/chat/chat_.rs
index 9da8368b..d2af6f47 100644
--- a/async-openai/src/types/chat/chat_.rs
+++ b/async-openai/src/types/chat/chat_.rs
@@ -440,6 +440,9 @@ pub struct ChatCompletionResponseMessage {
     /// The contents of the message.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub content: Option<String>,
+    /// Provider-specific reasoning content (for example BigModel `reasoning_content`).
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub reasoning_content: Option<String>,
     /// The refusal message generated by the model.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub refusal: Option<String>,
@@ -736,6 +739,21 @@ pub struct ChatCompletionAudio {
     pub format: ChatCompletionAudioFormat,
 }
 
+#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
+#[serde(rename_all = "lowercase")]
+pub enum ChatThinkingType {
+    Enabled,
+    Disabled,
+}
+
+#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
+pub struct ChatThinking {
+    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
+    pub r#type: Option<ChatThinkingType>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub clear_thinking: Option<bool>,
+}
+
 #[derive(Clone, Serialize, Default, Debug, Builder, Deserialize, PartialEq)]
 #[builder(name = "CreateChatCompletionRequestArgs")]
 #[builder(pattern = "mutable")]
@@ -784,6 +802,10 @@ pub struct CreateChatCompletionRequest {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub reasoning_effort: Option<ReasoningEffort>,
 
+    /// Provider-specific thinking controls (for example BigModel `thinking`).
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub thinking: Option<ChatThinking>,
+
     /// An upper bound for the number of tokens that can be generated for a completion, including
     /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
     #[serde(skip_serializing_if = "Option::is_none")]
     pub max_completion_tokens: Option<u32>,
@@ -1138,6 +1160,9 @@ pub enum FunctionType {
 pub struct ChatCompletionStreamResponseDelta {
     /// The contents of the chunk message.
     pub content: Option<String>,
+    /// Provider-specific reasoning delta (for example BigModel `reasoning_content`).
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub reasoning_content: Option<String>,
     /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.
     #[deprecated]
     pub function_call: Option<FunctionCallStream>,