Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 8 additions & 4 deletions async-openai/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -335,7 +335,7 @@ mod test {
}

async fn dynamic_dispatch_compiles(client: &Client<Box<dyn Config>>) {
let _ = client.chat().create(CreateChatCompletionRequest {
std::mem::drop(client.chat().create(CreateChatCompletionRequest {
model: "gpt-4o".to_string(),
messages: vec![ChatCompletionRequestMessage::User(
ChatCompletionRequestUserMessage {
Expand All @@ -344,7 +344,7 @@ mod test {
},
)],
..Default::default()
});
}));
}

#[tokio::test]
Expand All @@ -358,7 +358,11 @@ mod test {
let _ = dynamic_dispatch_compiles(&azure_client).await;
let _ = dynamic_dispatch_compiles(&oai_client).await;

let _ = tokio::spawn(async move { dynamic_dispatch_compiles(&azure_client).await });
let _ = tokio::spawn(async move { dynamic_dispatch_compiles(&oai_client).await });
std::mem::drop(tokio::spawn(async move {
dynamic_dispatch_compiles(&azure_client).await
}));
std::mem::drop(tokio::spawn(async move {
dynamic_dispatch_compiles(&oai_client).await
}));
}
}
25 changes: 25 additions & 0 deletions async-openai/src/types/chat/chat_.rs
Original file line number Diff line number Diff line change
Expand Up @@ -440,6 +440,9 @@ pub struct ChatCompletionResponseMessage {
/// The contents of the message.
#[serde(skip_serializing_if = "Option::is_none")]
pub content: Option<String>,
/// Provider-specific reasoning content (for example BigModel `reasoning_content`).
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_content: Option<String>,
/// The refusal message generated by the model.
#[serde(skip_serializing_if = "Option::is_none")]
pub refusal: Option<String>,
Expand Down Expand Up @@ -736,6 +739,21 @@ pub struct ChatCompletionAudio {
pub format: ChatCompletionAudioFormat,
}

/// Whether the provider's extended "thinking" mode is enabled.
///
/// Serialized in lowercase (`"enabled"` / `"disabled"`) to match the wire
/// format of providers that accept a `thinking` request field (for example
/// BigModel).
// Fieldless enum: Copy/Eq/Hash are free, backward-compatible additions that
// let callers compare and key on the value without cloning.
#[derive(Clone, Copy, Serialize, Debug, Deserialize, PartialEq, Eq, Hash)]
#[serde(rename_all = "lowercase")]
pub enum ChatThinkingType {
    /// Ask the model to produce reasoning/thinking output.
    Enabled,
    /// Suppress reasoning/thinking output.
    Disabled,
}

/// Provider-specific "thinking" controls for a chat completion request
/// (for example BigModel's `thinking` request field).
///
/// Both fields are optional and are omitted from the serialized request
/// when `None`, so `ChatThinking::default()` serializes to an empty object.
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
pub struct ChatThinking {
    /// Whether thinking is enabled; serialized as `"type"` on the wire
    /// (renamed because `type` is a Rust keyword).
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub r#type: Option<ChatThinkingType>,
    /// Presumably controls whether prior thinking content is cleared between
    /// turns — NOTE(review): semantics inferred from the field name only;
    /// confirm against the provider's API documentation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub clear_thinking: Option<bool>,
}

#[derive(Clone, Serialize, Default, Debug, Builder, Deserialize, PartialEq)]
#[builder(name = "CreateChatCompletionRequestArgs")]
#[builder(pattern = "mutable")]
Expand Down Expand Up @@ -784,6 +802,10 @@ pub struct CreateChatCompletionRequest {
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffort>,

/// Provider-specific thinking controls (for example BigModel `thinking`).
#[serde(skip_serializing_if = "Option::is_none")]
pub thinking: Option<ChatThinking>,

/// An upper bound for the number of tokens that can be generated for a completion, including
/// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
#[serde(skip_serializing_if = "Option::is_none")]
Expand Down Expand Up @@ -1138,6 +1160,9 @@ pub enum FunctionType {
pub struct ChatCompletionStreamResponseDelta {
/// The contents of the chunk message.
pub content: Option<String>,
/// Provider-specific reasoning delta (for example BigModel `reasoning_content`).
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_content: Option<String>,
/// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.
#[deprecated]
pub function_call: Option<FunctionCallStream>,
Expand Down