Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions crates/coverage-report/src/requests_expected_differences.json
Original file line number Diff line number Diff line change
Expand Up @@ -654,6 +654,28 @@
"skip": true,
"reason": "Google code_execution is provider-specific and is not a lossless equivalent of Anthropic bash"
},
{
"testCase": "codeInterpreterToolParam",
"source": "Google",
"target": "ChatCompletions",
"fields": [
{ "pattern": "params.tools", "reason": "Google code_execution has no OpenAI Chat Completions equivalent and is dropped" },
{ "pattern": "messages[*].content.length", "reason": "Google executableCode/codeExecutionResult followup content does not roundtrip through Chat Completions" }
]
},
{
"testCase": "codeInterpreterToolParam",
"source": "Google",
"target": "Responses",
"fields": [
{ "pattern": "params.tools[*].builtin_type", "reason": "Google code_execution is canonicalized to the OpenAI Responses code_interpreter builtin type" },
{ "pattern": "params.tools[*].name", "reason": "Google code_execution is canonicalized to the OpenAI Responses code_interpreter builtin name" },
{ "pattern": "params.tools[*].provider", "reason": "Google code_execution is canonicalized from a Google builtin to a Responses builtin" },
{ "pattern": "params.tools[*].config.type", "reason": "Google code_execution is normalized to OpenAI code_interpreter config" },
{ "pattern": "params.tools[*].config.container", "reason": "Google code_execution is normalized to OpenAI code_interpreter container config" },
{ "pattern": "messages[*].content[*].provider_options", "reason": "Google code execution metadata is not losslessly representable through Responses message content" }
]
},
{
"testCase": "toolChoiceRequiredWithReasoningParam",
"source": "Google",
Expand Down
64 changes: 64 additions & 0 deletions crates/lingua/src/processing/transform.rs
Original file line number Diff line number Diff line change
Expand Up @@ -758,6 +758,70 @@ mod tests {
assert!(output.get("top_p").is_none(), "Should not have top_p");
}

#[test]
#[cfg(all(feature = "openai", feature = "google"))]
fn test_google_code_execution_maps_to_responses_code_interpreter() {
    // A Google-format request that enables the codeExecution builtin tool.
    let request_body = json!({
        "contents": [{
            "role": "user",
            "parts": [{"text": "Execute Python code to generate a random number"}]
        }],
        "tools": [{
            "codeExecution": {}
        }]
    });

    let transformed = transform_request(
        to_bytes(&request_body),
        ProviderFormat::Responses,
        Some("gpt-5-nano"),
    )
    .unwrap();

    // The payload must actually be converted, with Google detected as the source.
    assert!(!transformed.is_passthrough());
    assert_eq!(transformed.source_format(), Some(ProviderFormat::Google));

    let parsed: Value = crate::serde_json::from_slice(transformed.as_bytes()).unwrap();
    assert_eq!(parsed.get("model").unwrap().as_str().unwrap(), "gpt-5-nano");

    // codeExecution should become the Responses code_interpreter builtin with
    // the default "auto" container.
    let expected_tools = json!([
        {
            "type": "code_interpreter",
            "container": {
                "type": "auto"
            }
        }
    ]);
    assert_eq!(
        parsed.get("tools"),
        Some(&expected_tools),
        "Google codeExecution should map to Responses code_interpreter"
    );
}

#[test]
#[cfg(all(feature = "openai", feature = "google"))]
fn test_google_code_execution_is_stripped_for_chat_requests() {
    // Same Google codeExecution request, but targeting Chat Completions,
    // which has no equivalent builtin tool.
    let request_body = json!({
        "contents": [{
            "role": "user",
            "parts": [{"text": "Execute Python code to generate a random number"}]
        }],
        "tools": [{
            "codeExecution": {}
        }]
    });

    let transformed = transform_request(
        to_bytes(&request_body),
        ProviderFormat::ChatCompletions,
        Some("gpt-5-nano"),
    )
    .unwrap();

    assert!(!transformed.is_passthrough());
    assert_eq!(transformed.source_format(), Some(ProviderFormat::Google));

    let parsed: Value = crate::serde_json::from_slice(transformed.as_bytes()).unwrap();
    assert_eq!(parsed.get("model").unwrap().as_str().unwrap(), "gpt-5-nano");

    // With the only tool dropped, no "tools" key should survive at all.
    assert!(
        parsed.get("tools").is_none(),
        "Google codeExecution should be stripped for Chat Completions"
    );
}

#[test]
#[cfg(feature = "openai")]
fn test_non_reasoning_model_still_passthroughs() {
Expand Down
95 changes: 88 additions & 7 deletions crates/lingua/src/universal/tools.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,8 @@ use crate::providers::anthropic::generated::{
};
use crate::providers::google::generated::GoogleSearch;
use crate::providers::openai::generated::{
ApproximateLocation, Tool as OpenAIResponsesTool, UserLocationType as OpenAIUserLocationType,
WebSearchTool,
ApproximateLocation, CodeInterpreterTool, Tool as OpenAIResponsesTool,
UserLocationType as OpenAIUserLocationType, WebSearchTool,
};
use crate::serde_json::{self, json, Map, Value};

Expand Down Expand Up @@ -526,6 +526,28 @@ impl UniversalTool {
error: e.to_string(),
})
}
BuiltinToolProvider::Google if builtin_type == "code_execution" => {
let _google_config =
config
.clone()
.ok_or_else(|| ConvertError::UnsupportedToolType {
tool_name: self.name.clone(),
tool_type: builtin_type.clone(),
target_provider: ProviderFormat::Responses,
})?;

let tool = OpenAIResponsesTool::CodeInterpreter(CodeInterpreterTool {
container: json!({ "type": "auto" }),
});

serde_json::to_value(tool).map_err(|e| ConvertError::JsonSerializationFailed {
field: format!(
"OpenAI responses Google code execution tool conversion for '{}'",
self.name
),
error: e.to_string(),
})
}
BuiltinToolProvider::Anthropic
| BuiltinToolProvider::Converse
| BuiltinToolProvider::Google => Err(ConvertError::UnsupportedToolType {
Expand Down Expand Up @@ -558,7 +580,9 @@ pub fn tools_to_openai_chat_value(tools: &[UniversalTool]) -> Result<Option<Valu
tool_type,
target_provider: ProviderFormat::ChatCompletions,
..
}) if tool_type == "web_search_20250305" || tool_type == "google_search" => {}
}) if tool_type == "web_search_20250305"
|| tool_type == "google_search"
|| tool_type == "code_execution" => {}
Err(err) => return Err(err),
}
}
Expand All @@ -577,10 +601,12 @@ pub fn tools_to_responses_value(tools: &[UniversalTool]) -> Result<Option<Value>
if tools.is_empty() {
return Ok(None);
}
let converted: Vec<Value> = tools
.iter()
.map(|t| t.to_responses_value())
.collect::<Result<Vec<_>, _>>()?;
let mut converted = Vec::new();

for tool in tools {
converted.push(tool.to_responses_value()?);
}

Ok(Some(Value::Array(converted)))
}

Expand Down Expand Up @@ -926,6 +952,19 @@ mod tests {
assert!(result.is_err());
}

#[test]
fn test_batch_conversion_to_openai_chat_drops_google_code_execution() {
    // Chat Completions has no code-interpreter builtin, so the Google
    // code_execution tool is dropped; an all-dropped batch yields None.
    let only_code_execution = [UniversalTool::builtin(
        "code_execution",
        BuiltinToolProvider::Google,
        "code_execution",
        Some(json!({})),
    )];

    assert!(tools_to_openai_chat_value(&only_code_execution)
        .unwrap()
        .is_none());
}

#[test]
fn test_anthropic_web_search_to_openai_chat_is_unsupported_without_filters() {
let tool = UniversalTool::builtin(
Expand Down Expand Up @@ -1098,6 +1137,48 @@ mod tests {
}
}

#[test]
fn test_google_code_execution_to_responses() {
    // A single Google code_execution builtin should convert to the OpenAI
    // Responses code_interpreter tool with an "auto" container.
    let google_tool = UniversalTool::builtin(
        "code_execution",
        BuiltinToolProvider::Google,
        "code_execution",
        Some(json!({})),
    );

    let converted = google_tool.to_responses_value().unwrap();

    match serde_json::from_value::<OpenAIResponsesTool>(converted).unwrap() {
        OpenAIResponsesTool::CodeInterpreter(code_interpreter) => {
            assert_eq!(code_interpreter.container, json!({ "type": "auto" }));
        }
        other => panic!("expected code_interpreter tool, got {:?}", other),
    }
}

#[test]
fn test_batch_conversion_to_responses_maps_google_code_execution() {
    // Batch conversion should carry the Google code_execution builtin through
    // as a Responses code_interpreter entry rather than dropping it.
    let google_tools = [UniversalTool::builtin(
        "code_execution",
        BuiltinToolProvider::Google,
        "code_execution",
        Some(json!({})),
    )];

    let expected = json!([
        {
            "type": "code_interpreter",
            "container": {
                "type": "auto"
            }
        }
    ]);

    assert_eq!(
        tools_to_responses_value(&google_tools).unwrap(),
        Some(expected)
    );
}

#[test]
fn test_google_function_declaration_to_openai_tool_parameters_are_object() {
let decl: FunctionDeclaration = serde_json::from_value(json!({
Expand Down
Loading
Loading