diff --git a/mcp/src/aieo/src/provider.ts b/mcp/src/aieo/src/provider.ts
index fa9d1d53d..b54ccb118 100644
--- a/mcp/src/aieo/src/provider.ts
+++ b/mcp/src/aieo/src/provider.ts
@@ -356,9 +356,23 @@ export function getTokenPricing(provider: Provider): TokenPricing {
 
 export type ThinkingSpeed = "thinking" | "fast";
 
+// Whether an Anthropic model supports `adaptive` thinking. Adaptive is only
+// available on certain Sonnet/Opus tiers; Haiku and older models reject it
+// with `invalid_request_error: adaptive thinking is not supported on this model`.
+function anthropicSupportsAdaptiveThinking(modelName?: string): boolean {
+  if (!modelName) return false;
+  const m = modelName.toLowerCase();
+  // Haiku family does not support adaptive thinking.
+  if (m.includes("haiku")) return false;
+  // Conservatively enable adaptive thinking only for known-supported families.
+  // Sonnet 4.5+ and Opus 4.5+ support it.
+  return m.includes("sonnet") || m.includes("opus");
+}
+
 export function getProviderOptions(
   provider: Provider,
-  thinkingSpeed?: ThinkingSpeed
+  thinkingSpeed?: ThinkingSpeed,
+  modelName?: string
 ) {
   const fast = thinkingSpeed === "fast";
   const explicitThinking = thinkingSpeed === "thinking";
@@ -372,9 +386,12 @@ export function getProviderOptions(
     thinking = { type: "disabled" };
   } else if (explicitThinking) {
     thinking = { type: "enabled", budgetTokens: 24000 };
-  } else {
-    // Default: let the model decide, with summarized thinking output.
+  } else if (anthropicSupportsAdaptiveThinking(modelName)) {
+    // Default for capable models: let the model decide, with summarized thinking output.
     thinking = { type: "adaptive", display: "summarized" };
+  } else {
+    // Models that don't support adaptive thinking (e.g. Haiku): disable it.
+    thinking = { type: "disabled" };
   }
   return {
     anthropic: {
diff --git a/mcp/src/aieo/src/stream.ts b/mcp/src/aieo/src/stream.ts
index 6ead0b04c..dabc450c6 100644
--- a/mcp/src/aieo/src/stream.ts
+++ b/mcp/src/aieo/src/stream.ts
@@ -47,7 +47,7 @@ export async function callModel(opts: CallModelOptions): Promise<{
     executablePath,
     modelName,
   });
-  const providerOptions = getProviderOptions(provider, thinkingSpeed);
+  const providerOptions = getProviderOptions(provider, thinkingSpeed, modelName);
   console.log(`Calling ${provider} with options:`, providerOptions);
   const result = streamText({
     model,
diff --git a/mcp/src/gitsee/agent/explore.ts b/mcp/src/gitsee/agent/explore.ts
index 20aa95733..230e3da7c 100644
--- a/mcp/src/gitsee/agent/explore.ts
+++ b/mcp/src/gitsee/agent/explore.ts
@@ -157,7 +157,7 @@ export async function gitsee_context(
     tools,
     prompt,
     system: overrides?.system_prompt || CONF.system,
-    providerOptions: getProviderOptions(llm.provider) as any,
+    providerOptions: getProviderOptions(llm.provider, undefined, llm.modelName) as any,
     stopWhen: hasToolCall("final_answer"),
     onStepFinish: (sf) => logStep(sf.content),
   });
diff --git a/mcp/src/graph/learnings.ts b/mcp/src/graph/learnings.ts
index aec9eaac3..144741086 100644
--- a/mcp/src/graph/learnings.ts
+++ b/mcp/src/graph/learnings.ts
@@ -91,7 +91,7 @@ export async function post_relevant_learnings(req: Request, res: Response) {
   try {
     const llm = resolveLLMConfig({ model: req.body.model, apiKey: req.body.apiKey, light: true });
     const model = llm.model;
-    const providerOptions = getProviderOptions(llm.provider);
+    const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);
 
     // 1. List all scopes
     const allScopes = await db.get_all_scopes();
diff --git a/mcp/src/log/agent.ts b/mcp/src/log/agent.ts
index dc5cc31dc..489137f88 100644
--- a/mcp/src/log/agent.ts
+++ b/mcp/src/log/agent.ts
@@ -59,7 +59,7 @@ export async function log_agent_context(
   opts: LogAgentOptions
 ): Promise {
   const startTime = Date.now();
-  const { model, provider } = getModelDetails(opts.modelName, opts.apiKey);
+  const { model, provider, modelId } = getModelDetails(opts.modelName, opts.apiKey);
   console.log("===> log_agent model", model);
 
   const tools = get_log_tools({
@@ -83,7 +83,7 @@ export async function log_agent_context(
     model,
     instructions: SYSTEM,
     tools,
-    providerOptions: getProviderOptions(provider) as any,
+    providerOptions: getProviderOptions(provider, undefined, modelId) as any,
     stopWhen: hasEndMarker,
     stopSequences: ["[END_OF_ANSWER]"],
     onStepFinish: (sf) => {
diff --git a/mcp/src/repo/agent.ts b/mcp/src/repo/agent.ts
index ce3887cf2..cb9d858fb 100644
--- a/mcp/src/repo/agent.ts
+++ b/mcp/src/repo/agent.ts
@@ -446,8 +446,8 @@ Apply the guidance from each skill throughout your response.`;
 
 /** Build the generate/stream call params from the prepared agent state. */
 function buildCallParams(prepared: PreparedAgent) {
-  const { finalPrompt, previousMessages, userMessage, provider, abortSignal } = prepared;
-  const providerOptions = getProviderOptions(provider as any);
+  const { finalPrompt, previousMessages, userMessage, provider, modelId, abortSignal } = prepared;
+  const providerOptions = getProviderOptions(provider as any, undefined, modelId);
   const base = abortSignal ? { providerOptions, abortSignal } : { providerOptions };
   if (previousMessages.length > 0) {
     const messagesToSend =
diff --git a/mcp/src/repo/descriptions.ts b/mcp/src/repo/descriptions.ts
index f08caf1c1..65fd2876b 100644
--- a/mcp/src/repo/descriptions.ts
+++ b/mcp/src/repo/descriptions.ts
@@ -119,7 +119,7 @@ export const describe_nodes_agent = async (req: Request, res: Response) => {
   let totalProcessed = 0;
   let totalUsage = emptyUsage();
   const model = llm.model;
-  const providerOptions = getProviderOptions(llm.provider);
+  const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);
 
   // Loop until cost limit reached or no more nodes
   while (true) {
diff --git a/mcp/src/repo/docs.ts b/mcp/src/repo/docs.ts
index 3e7c445a4..a753508a6 100644
--- a/mcp/src/repo/docs.ts
+++ b/mcp/src/repo/docs.ts
@@ -12,7 +12,7 @@ export async function learn_docs_agent(req: Request, res: Response) {
   const reqApiKey = (req.query.apiKey || req.body?.apiKey) as string | undefined;
   const llm = resolveLLMConfig({ model: reqModel, apiKey: reqApiKey });
   const model = llm.model;
-  const providerOptions = getProviderOptions(llm.provider);
+  const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);
 
   try {
     const allRepos = await db.get_repositories();
diff --git a/mcp/src/repo/workflows.ts b/mcp/src/repo/workflows.ts
index 75692beb1..a4d2c5632 100644
--- a/mcp/src/repo/workflows.ts
+++ b/mcp/src/repo/workflows.ts
@@ -30,7 +30,7 @@ export async function document_workflow(req: Request, res: Response) {
   res.json({ request_id, status: 'pending' });
   const llm = resolveLLMConfig({ model: req.body.model, apiKey: req.body.apiKey });
-  const providerOptions = getProviderOptions(llm.provider);
+  const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);
 
   (async () => {
     try {
       const result = await generateText({ model: llm.model, prompt: buildPrompt(workflow.workflow_json || JSON.stringify(workflow)), providerOptions: providerOptions as any });
@@ -52,7 +52,7 @@ export async function document_workflows(req: Request, res: Response) {
   const llm = resolveLLMConfig({ model: req.body.model, apiKey: req.body.apiKey });
   const model = llm.model;
-  const providerOptions = getProviderOptions(llm.provider);
+  const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);
 
   (async () => {
     try {
       const workflows = await db.get_all_workflows();
diff --git a/mcp/src/tools/explore/tool.ts b/mcp/src/tools/explore/tool.ts
index 18cea1115..240230f46 100644
--- a/mcp/src/tools/explore/tool.ts
+++ b/mcp/src/tools/explore/tool.ts
@@ -174,7 +174,7 @@ export async function get_context_explore(
     tools,
     prompt,
     system,
-    providerOptions: getProviderOptions(llm.provider) as any,
+    providerOptions: getProviderOptions(llm.provider, undefined, llm.modelName) as any,
     stopWhen: hasToolCall("final_answer"),
     onStepFinish: (sf) => {
       // console.log("step", JSON.stringify(sf.content, null, 2));
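
Reviewer note: the behavioral core of this patch is the Anthropic `thinking` selection in `getProviderOptions`; every other hunk just plumbs a model name through to it. Below is a minimal, self-contained sketch of that branch logic for tracing outside the diff. `resolveAnthropicThinking` is a hypothetical helper introduced only for illustration, the model-name strings are made up, and the real function also assembles the rest of the provider-options object.

```ts
type ThinkingSpeed = "thinking" | "fast";

// Mirrors anthropicSupportsAdaptiveThinking from provider.ts.
function anthropicSupportsAdaptiveThinking(modelName?: string): boolean {
  if (!modelName) return false;
  const m = modelName.toLowerCase();
  if (m.includes("haiku")) return false; // Haiku rejects `adaptive`
  return m.includes("sonnet") || m.includes("opus");
}

// Hypothetical helper: isolates the patched branch order for illustration.
function resolveAnthropicThinking(thinkingSpeed?: ThinkingSpeed, modelName?: string) {
  if (thinkingSpeed === "fast") return { type: "disabled" };
  if (thinkingSpeed === "thinking") return { type: "enabled", budgetTokens: 24000 };
  if (anthropicSupportsAdaptiveThinking(modelName))
    return { type: "adaptive", display: "summarized" };
  return { type: "disabled" }; // new fallback, e.g. Haiku with no explicit speed
}

// Illustrative model names, not an exhaustive matrix:
console.log(resolveAnthropicThinking(undefined, "claude-haiku-x"));  // { type: "disabled" }
console.log(resolveAnthropicThinking(undefined, "claude-sonnet-x")); // { type: "adaptive", display: "summarized" }
console.log(resolveAnthropicThinking("fast", "claude-sonnet-x"));    // { type: "disabled" }
console.log(resolveAnthropicThinking("thinking", "claude-haiku-x")); // { type: "enabled", budgetTokens: 24000 }
```

Note that an explicit `"thinking"` speed still requests a 24k-token budget even on Haiku, matching the patch: only the adaptive default is gated by model family.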