Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 20 additions & 3 deletions mcp/src/aieo/src/provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -356,9 +356,23 @@ export function getTokenPricing(provider: Provider): TokenPricing {

export type ThinkingSpeed = "thinking" | "fast";

// Whether an Anthropic model accepts the `adaptive` thinking type.
// Only the Sonnet and Opus families are treated as capable; Haiku — and any
// unknown or missing model name — is excluded, because unsupported models
// reject the option with
// `invalid_request_error: adaptive thinking is not supported on this model`.
function anthropicSupportsAdaptiveThinking(modelName?: string): boolean {
  if (!modelName) return false;
  const normalized = modelName.toLowerCase();
  // Haiku is checked first so a name mentioning both families stays disabled.
  if (normalized.includes("haiku")) return false;
  // Conservative allow-list: enable only for families known to support it.
  return ["sonnet", "opus"].some((family) => normalized.includes(family));
}

export function getProviderOptions(
provider: Provider,
thinkingSpeed?: ThinkingSpeed
thinkingSpeed?: ThinkingSpeed,
modelName?: string
) {
const fast = thinkingSpeed === "fast";
const explicitThinking = thinkingSpeed === "thinking";
Expand All @@ -372,9 +386,12 @@ export function getProviderOptions(
thinking = { type: "disabled" };
} else if (explicitThinking) {
thinking = { type: "enabled", budgetTokens: 24000 };
} else {
// Default: let the model decide, with summarized thinking output.
} else if (anthropicSupportsAdaptiveThinking(modelName)) {
// Default for capable models: let the model decide, with summarized thinking output.
thinking = { type: "adaptive", display: "summarized" };
} else {
// Models that don't support adaptive thinking (e.g. Haiku): disable it.
thinking = { type: "disabled" };
}
return {
anthropic: {
Expand Down
2 changes: 1 addition & 1 deletion mcp/src/aieo/src/stream.ts
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ export async function callModel(opts: CallModelOptions): Promise<{
executablePath,
modelName,
});
const providerOptions = getProviderOptions(provider, thinkingSpeed);
const providerOptions = getProviderOptions(provider, thinkingSpeed, modelName);
console.log(`Calling ${provider} with options:`, providerOptions);
const result = streamText({
model,
Expand Down
2 changes: 1 addition & 1 deletion mcp/src/gitsee/agent/explore.ts
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ export async function gitsee_context(
tools,
prompt,
system: overrides?.system_prompt || CONF.system,
providerOptions: getProviderOptions(llm.provider) as any,
providerOptions: getProviderOptions(llm.provider, undefined, llm.modelName) as any,
stopWhen: hasToolCall("final_answer"),
onStepFinish: (sf) => logStep(sf.content),
});
Expand Down
2 changes: 1 addition & 1 deletion mcp/src/graph/learnings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ export async function post_relevant_learnings(req: Request, res: Response) {
try {
const llm = resolveLLMConfig({ model: req.body.model, apiKey: req.body.apiKey, light: true });
const model = llm.model;
const providerOptions = getProviderOptions(llm.provider);
const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);

// 1. List all scopes
const allScopes = await db.get_all_scopes();
Expand Down
4 changes: 2 additions & 2 deletions mcp/src/log/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ export async function log_agent_context(
opts: LogAgentOptions
): Promise<ContextResult> {
const startTime = Date.now();
const { model, provider } = getModelDetails(opts.modelName, opts.apiKey);
const { model, provider, modelId } = getModelDetails(opts.modelName, opts.apiKey);
console.log("===> log_agent model", model);

const tools = get_log_tools({
Expand All @@ -83,7 +83,7 @@ export async function log_agent_context(
model,
instructions: SYSTEM,
tools,
providerOptions: getProviderOptions(provider) as any,
providerOptions: getProviderOptions(provider, undefined, modelId) as any,
stopWhen: hasEndMarker,
stopSequences: ["[END_OF_ANSWER]"],
onStepFinish: (sf) => {
Expand Down
4 changes: 2 additions & 2 deletions mcp/src/repo/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -446,8 +446,8 @@ Apply the guidance from each skill throughout your response.`;

/** Build the generate/stream call params from the prepared agent state. */
function buildCallParams(prepared: PreparedAgent) {
const { finalPrompt, previousMessages, userMessage, provider, abortSignal } = prepared;
const providerOptions = getProviderOptions(provider as any);
const { finalPrompt, previousMessages, userMessage, provider, modelId, abortSignal } = prepared;
const providerOptions = getProviderOptions(provider as any, undefined, modelId);
const base = abortSignal ? { providerOptions, abortSignal } : { providerOptions };
if (previousMessages.length > 0) {
const messagesToSend =
Expand Down
2 changes: 1 addition & 1 deletion mcp/src/repo/descriptions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ export const describe_nodes_agent = async (req: Request, res: Response) => {
let totalProcessed = 0;
let totalUsage = emptyUsage();
const model = llm.model;
const providerOptions = getProviderOptions(llm.provider);
const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);

// Loop until cost limit reached or no more nodes
while (true) {
Expand Down
2 changes: 1 addition & 1 deletion mcp/src/repo/docs.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ export async function learn_docs_agent(req: Request, res: Response) {
const reqApiKey = (req.query.apiKey || req.body?.apiKey) as string | undefined;
const llm = resolveLLMConfig({ model: reqModel, apiKey: reqApiKey });
const model = llm.model;
const providerOptions = getProviderOptions(llm.provider);
const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);

try {
const allRepos = await db.get_repositories();
Expand Down
4 changes: 2 additions & 2 deletions mcp/src/repo/workflows.ts
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ export async function document_workflow(req: Request, res: Response) {
res.json({ request_id, status: 'pending' });

const llm = resolveLLMConfig({ model: req.body.model, apiKey: req.body.apiKey });
const providerOptions = getProviderOptions(llm.provider);
const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);
(async () => {
try {
const result = await generateText({ model: llm.model, prompt: buildPrompt(workflow.workflow_json || JSON.stringify(workflow)), providerOptions: providerOptions as any });
Expand All @@ -52,7 +52,7 @@ export async function document_workflows(req: Request, res: Response) {

const llm = resolveLLMConfig({ model: req.body.model, apiKey: req.body.apiKey });
const model = llm.model;
const providerOptions = getProviderOptions(llm.provider);
const providerOptions = getProviderOptions(llm.provider, undefined, llm.modelName);
(async () => {
try {
const workflows = await db.get_all_workflows();
Expand Down
2 changes: 1 addition & 1 deletion mcp/src/tools/explore/tool.ts
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ export async function get_context_explore(
tools,
prompt,
system,
providerOptions: getProviderOptions(llm.provider) as any,
providerOptions: getProviderOptions(llm.provider, undefined, llm.modelName) as any,
stopWhen: hasToolCall("final_answer"),
onStepFinish: (sf) => {
// console.log("step", JSON.stringify(sf.content, null, 2));
Expand Down
Loading