From 8ef2660b4ff2f0ba6ea32b1494e25e1295d23e9a Mon Sep 17 00:00:00 2001
From: anandgupta42
Date: Fri, 20 Mar 2026 14:52:34 -0700
Subject: [PATCH 1/3] feat: add LM Studio provider for local Qwen model support

Register `lmstudio` as an OpenAI-compatible provider in `opencode.jsonc`,
pointing at the default LM Studio local server (`localhost:1234`).
---
 .opencode/opencode.jsonc | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/.opencode/opencode.jsonc b/.opencode/opencode.jsonc
index f9a81f1169..8da70de82a 100644
--- a/.opencode/opencode.jsonc
+++ b/.opencode/opencode.jsonc
@@ -4,6 +4,25 @@
     "opencode": {
       "options": {},
     },
+    "lmstudio": {
+      "name": "LM Studio",
+      "npm": "@ai-sdk/openai-compatible",
+      "env": [],
+      "options": {
+        "apiKey": "lm-studio",
+        "baseURL": "http://localhost:1234/v1",
+      },
+      "models": {
+        "qwen": {
+          "name": "Qwen (LM Studio)",
+          "tool_call": true,
+          "limit": {
+            "context": 32768,
+            "output": 4096,
+          },
+        },
+      },
+    },
   },
   "permission": {
     "edit": {

From ba4d00fa62e1d91ace2fa78828e63a3041bd4fd5 Mon Sep 17 00:00:00 2001
From: anandgupta42
Date: Fri, 20 Mar 2026 15:01:59 -0700
Subject: [PATCH 2/3] fix: correct LM Studio port and model IDs to match actual server

- Port: 11434 (not default 1234)
- Models: `gpt-oss:20b` and `deepseek-r1:70b` (actual loaded models)
---
 .opencode/opencode.jsonc | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/.opencode/opencode.jsonc b/.opencode/opencode.jsonc
index 8da70de82a..02d56b6cff 100644
--- a/.opencode/opencode.jsonc
+++ b/.opencode/opencode.jsonc
@@ -10,11 +10,19 @@
       "env": [],
       "options": {
         "apiKey": "lm-studio",
-        "baseURL": "http://localhost:1234/v1",
+        "baseURL": "http://localhost:11434/v1",
       },
       "models": {
-        "qwen": {
-          "name": "Qwen (LM Studio)",
+        "gpt-oss:20b": {
+          "name": "GPT-OSS 20B (LM Studio)",
+          "tool_call": true,
+          "limit": {
+            "context": 32768,
+            "output": 4096,
+          },
+        },
+        "deepseek-r1:70b": {
+          "name": "DeepSeek R1 70B (LM Studio)",
           "tool_call": true,
           "limit": {
             "context": 32768,

From 7be252cf07e0d1e8302972a95eb88dd417a64daf Mon Sep 17 00:00:00 2001
From: anandgupta42
Date: Fri, 20 Mar 2026 15:27:41 -0700
Subject: [PATCH 3/3] =?UTF-8?q?fix:=20address=20PR=20review=20=E2=80=94=20?=
 =?UTF-8?q?generic=20config,=20correct=20port,=20add=20docs?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Use LM Studio default port 1234 (not Ollama's 11434)
- Replace hardcoded model IDs with commented examples users fill in
- Add `LMSTUDIO_API_KEY` env var support
- Add LM Studio section to providers.md docs
- Add LM Studio tab to quickstart.md provider examples
---
 .opencode/opencode.jsonc                | 38 +++++++++++----------
 docs/docs/configure/providers.md        | 42 +++++++++++++++++++++++++
 docs/docs/getting-started/quickstart.md | 25 +++++++++++++++
 3 files changed, 87 insertions(+), 18 deletions(-)

diff --git a/.opencode/opencode.jsonc b/.opencode/opencode.jsonc
index 02d56b6cff..c9f2430233 100644
--- a/.opencode/opencode.jsonc
+++ b/.opencode/opencode.jsonc
@@ -4,31 +4,33 @@
     "opencode": {
       "options": {},
     },
+    // LM Studio — local inference via OpenAI-compatible API
+    // 1. Open LM Studio → Developer tab → Start Server (default port: 1234)
+    // 2. Load a model in LM Studio
+    // 3. Run: curl http://localhost:1234/v1/models to find the model ID
+    // 4. Add the model ID to the "models" section below
+    // 5. Use as: altimate-code run -m lmstudio/<model-id>
     "lmstudio": {
       "name": "LM Studio",
       "npm": "@ai-sdk/openai-compatible",
-      "env": [],
+      "env": ["LMSTUDIO_API_KEY"],
       "options": {
         "apiKey": "lm-studio",
-        "baseURL": "http://localhost:11434/v1",
+        "baseURL": "http://localhost:1234/v1",
       },
       "models": {
-        "gpt-oss:20b": {
-          "name": "GPT-OSS 20B (LM Studio)",
-          "tool_call": true,
-          "limit": {
-            "context": 32768,
-            "output": 4096,
-          },
-        },
-        "deepseek-r1:70b": {
-          "name": "DeepSeek R1 70B (LM Studio)",
-          "tool_call": true,
-          "limit": {
-            "context": 32768,
-            "output": 4096,
-          },
-        },
+        // Add your loaded models here. The key must match the model ID from LM Studio.
+        // Examples:
+        // "qwen2.5-7b-instruct": {
+        //   "name": "Qwen 2.5 7B Instruct",
+        //   "tool_call": true,
+        //   "limit": { "context": 131072, "output": 8192 }
+        // },
+        // "deepseek-r1:70b": {
+        //   "name": "DeepSeek R1 70B",
+        //   "tool_call": true,
+        //   "limit": { "context": 65536, "output": 8192 }
+        // }
       },
     },
   },
diff --git a/docs/docs/configure/providers.md b/docs/docs/configure/providers.md
index a62e96d9e6..1fbd50fc1f 100644
--- a/docs/docs/configure/providers.md
+++ b/docs/docs/configure/providers.md
@@ -148,6 +148,48 @@ No API key needed. Runs entirely on your local machine.
 !!! info
     Make sure Ollama is running before starting altimate. Install it from [ollama.com](https://ollama.com) and pull your desired model with `ollama pull llama3.1`.
 
+## LM Studio (Local)
+
+Run local models through [LM Studio](https://lmstudio.ai)'s OpenAI-compatible server:
+
+```json
+{
+  "provider": {
+    "lmstudio": {
+      "name": "LM Studio",
+      "npm": "@ai-sdk/openai-compatible",
+      "env": ["LMSTUDIO_API_KEY"],
+      "options": {
+        "apiKey": "lm-studio",
+        "baseURL": "http://localhost:1234/v1"
+      },
+      "models": {
+        "qwen2.5-7b-instruct": {
+          "name": "Qwen 2.5 7B Instruct",
+          "tool_call": true,
+          "limit": { "context": 131072, "output": 8192 }
+        }
+      }
+    }
+  },
+  "model": "lmstudio/qwen2.5-7b-instruct"
+}
+```
+
+**Setup:**
+
+1. Open LM Studio → **Developer** tab → **Start Server** (default port: 1234)
+2. Load a model in LM Studio
+3. Find your model ID: `curl http://localhost:1234/v1/models`
+4. Add the model ID to the `models` section in your config
+5. Use it: `altimate-code run -m lmstudio/<model-id>`
+
+!!! tip
+    The model key in your config must match the model ID returned by LM Studio's `/v1/models` endpoint. If you change models in LM Studio, update the config to match.
+
+!!! note
+    If you changed LM Studio's default port, update the `baseURL` accordingly. No real API key is needed — the `"lm-studio"` placeholder satisfies the SDK requirement.
+
 ## OpenRouter
 
 ```json
diff --git a/docs/docs/getting-started/quickstart.md b/docs/docs/getting-started/quickstart.md
index 92ae5f1377..8de5501e50 100644
--- a/docs/docs/getting-started/quickstart.md
+++ b/docs/docs/getting-started/quickstart.md
@@ -130,6 +130,31 @@ Switch providers at any time by updating the `provider` and `model` fields in `a
 }
 ```
 
+=== "LM Studio (Local)"
+
+    ```json
+    {
+      "provider": {
+        "lmstudio": {
+          "name": "LM Studio",
+          "npm": "@ai-sdk/openai-compatible",
+          "options": {
+            "apiKey": "lm-studio",
+            "baseURL": "http://localhost:1234/v1"
+          },
+          "models": {
+            "qwen2.5-7b-instruct": {
+              "name": "Qwen 2.5 7B Instruct",
+              "tool_call": true,
+              "limit": { "context": 131072, "output": 8192 }
+            }
+          }
+        }
+      },
+      "model": "lmstudio/qwen2.5-7b-instruct"
+    }
+    ```
+
 === "OpenRouter"
 
     ```json