diff --git a/.opencode/opencode.jsonc b/.opencode/opencode.jsonc index f9a81f116..c9f243023 100644 --- a/.opencode/opencode.jsonc +++ b/.opencode/opencode.jsonc @@ -4,6 +4,35 @@ "opencode": { "options": {}, }, + // LM Studio — local inference via OpenAI-compatible API + // 1. Open LM Studio → Developer tab → Start Server (default port: 1234) + // 2. Load a model in LM Studio + // 3. Run: curl http://localhost:1234/v1/models to find the model ID + // 4. Add the model ID to the "models" section below + // 5. Use as: altimate-code run -m lmstudio/<model-id> + "lmstudio": { + "name": "LM Studio", + "npm": "@ai-sdk/openai-compatible", + "env": ["LMSTUDIO_API_KEY"], + "options": { + "apiKey": "lm-studio", + "baseURL": "http://localhost:1234/v1", + }, + "models": { + // Add your loaded models here. The key must match the model ID from LM Studio. + // Examples: + // "qwen2.5-7b-instruct": { + // "name": "Qwen 2.5 7B Instruct", + // "tool_call": true, + // "limit": { "context": 131072, "output": 8192 } + // }, + // "deepseek-r1:70b": { + // "name": "DeepSeek R1 70B", + // "tool_call": true, + // "limit": { "context": 65536, "output": 8192 } + // } + }, + }, }, "permission": { "edit": { diff --git a/docs/docs/configure/providers.md b/docs/docs/configure/providers.md index a62e96d9e..1fbd50fc1 100644 --- a/docs/docs/configure/providers.md +++ b/docs/docs/configure/providers.md @@ -148,6 +148,48 @@ No API key needed. Runs entirely on your local machine. !!! info Make sure Ollama is running before starting altimate. Install it from [ollama.com](https://ollama.com) and pull your desired model with `ollama pull llama3.1`. 
+## LM Studio (Local) + +Run local models through [LM Studio](https://lmstudio.ai)'s OpenAI-compatible server: + +```json +{ + "provider": { + "lmstudio": { + "name": "LM Studio", + "npm": "@ai-sdk/openai-compatible", + "env": ["LMSTUDIO_API_KEY"], + "options": { + "apiKey": "lm-studio", + "baseURL": "http://localhost:1234/v1" + }, + "models": { + "qwen2.5-7b-instruct": { + "name": "Qwen 2.5 7B Instruct", + "tool_call": true, + "limit": { "context": 131072, "output": 8192 } + } + } + } + }, + "model": "lmstudio/qwen2.5-7b-instruct" +} +``` + +**Setup:** + +1. Open LM Studio → **Developer** tab → **Start Server** (default port: 1234) +2. Load a model in LM Studio +3. Find your model ID: `curl http://localhost:1234/v1/models` +4. Add the model ID to the `models` section in your config +5. Use it: `altimate-code run -m lmstudio/<model-id>` + +!!! tip + The model key in your config must match the model ID returned by LM Studio's `/v1/models` endpoint. If you change models in LM Studio, update the config to match. + +!!! note + If you changed LM Studio's default port, update the `baseURL` accordingly. No real API key is needed — the `"lm-studio"` placeholder satisfies the SDK requirement. 
+ ## OpenRouter ```json diff --git a/docs/docs/getting-started/quickstart.md b/docs/docs/getting-started/quickstart.md index 92ae5f137..8de5501e5 100644 --- a/docs/docs/getting-started/quickstart.md +++ b/docs/docs/getting-started/quickstart.md @@ -130,6 +130,31 @@ Switch providers at any time by updating the `provider` and `model` fields in `a } ``` +=== "LM Studio (Local)" + + ```json + { + "provider": { + "lmstudio": { + "name": "LM Studio", + "npm": "@ai-sdk/openai-compatible", + "options": { + "apiKey": "lm-studio", + "baseURL": "http://localhost:1234/v1" + }, + "models": { + "qwen2.5-7b-instruct": { + "name": "Qwen 2.5 7B Instruct", + "tool_call": true, + "limit": { "context": 131072, "output": 8192 } + } + } + } + }, + "model": "lmstudio/qwen2.5-7b-instruct" + } + ``` + === "OpenRouter" ```json