diff --git a/README.md b/README.md
index d1cc6132..4d671f47 100644
--- a/README.md
+++ b/README.md
@@ -58,7 +58,40 @@ const text = await generateText({
 });
 ```
 
-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
+#### generateText with MiniMax
+
+```ts
+import { generateText, openaicompatible } from "modelfusion";
+
+const text = await generateText({
+  model: openaicompatible
+    .ChatTextGenerator({
+      api: openaicompatible.MiniMaxApi(),
+      model: "MiniMax-M2.5",
+    })
+    .withTextPrompt(),
+  prompt: "Write a short story about a robot learning to love:",
+});
+```
+
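+The MiniMax configuration reads the API key from the `MINIMAX_API_KEY` environment variable by default. If you manage the key yourself, you can pass it in explicitly. A minimal sketch (`MY_MINIMAX_KEY` is a placeholder for wherever you store the key):
+
+```ts
+import { generateText, openaicompatible } from "modelfusion";
+
+const text = await generateText({
+  model: openaicompatible
+    .ChatTextGenerator({
+      // apiKey is optional; when omitted, MINIMAX_API_KEY is used.
+      api: openaicompatible.MiniMaxApi({ apiKey: process.env.MY_MINIMAX_KEY }),
+      model: "MiniMax-M2.5",
+    })
+    .withTextPrompt(),
+  prompt: "Write a haiku about the sea:",
+});
+```
+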
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [MiniMax](https://platform.minimaxi.com/document/Fast%20access)
 
 #### streamText
 
@@ -305,7 +338,7 @@ const embeddings = await embedMany({
 });
 ```
 
-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [MiniMax](https://platform.minimaxi.com/document/Fast%20access)
 
 ### [Classify Value](https://modelfusion.dev/guide/function/classify)
 
diff --git a/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.integration.test.ts b/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.integration.test.ts
new file mode 100644
index 00000000..2024af16
--- /dev/null
+++ b/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.integration.test.ts
@@ -0,0 +1,60 @@
+/**
+ * Integration tests for the MiniMax API via the OpenAI-compatible interface.
+ *
+ * These tests require a valid MINIMAX_API_KEY environment variable.
+ * Run with: MINIMAX_API_KEY=your_key npx vitest run src/model-provider/openai-compatible/MiniMaxApiConfiguration.integration.test.ts
+ */
+import { describe, it, expect } from "vitest";
+import { generateText } from "../../model-function/generate-text/generateText";
+import { streamText } from "../../model-function/generate-text/streamText";
+import { MiniMaxApiConfiguration } from "./MiniMaxApiConfiguration";
+import { OpenAICompatibleChatModel } from "./OpenAICompatibleChatModel";
+import { arrayFromAsync } from "../../test/arrayFromAsync";
+
+const SKIP = !process.env.MINIMAX_API_KEY;
+
+describe.skipIf(SKIP)("MiniMax integration", () => {
+  it("should generate text with MiniMax-M2.5", async () => {
+    const text = await generateText({
+      model: new OpenAICompatibleChatModel({
+        api: new MiniMaxApiConfiguration(),
+        model: "MiniMax-M2.5",
+        maxGenerationTokens: 50,
+      }).withTextPrompt(),
+      prompt: "Say hello in one sentence.",
+    });
+
+    expect(text).toBeTruthy();
+    expect(text.length).toBeGreaterThan(0);
+  }, 30_000);
+
+  it("should stream text with MiniMax-M2.5", async () => {
+    const stream = await streamText({
+      model: new OpenAICompatibleChatModel({
+        api: new MiniMaxApiConfiguration(),
+        model: "MiniMax-M2.5",
+        maxGenerationTokens: 50,
+      }).withTextPrompt(),
+      prompt: "Count from 1 to 5.",
+    });
+
+    const chunks = await arrayFromAsync(stream);
+    expect(chunks.length).toBeGreaterThan(0);
+    const fullText = chunks.join("");
+    expect(fullText).toBeTruthy();
+  }, 60_000);
+
+  it("should generate text with MiniMax-M2.7", async () => {
+    const text = await generateText({
+      model: new OpenAICompatibleChatModel({
+        api: new MiniMaxApiConfiguration(),
+        model: "MiniMax-M2.7",
+        maxGenerationTokens: 50,
+      }).withTextPrompt(),
+      prompt: "What is 2+2? Answer with just the number.",
+    });
+
+    expect(text).toBeTruthy();
+    expect(text).toContain("4");
+  }, 30_000);
+});
diff --git a/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.test.ts b/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.test.ts
new file mode 100644
index 00000000..342c5a42
--- /dev/null
+++ b/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.test.ts
@@ -0,0 +1,202 @@
+import { streamText } from "../../model-function/generate-text/streamText";
+import { generateText } from "../../model-function/generate-text/generateText";
+import { StreamingTestServer } from "../../test/StreamingTestServer";
+import { JsonTestServer } from "../../test/JsonTestServer";
+import { arrayFromAsync } from "../../test/arrayFromAsync";
+import { MiniMaxApiConfiguration } from "./MiniMaxApiConfiguration";
+import { OpenAICompatibleChatModel } from "./OpenAICompatibleChatModel";
+
+describe("MiniMaxApiConfiguration", () => {
+  it("should set the correct base URL", () => {
+    const api = new MiniMaxApiConfiguration({ apiKey: "test-key" });
+    expect(api.assembleUrl("/chat/completions")).toBe(
+      "https://api.minimax.io:443/v1/chat/completions"
+    );
+  });
+
+  it("should set the correct provider name", () => {
+    const api = new MiniMaxApiConfiguration({ apiKey: "test-key" });
+    expect(api.provider).toBe("openaicompatible-minimax");
+  });
+
+  it("should allow custom base URL overrides", () => {
+    const api = new MiniMaxApiConfiguration({
+      apiKey: "test-key",
+      baseUrl: { host: "custom.minimax.io" },
+    });
+    expect(api.assembleUrl("/chat/completions")).toBe(
+      "https://custom.minimax.io:443/v1/chat/completions"
+    );
+  });
+});
+
+describe("MiniMax streamText", () => {
+  const server = new StreamingTestServer(
+    "https://api.minimax.io/v1/chat/completions"
+  );
+
+  server.setupTestEnvironment();
+
+  describe("simple hello world stream", () => {
+    beforeEach(() => {
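+      // The chunks below emulate the OpenAI-compatible SSE wire format
+      // (`data: <json>` events terminated by `data: [DONE]`) that MiniMax is assumed to use.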
+      server.responseChunks = [
+        `data: {"id":"chatcmpl-minimax-001","object":"chat.completion.chunk","created":1703439030,"model":"MiniMax-M2.5",` +
+          `"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+        `data: {"id":"chatcmpl-minimax-001","object":"chat.completion.chunk","created":1703439030,"model":"MiniMax-M2.5",` +
+          `"choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}\n\n`,
+        `data: {"id":"chatcmpl-minimax-001","object":"chat.completion.chunk","created":1703439030,"model":"MiniMax-M2.5",` +
+          `"choices":[{"index":0,"delta":{"content":", "},"finish_reason":null}]}\n\n`,
+        `data: {"id":"chatcmpl-minimax-001","object":"chat.completion.chunk","created":1703439030,"model":"MiniMax-M2.5",` +
+          `"choices":[{"index":0,"delta":{"content":"world!"},"finish_reason":null}]}\n\n`,
+        `data: {"id":"chatcmpl-minimax-001","object":"chat.completion.chunk","created":1703439030,"model":"MiniMax-M2.5",` +
+          `"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+        `data: [DONE]\n\n`,
+      ];
+    });
+
+    it("should return a text stream", async () => {
+      const stream = await streamText({
+        model: new OpenAICompatibleChatModel({
+          api: new MiniMaxApiConfiguration({ apiKey: "test-key" }),
+          model: "MiniMax-M2.5",
+        }).withTextPrompt(),
+        prompt: "hello",
+      });
+
+      // note: space moved to next chunk due to trimming
+      expect(await arrayFromAsync(stream)).toStrictEqual([
+        "Hello",
+        ",",
+        " world!",
+      ]);
+    });
+
+    it("should return full text", async () => {
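+      // With fullResponse: true, streamText also returns a promise that
+      // resolves to the full concatenated text once the stream has finished.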
+      const { textPromise } = await streamText({
+        model: new OpenAICompatibleChatModel({
+          api: new MiniMaxApiConfiguration({ apiKey: "test-key" }),
+          model: "MiniMax-M2.5",
+        }).withTextPrompt(),
+        prompt: "hello",
+        fullResponse: true,
+      });
+
+      expect(await textPromise).toStrictEqual("Hello, world!");
+    });
+  });
+});
+
+describe("MiniMax generateText", () => {
+  const server = new JsonTestServer(
+    "https://api.minimax.io/v1/chat/completions"
+  );
+
+  server.setupTestEnvironment();
+
+  it("should return generated text", async () => {
+    server.responseBodyJson = {
+      id: "chatcmpl-minimax-002",
+      object: "chat.completion",
+      created: 1703439030,
+      model: "MiniMax-M2.5",
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: "assistant",
+            content: "Hello, world!",
+          },
+          finish_reason: "stop",
+        },
+      ],
+      usage: {
+        prompt_tokens: 10,
+        completion_tokens: 5,
+        total_tokens: 15,
+      },
+    };
+
+    const text = await generateText({
+      model: new OpenAICompatibleChatModel({
+        api: new MiniMaxApiConfiguration({ apiKey: "test-key" }),
+        model: "MiniMax-M2.5",
+      }).withTextPrompt(),
+      prompt: "hello",
+    });
+
+    expect(text).toStrictEqual("Hello, world!");
+  });
+
+  it("should work with MiniMax-M2.7 model", async () => {
+    server.responseBodyJson = {
+      id: "chatcmpl-minimax-003",
+      object: "chat.completion",
+      created: 1703439030,
+      model: "MiniMax-M2.7",
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: "assistant",
+            content: "I am MiniMax M2.7.",
+          },
+          finish_reason: "stop",
+        },
+      ],
+      usage: {
+        prompt_tokens: 8,
+        completion_tokens: 6,
+        total_tokens: 14,
+      },
+    };
+
+    const text = await generateText({
+      model: new OpenAICompatibleChatModel({
+        api: new MiniMaxApiConfiguration({ apiKey: "test-key" }),
+        model: "MiniMax-M2.7",
+      }).withTextPrompt(),
+      prompt: "What model are you?",
+    });
+
+    expect(text).toStrictEqual("I am MiniMax M2.7.");
+  });
+
+  it("should work with temperature setting", async () => {
+    server.responseBodyJson = {
+      id: "chatcmpl-minimax-004",
+      object: "chat.completion",
+      created: 1703439030,
+      model: "MiniMax-M2.5",
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: "assistant",
+            content: "Creative response here.",
+          },
+          finish_reason: "stop",
+        },
+      ],
+      usage: {
+        prompt_tokens: 12,
+        completion_tokens: 4,
+        total_tokens: 16,
+      },
+    };
+
+    const text = await generateText({
+      model: new OpenAICompatibleChatModel({
+        api: new MiniMaxApiConfiguration({ apiKey: "test-key" }),
+        model: "MiniMax-M2.5",
+        temperature: 0.7,
+      }).withTextPrompt(),
+      prompt: "Be creative",
+    });
+
+    expect(text).toStrictEqual("Creative response here.");
+  });
+});
diff --git a/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.ts b/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.ts
new file mode 100644
index 00000000..fe73beaa
--- /dev/null
+++ b/packages/modelfusion/src/model-provider/openai-compatible/MiniMaxApiConfiguration.ts
@@ -0,0 +1,47 @@
+import {
+  BaseUrlApiConfigurationWithDefaults,
+  PartialBaseUrlPartsApiConfigurationOptions,
+} from "../../core/api/BaseUrlApiConfiguration";
+import { loadApiKey } from "../../core/api/loadApiKey";
+import { OpenAICompatibleApiConfiguration } from "./OpenAICompatibleApiConfiguration";
+
+/**
+ * Configuration for the MiniMax API.
+ *
+ * It calls the API at https://api.minimax.io/v1 and reads the API key from the `MINIMAX_API_KEY` environment variable.
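+ *
+ * @example
+ * // reads the key from the MINIMAX_API_KEY environment variable
+ * const api = new MiniMaxApiConfiguration();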
+ *
+ * @see https://platform.minimaxi.com/document/Fast%20access
+ */
+export class MiniMaxApiConfiguration
+  extends BaseUrlApiConfigurationWithDefaults
+  implements OpenAICompatibleApiConfiguration
+{
+  constructor(
+    settings: PartialBaseUrlPartsApiConfigurationOptions & {
+      apiKey?: string;
+    } = {}
+  ) {
+    super({
+      ...settings,
+      headers: {
+        Authorization: `Bearer ${loadApiKey({
+          apiKey: settings.apiKey,
+          environmentVariableName: "MINIMAX_API_KEY",
+          description: "MiniMax",
+        })}`,
+      },
+      baseUrlDefaults: {
+        protocol: "https",
+        host: "api.minimax.io",
+        port: "443",
+        path: "/v1",
+      },
+    });
+  }
+
+  readonly provider = "openaicompatible-minimax" as const;
+}
diff --git a/packages/modelfusion/src/model-provider/openai-compatible/OpenAICompatibleFacade.ts b/packages/modelfusion/src/model-provider/openai-compatible/OpenAICompatibleFacade.ts
index 39b99d6c..5b0286f7 100644
--- a/packages/modelfusion/src/model-provider/openai-compatible/OpenAICompatibleFacade.ts
+++ b/packages/modelfusion/src/model-provider/openai-compatible/OpenAICompatibleFacade.ts
@@ -1,5 +1,6 @@
 import { PartialBaseUrlPartsApiConfigurationOptions } from "../../core/api/BaseUrlApiConfiguration";
 import { FireworksAIApiConfiguration } from "./FireworksAIApiConfiguration";
+import { MiniMaxApiConfiguration } from "./MiniMaxApiConfiguration";
 import {
   OpenAICompatibleChatModel,
   OpenAICompatibleChatSettings,
@@ -27,6 +28,21 @@
   return new FireworksAIApiConfiguration(settings);
 }
 
+/**
+ * Configuration for the MiniMax API.
+ *
+ * It calls the API at https://api.minimax.io/v1 and reads the API key from the `MINIMAX_API_KEY` environment variable.
+ *
+ * @see https://platform.minimaxi.com/document/Fast%20access
+ */
+export function MiniMaxApi(
+  settings: PartialBaseUrlPartsApiConfigurationOptions & {
+    apiKey?: string;
+  } = {}
+) {
+  return new MiniMaxApiConfiguration(settings);
+}
+
 /**
  * Configuration for the Perplexity API.
  *
diff --git a/packages/modelfusion/src/model-provider/openai-compatible/index.ts b/packages/modelfusion/src/model-provider/openai-compatible/index.ts
index 20477d14..8758946d 100644
--- a/packages/modelfusion/src/model-provider/openai-compatible/index.ts
+++ b/packages/modelfusion/src/model-provider/openai-compatible/index.ts
@@ -1,4 +1,5 @@
 export * from "./FireworksAIApiConfiguration";
+export * from "./MiniMaxApiConfiguration";
 export * from "./OpenAICompatibleApiConfiguration";
 export * from "./OpenAICompatibleChatModel";
 export * from "./OpenAICompatibleCompletionModel";