@@ -174,13 +183,13 @@ Monitor tool call progress in real-time:
const processor = new StreamProcessor({
handlers: {
onToolCallStateChange: (index, id, name, state, args, parsedArgs) => {
- console.log(`Tool ${name} is now ${state}`);
+ console.log(`Tool ${name} is now ${state}`)
if (parsedArgs) {
- console.log("Parsed arguments so far:", parsedArgs);
+ console.log('Parsed arguments so far:', parsedArgs)
}
- }
- }
-});
+ },
+ },
+})
```
### 2. Message Converters
@@ -191,17 +200,17 @@ Convert between UIMessages and ModelMessages:
import {
uiMessageToModelMessages,
modelMessageToUIMessage,
- modelMessagesToUIMessages
-} from "@tanstack/ai-client";
+ modelMessagesToUIMessages,
+} from '@tanstack/ai-client'
// Convert UI message to model message(s)
-const modelMessages = uiMessageToModelMessages(uiMessage);
+const modelMessages = uiMessageToModelMessages(uiMessage)
// Convert model message to UI message
-const uiMessage = modelMessageToUIMessage(modelMessage, "msg-123");
+const uiMessage = modelMessageToUIMessage(modelMessage, 'msg-123')
// Convert array of model messages to UI messages
-const uiMessages = modelMessagesToUIMessages(modelMessages);
+const uiMessages = modelMessagesToUIMessages(modelMessages)
```
### 3. Custom JSON Parser
@@ -212,23 +221,27 @@ Provide your own parser for incomplete JSON:
const customParser = {
parse: (jsonString: string) => {
// Your custom parsing logic
- return myPartialJSONParser(jsonString);
- }
-};
+ return myPartialJSONParser(jsonString)
+ },
+}
const processor = new StreamProcessor({
jsonParser: customParser,
- handlers: { /* ... */ }
-});
+ handlers: {
+ /* ... */
+ },
+})
```
## Updated Exports
### @tanstack/ai
+
- ✅ `ModelMessage` (renamed from `Message`)
- All other exports unchanged
### @tanstack/ai-client
+
- ✅ `UIMessage` - New parts-based message type
- ✅ `MessagePart`, `TextPart`, `ToolCallPart`, `ToolResultPart` - Part types
- ✅ `ToolCallState`, `ToolResultState` - State types
@@ -239,16 +252,19 @@ const processor = new StreamProcessor({
## Breaking Changes
### ⚠️ Message Type Rename
+
- `Message` is now `ModelMessage` in `@tanstack/ai`
- Update all type imports and variable declarations
### ⚠️ UIMessage Structure Change
+
- Messages now have `parts: MessagePart[]` instead of `content` and `toolCalls`
- Update UI rendering code to iterate over parts
- Access text via `parts.filter(p => p.type === "text")`
- Access tool calls via `parts.filter(p => p.type === "tool-call")`
### ✅ No Breaking Changes For
+
- Server-side code (Python, PHP) - continues to work as-is
- Connection adapters - automatically convert UIMessages to ModelMessages
- Core AI functionality - ModelMessage has same structure as old Message
@@ -264,6 +280,7 @@ const processor = new StreamProcessor({
## Examples
See the updated examples:
+
- **CLI Example**: `/examples/cli/src/index.ts` - Uses ModelMessage
- **React Chat Example**: `/examples/ts-chat/src/routes/demo/tanchat.tsx` - Uses UIMessage with parts
- **AI Assistant Component**: `/examples/ts-chat/src/components/example-AIAssistant.tsx` - Uses UIMessage with parts
@@ -271,4 +288,3 @@ See the updated examples:
## Support
For questions or issues related to this migration, please refer to the TanStack AI documentation or open an issue on GitHub.
-
diff --git a/ai-docs/TYPE_NARROWING_SOLUTION.md b/ai-docs/TYPE_NARROWING_SOLUTION.md
index 6526226f0..2f618e06a 100644
--- a/ai-docs/TYPE_NARROWING_SOLUTION.md
+++ b/ai-docs/TYPE_NARROWING_SOLUTION.md
@@ -1,156 +1,165 @@
-# Type Narrowing with Separate Methods ā
-
-> **Note**: This document describes type narrowing with the current API. The previous `as` option approach has been replaced with separate methods.
-
-## The Solution
-
-With separate methods, type narrowing is automatic and simple:
-
-```typescript
-// Streaming - returns AsyncIterable
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
-});
-// Type: AsyncIterable ā
-
-// Promise-based - returns Promise
-const result = ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
-});
-// Type: Promise ā
-```
-
-No need for `as const` assertions or discriminated unions - TypeScript automatically knows the return type!
-
-## How to Use
-
-### ā Correct Usage - Type is Automatically Narrowed
-
-```typescript
-// Returns AsyncIterable
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
-});
-
-for await (const chunk of stream) {
- // TypeScript knows chunk is StreamChunk ā
- console.log(chunk.type);
-}
-
-// Returns Promise
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
-});
-
-// TypeScript knows result is ChatCompletionResult ā
-console.log(result.content);
-console.log(result.usage.totalTokens);
-```
-
-### Type Inference Examples
-
-```typescript
-// 1. Stream mode - returns AsyncIterable
-const stream = ai.chat({ adapter: "openai", model: "gpt-4", messages: [] });
-// Type: AsyncIterable ā
-
-// 2. Promise mode - returns Promise
-const promise = ai.chatCompletion({ adapter: "openai", model: "gpt-4", messages: [] });
-// Type: Promise ā
-
-// 3. After await - ChatCompletionResult
-const result = await ai.chatCompletion({ adapter: "openai", model: "gpt-4", messages: [] });
-// Type: ChatCompletionResult ā
-```
-
-## Real-World Example: API Handler
-
-```typescript
-import { toStreamResponse } from "@tanstack/ai";
-
-export const Route = createAPIFileRoute("/api/chat")({
- POST: async ({ request }): Promise => {
- const { messages } = await request.json();
-
- // TypeScript knows this returns AsyncIterable ā
- const stream = ai.chat({
- adapter: "openAi",
- model: "gpt-4o",
- messages,
- fallbacks: [
- { adapter: "ollama", model: "llama2" }
- ]
- });
-
- // Convert to Response
- return toStreamResponse(stream);
- }
-});
-```
-
-## Why Separate Methods Are Better
-
-With the old `as` option approach:
-```typescript
-const as = "response"; // Type: string
-const result = ai.chat({ adapter: "openai", model: "gpt-4", messages: [], as });
-// Return type: Promise | AsyncIterable | Response
-// ā TypeScript doesn't know which specific type
-// Need: as: "response" as const
-```
-
-With separate methods:
-```typescript
-const stream = ai.chat({ adapter: "openai", model: "gpt-4", messages: [] });
-// Return type: AsyncIterable
-// ā TypeScript knows exact type automatically!
-```
-
-## Technical Explanation
-
-The separate methods approach is simpler:
-
-```typescript
-class AI {
- chat(options: ChatOptions): AsyncIterable {
- // Implementation...
- }
-
- async chatCompletion(options: ChatOptions): Promise {
- // Implementation...
- }
-}
-```
-
-TypeScript's type inference:
-1. Call `chat()` ā method signature says it returns `AsyncIterable`
-2. Call `chatCompletion()` ā method signature says it returns `Promise`
-3. No conditional types needed - just straightforward method signatures!
-
-## Benefits
-
-ā **Type Safety**: TypeScript knows exact return type at compile time
-ā **IntelliSense**: Autocomplete shows correct properties for each method
-ā **Compile-Time Errors**: Catch type mismatches before runtime
-ā **Refactoring Safety**: Changes are caught automatically
-ā **Self-Documenting**: Methods serve as inline documentation
-ā **Simpler**: No `as const` needed, no overloads needed
-
-## Summary
-
-The separate methods API provides perfect type narrowing without any special syntax:
-
-| Method | Return Type |
-|--------|-------------|
-| `chat()` | `AsyncIterable` |
-| `chatCompletion()` | `Promise` |
-
-**Pro Tip**: Just call the method you need - TypeScript handles the rest! š
+# Type Narrowing with Separate Methods ✅
+
+> **Note**: This document describes type narrowing with the current API. The previous `as` option approach has been replaced with separate methods.
+
+## The Solution
+
+With separate methods, type narrowing is automatic and simple:
+
+```typescript
+// Streaming - returns AsyncIterable
+const stream = ai.chat({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+});
+// Type: AsyncIterable<StreamChunk> ✅
+
+// Promise-based - returns Promise
+const result = ai.chatCompletion({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+});
+// Type: Promise<ChatCompletionResult> ✅
+```
+
+No need for `as const` assertions or discriminated unions - TypeScript automatically knows the return type!
+
+## How to Use
+
+### ✅ Correct Usage - Type is Automatically Narrowed
+
+```typescript
+// Returns AsyncIterable<StreamChunk>
+const stream = ai.chat({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+});
+
+for await (const chunk of stream) {
+  // TypeScript knows chunk is StreamChunk ✅
+ console.log(chunk.type);
+}
+
+// Returns Promise<ChatCompletionResult>
+const result = await ai.chatCompletion({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+});
+
+// TypeScript knows result is ChatCompletionResult ✅
+console.log(result.content);
+console.log(result.usage.totalTokens);
+```
+
+### Type Inference Examples
+
+```typescript
+// 1. Stream mode - returns AsyncIterable<StreamChunk>
+const stream = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] })
+// Type: AsyncIterable<StreamChunk> ✅
+
+// 2. Promise mode - returns Promise<ChatCompletionResult>
+const promise = ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+})
+// Type: Promise<ChatCompletionResult> ✅
+
+// 3. After await - ChatCompletionResult
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+})
+// Type: ChatCompletionResult ✅
+```
+
+## Real-World Example: API Handler
+
+```typescript
+import { toStreamResponse } from '@tanstack/ai'
+
+export const Route = createAPIFileRoute('/api/chat')({
+  POST: async ({ request }): Promise<Response> => {
+ const { messages } = await request.json()
+
+    // TypeScript knows this returns AsyncIterable<StreamChunk> ✅
+ const stream = ai.chat({
+ adapter: 'openAi',
+ model: 'gpt-4o',
+ messages,
+ fallbacks: [{ adapter: 'ollama', model: 'llama2' }],
+ })
+
+ // Convert to Response
+ return toStreamResponse(stream)
+ },
+})
+```
+
+## Why Separate Methods Are Better
+
+With the old `as` option approach:
+
+```typescript
+const as = 'response' // Type: string
+const result = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [], as })
+// Return type: Promise<ChatCompletionResult> | AsyncIterable<StreamChunk> | Response
+// ❌ TypeScript doesn't know which specific type
+// Need: as: "response" as const
+```
+
+With separate methods:
+
+```typescript
+const stream = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] })
+// Return type: AsyncIterable<StreamChunk>
+// ✅ TypeScript knows exact type automatically!
+```
+
+## Technical Explanation
+
+The separate methods approach is simpler:
+
+```typescript
+class AI {
+  chat(options: ChatOptions): AsyncIterable<StreamChunk> {
+ // Implementation...
+ }
+
+  async chatCompletion(options: ChatOptions): Promise<ChatCompletionResult> {
+ // Implementation...
+ }
+}
+```
+
+TypeScript's type inference:
+
+1. Call `chat()` → method signature says it returns `AsyncIterable<StreamChunk>`
+2. Call `chatCompletion()` → method signature says it returns `Promise<ChatCompletionResult>`
+3. No conditional types needed - just straightforward method signatures!
+
+## Benefits
+
+✅ **Type Safety**: TypeScript knows exact return type at compile time
+✅ **IntelliSense**: Autocomplete shows correct properties for each method
+✅ **Compile-Time Errors**: Catch type mismatches before runtime
+✅ **Refactoring Safety**: Changes are caught automatically
+✅ **Self-Documenting**: Methods serve as inline documentation
+✅ **Simpler**: No `as const` needed, no overloads needed
+
+## Summary
+
+The separate methods API provides perfect type narrowing without any special syntax:
+
+| Method | Return Type |
+| ------------------ | ------------------------------- |
+| `chat()`           | `AsyncIterable<StreamChunk>`    |
+| `chatCompletion()` | `Promise<ChatCompletionResult>` |
+
+**Pro Tip**: Just call the method you need - TypeScript handles the rest! 🎉
diff --git a/ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md b/ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md
index 879bf9d7d..adc0554d7 100644
--- a/ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md
+++ b/ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md
@@ -1,224 +1,225 @@
-# Type Narrowing in Chat API
-
-> **Note**: This document describes type narrowing with the current API using separate methods. The previous `as` option approach has been replaced with `chat()` for streaming and `chatCompletion()` for promise-based completion.
-
-## Overview
-
-The chat API uses separate methods, which provides automatic type narrowing without needing discriminated unions or const assertions:
-
-- **`chat()`** - Always returns `AsyncIterable`
-- **`chatCompletion()`** - Always returns `Promise`
-
-TypeScript automatically knows the exact return type based on which method you call!
-
-## Type Narrowing Rules
-
-| Method | Return Type | Usage |
-|--------|-------------|-------|
-| `chat()` | `AsyncIterable` | Can use `for await...of`, iterate chunks |
-| `chatCompletion()` | `Promise` | Can `await`, access `.content`, `.usage`, etc. |
-
-## Examples with Type Checking
-
-### 1. Promise Mode (chatCompletion) - Type is `Promise`
-
-```typescript
-const result = ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-
-// TypeScript knows result is Promise
-const resolved = await result;
-
-// ā These work - properties exist on ChatCompletionResult
-console.log(resolved.content);
-console.log(resolved.role);
-console.log(resolved.usage.totalTokens);
-
-// ā TypeScript error - headers doesn't exist on ChatCompletionResult
-console.log(resolved.headers); // Type error!
-```
-
-### 2. Stream Mode (chat) - Type is `AsyncIterable`
-
-```typescript
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-
-// TypeScript knows stream is AsyncIterable
-// ā This works - can iterate async iterable
-for await (const chunk of stream) {
- console.log(chunk.type);
- console.log(chunk.id);
- console.log(chunk.model);
-}
-
-// ā TypeScript error - content doesn't exist on AsyncIterable
-console.log(stream.content); // Type error!
-
-// ā TypeScript error - headers doesn't exist on AsyncIterable
-console.log(stream.headers); // Type error!
-```
-
-### 3. HTTP Response Mode
-
-```typescript
-import { toStreamResponse } from "@tanstack/ai";
-
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-
-const response = toStreamResponse(stream);
-
-// TypeScript knows response is Response
-// ā These work - properties exist on Response
-console.log(response.headers);
-console.log(response.body);
-console.log(response.status);
-console.log(response.ok);
-
-const contentType = response.headers.get("Content-Type");
-
-// ā TypeScript error - content doesn't exist on Response
-console.log(response.content); // Type error!
-```
-
-## Function Return Type Inference
-
-TypeScript correctly infers return types in functions:
-
-### API Handler - Returns `Response`
-
-```typescript
-import { toStreamResponse } from "@tanstack/ai";
-
-function apiHandler() {
- const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- });
-
- return toStreamResponse(stream);
- // TypeScript infers: function apiHandler(): Response ā
-}
-```
-
-### Type-safe API Handler
-
-```typescript
-import { toStreamResponse } from "@tanstack/ai";
-
-function apiHandler(): Response {
- const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- });
-
- return toStreamResponse(stream); // ā Correct - returns Response
-}
-
-function wrongApiHandler(): Response {
- const result = ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- });
-
- return result; // ā TypeScript error - returns Promise, not Response
-}
-```
-
-### Streaming Handler
-
-```typescript
-async function* streamHandler() {
- const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- });
-
- // TypeScript knows stream is AsyncIterable
- for await (const chunk of stream) {
- yield chunk; // ā Works perfectly
- }
-}
-```
-
-## With Fallbacks - Type Narrowing Still Works
-
-```typescript
-// Promise with fallbacks - Type: Promise
-const promise = ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- fallbacks: [{ adapter: "ollama", model: "llama2" }]
-});
-const resolved = await promise;
-console.log(resolved.content); // ā Works
-
-// Stream with fallbacks - Type: AsyncIterable
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- fallbacks: [{ adapter: "ollama", model: "llama2" }]
-});
-for await (const chunk of stream) {
- console.log(chunk.type); // ā Works
-}
-```
-
-## How It Works (Technical Details)
-
-With separate methods, TypeScript doesn't need function overloads or conditional types:
-
-```typescript
-class AI {
- // Simple method signatures - no overloads needed!
- chat(options: ChatOptions): AsyncIterable {
- return this.adapter.chatStream(options);
- }
-
- async chatCompletion(options: ChatOptions): Promise {
- return this.adapter.chatCompletion(options);
- }
-}
-```
-
-TypeScript's type inference is straightforward:
-- Call `chat()` ā get `AsyncIterable`
-- Call `chatCompletion()` ā get `Promise`
-
-No need for `as const` assertions or discriminated unions!
-
-## Benefits
-
-ā **Type Safety**: TypeScript knows exact return type at compile time
-ā **IntelliSense**: Autocomplete shows correct properties for each method
-ā **Compile-Time Errors**: Catch type mismatches before runtime
-ā **Refactoring Safety**: Changes are caught automatically
-ā **Self-Documenting**: Methods serve as inline documentation
-ā **Simpler**: No need for const assertions or overloads
-
-## Summary
-
-The separate methods API provides perfect type narrowing automatically:
-
-| Code | Return Type |
-|------|-------------|
-| `chat()` | `AsyncIterable` |
-| `chatCompletion()` | `Promise` |
-
-TypeScript enforces these types at compile time, providing complete type safety without any special syntax! š
+# Type Narrowing in Chat API
+
+> **Note**: This document describes type narrowing with the current API using separate methods. The previous `as` option approach has been replaced with `chat()` for streaming and `chatCompletion()` for promise-based completion.
+
+## Overview
+
+The chat API uses separate methods, which provides automatic type narrowing without needing discriminated unions or const assertions:
+
+- **`chat()`** - Always returns `AsyncIterable<StreamChunk>`
+- **`chatCompletion()`** - Always returns `Promise<ChatCompletionResult>`
+
+TypeScript automatically knows the exact return type based on which method you call!
+
+## Type Narrowing Rules
+
+| Method | Return Type | Usage |
+| ------------------ | ------------------------------- | ---------------------------------------------- |
+| `chat()`           | `AsyncIterable<StreamChunk>`    | Can use `for await...of`, iterate chunks       |
+| `chatCompletion()` | `Promise<ChatCompletionResult>` | Can `await`, access `.content`, `.usage`, etc. |
+
+## Examples with Type Checking
+
+### 1. Promise Mode (chatCompletion) - Type is `Promise<ChatCompletionResult>`
+
+```typescript
+const result = ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+
+// TypeScript knows result is Promise<ChatCompletionResult>
+const resolved = await result
+
+// ✅ These work - properties exist on ChatCompletionResult
+console.log(resolved.content)
+console.log(resolved.role)
+console.log(resolved.usage.totalTokens)
+
+// ❌ TypeScript error - headers doesn't exist on ChatCompletionResult
+console.log(resolved.headers) // Type error!
+```
+
+### 2. Stream Mode (chat) - Type is `AsyncIterable<StreamChunk>`
+
+```typescript
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+
+// TypeScript knows stream is AsyncIterable<StreamChunk>
+// ✅ This works - can iterate async iterable
+for await (const chunk of stream) {
+ console.log(chunk.type)
+ console.log(chunk.id)
+ console.log(chunk.model)
+}
+
+// ❌ TypeScript error - content doesn't exist on AsyncIterable
+console.log(stream.content) // Type error!
+
+// ❌ TypeScript error - headers doesn't exist on AsyncIterable
+console.log(stream.headers) // Type error!
+```
+
+### 3. HTTP Response Mode
+
+```typescript
+import { toStreamResponse } from '@tanstack/ai'
+
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+
+const response = toStreamResponse(stream)
+
+// TypeScript knows response is Response
+// ✅ These work - properties exist on Response
+console.log(response.headers)
+console.log(response.body)
+console.log(response.status)
+console.log(response.ok)
+
+const contentType = response.headers.get('Content-Type')
+
+// ❌ TypeScript error - content doesn't exist on Response
+console.log(response.content) // Type error!
+```
+
+## Function Return Type Inference
+
+TypeScript correctly infers return types in functions:
+
+### API Handler - Returns `Response`
+
+```typescript
+import { toStreamResponse } from "@tanstack/ai";
+
+function apiHandler() {
+ const stream = ai.chat({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ });
+
+ return toStreamResponse(stream);
+  // TypeScript infers: function apiHandler(): Response ✅
+}
+```
+
+### Type-safe API Handler
+
+```typescript
+import { toStreamResponse } from "@tanstack/ai";
+
+function apiHandler(): Response {
+ const stream = ai.chat({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ });
+
+  return toStreamResponse(stream); // ✅ Correct - returns Response
+}
+
+function wrongApiHandler(): Response {
+ const result = ai.chatCompletion({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ });
+
+  return result; // ❌ TypeScript error - returns Promise, not Response
+}
+```
+
+### Streaming Handler
+
+```typescript
+async function* streamHandler() {
+ const stream = ai.chat({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ });
+
+  // TypeScript knows stream is AsyncIterable<StreamChunk>
+ for await (const chunk of stream) {
+    yield chunk; // ✅ Works perfectly
+ }
+}
+```
+
+## With Fallbacks - Type Narrowing Still Works
+
+```typescript
+// Promise with fallbacks - Type: Promise<ChatCompletionResult>
+const promise = ai.chatCompletion({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ fallbacks: [{ adapter: "ollama", model: "llama2" }]
+});
+const resolved = await promise;
+console.log(resolved.content); // ✅ Works
+
+// Stream with fallbacks - Type: AsyncIterable<StreamChunk>
+const stream = ai.chat({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ fallbacks: [{ adapter: "ollama", model: "llama2" }]
+});
+for await (const chunk of stream) {
+  console.log(chunk.type); // ✅ Works
+}
+```
+
+## How It Works (Technical Details)
+
+With separate methods, TypeScript doesn't need function overloads or conditional types:
+
+```typescript
+class AI {
+ // Simple method signatures - no overloads needed!
+  chat(options: ChatOptions): AsyncIterable<StreamChunk> {
+ return this.adapter.chatStream(options)
+ }
+
+  async chatCompletion(options: ChatOptions): Promise<ChatCompletionResult> {
+ return this.adapter.chatCompletion(options)
+ }
+}
+```
+
+TypeScript's type inference is straightforward:
+
+- Call `chat()` → get `AsyncIterable<StreamChunk>`
+- Call `chatCompletion()` → get `Promise<ChatCompletionResult>`
+
+No need for `as const` assertions or discriminated unions!
+
+## Benefits
+
+✅ **Type Safety**: TypeScript knows exact return type at compile time
+✅ **IntelliSense**: Autocomplete shows correct properties for each method
+✅ **Compile-Time Errors**: Catch type mismatches before runtime
+✅ **Refactoring Safety**: Changes are caught automatically
+✅ **Self-Documenting**: Methods serve as inline documentation
+✅ **Simpler**: No need for const assertions or overloads
+
+## Summary
+
+The separate methods API provides perfect type narrowing automatically:
+
+| Code | Return Type |
+| ------------------ | ------------------------------- |
+| `chat()`           | `AsyncIterable<StreamChunk>`    |
+| `chatCompletion()` | `Promise<ChatCompletionResult>` |
+
+TypeScript enforces these types at compile time, providing complete type safety without any special syntax! 🎉
diff --git a/ai-docs/TYPE_SAFETY.md b/ai-docs/TYPE_SAFETY.md
index 08d44be8a..48cf95eeb 100644
--- a/ai-docs/TYPE_SAFETY.md
+++ b/ai-docs/TYPE_SAFETY.md
@@ -1,303 +1,305 @@
-# Type-Safe Multi-Adapter AI API
-
-This package provides complete TypeScript type safety for working with multiple AI providers, ensuring that you can only use models that are supported by each adapter.
-
-## Features
-
-- ā **Adapter-specific model validation** - TypeScript prevents using GPT models with Anthropic and vice versa
-- ā **Full autocomplete support** - Your IDE suggests only valid models for the selected adapter
-- ā **Compile-time safety** - Catch model incompatibilities before runtime
-- ā **Multi-adapter support** - Use multiple AI providers in a single application
-- ā **Type inference** - Model types are automatically inferred from adapter configuration
-
-## Installation
-
-```bash
-npm install @tanstack/ai @tanstack/ai-openai @tanstack/ai-anthropic
-```
-
-## Basic Usage
-
-### Creating an AI instance with multiple adapters
-
-```typescript
-import { AI } from "@tanstack/ai";
-import { OpenAIAdapter } from "@tanstack/ai-openai";
-import { AnthropicAdapter } from "@tanstack/ai-anthropic";
-
-const ai = new AI({
- adapters: {
- "openai": new OpenAIAdapter({
- apiKey: process.env.OPENAI_API_KEY!,
- }),
- "anthropic": new AnthropicAdapter({
- apiKey: process.env.ANTHROPIC_API_KEY!,
- }),
- },
-});
-```
-
-### Type-safe model selection
-
-```typescript
-// ā VALID - OpenAI with GPT model
-await ai.chat({
- adapter: "openai",
- model: "gpt-4", // TypeScript knows this is valid
- messages: [{ role: "user", content: "Hello!" }],
-});
-
-// ā VALID - Anthropic with Claude model
-await ai.chat({
- adapter: "anthropic",
- model: "claude-3-5-sonnet-20241022", // TypeScript knows this is valid
- messages: [{ role: "user", content: "Hello!" }],
-});
-
-// ā COMPILE ERROR - Wrong model for adapter
-await ai.chat({
- adapter: "anthropic",
- model: "gpt-4", // TypeScript error: "gpt-4" not valid for Anthropic!
- messages: [{ role: "user", content: "Hello!" }],
-});
-
-// ā COMPILE ERROR - Wrong model for adapter
-await ai.chat({
- adapter: "openai",
- model: "claude-3-5-sonnet-20241022", // TypeScript error: Claude not valid for OpenAI!
- messages: [{ role: "user", content: "Hello!" }],
-});
-```
-
-## Available Models
-
-### OpenAI Models
-
-```typescript
-type OpenAIModel =
- | "gpt-4"
- | "gpt-4-turbo"
- | "gpt-4-turbo-preview"
- | "gpt-4o"
- | "gpt-4o-mini"
- | "gpt-3.5-turbo"
- | "gpt-3.5-turbo-16k"
- | "gpt-3.5-turbo-instruct"
- | "text-embedding-ada-002"
- | "text-embedding-3-small"
- | "text-embedding-3-large";
-```
-
-### Anthropic Models
-
-```typescript
-type AnthropicModel =
- | "claude-3-5-sonnet-20241022"
- | "claude-3-5-sonnet-20240620"
- | "claude-3-opus-20240229"
- | "claude-3-sonnet-20240229"
- | "claude-3-haiku-20240307"
- | "claude-2.1"
- | "claude-2.0"
- | "claude-instant-1.2";
-```
-
-## API Methods
-
-All methods support the same type-safe adapter and model selection:
-
-### Chat Completion
-
-```typescript
-const result = await ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [
- { role: "system", content: "You are a helpful assistant." },
- { role: "user", content: "What is TypeScript?" },
- ],
- temperature: 0.7,
- maxTokens: 500,
-});
-```
-
-### Streaming Chat
-
-```typescript
-for await (const chunk of ai.streamChat({
- adapter: "anthropic",
- model: "claude-3-5-sonnet-20241022",
- messages: [{ role: "user", content: "Count from 1 to 5" }],
-})) {
- if (chunk.type === "content") {
- process.stdout.write(chunk.delta);
- }
-}
-```
-
-### Text Generation
-
-```typescript
-const result = await ai.generateText({
- adapter: "openai",
- model: "gpt-3.5-turbo-instruct",
- prompt: "Write a haiku about TypeScript",
- maxTokens: 100,
-});
-```
-
-### Summarization
-
-```typescript
-const result = await ai.summarize({
- adapter: "anthropic",
- model: "claude-3-haiku-20240307",
- text: "Long text to summarize...",
- style: "bullet-points",
- maxLength: 200,
-});
-```
-
-### Embeddings
-
-```typescript
-const result = await ai.embed({
- adapter: "openai",
- model: "text-embedding-3-small",
- input: "Text to embed",
-});
-```
-
-## Advanced Features
-
-### Dynamic Adapter Addition
-
-```typescript
-const aiWithGemini = ai.addAdapter(
- "gemini",
- new GeminiAdapter({ apiKey: "..." })
-);
-
-// Now "gemini" is available with full type safety
-await aiWithGemini.chat({
- adapter: "gemini",
- model: "gemini-pro", // Types updated automatically
- messages: [{ role: "user", content: "Hello!" }],
-});
-```
-
-### Getting Available Adapters
-
-```typescript
-console.log(ai.adapterNames); // ["openai", "anthropic"]
-```
-
-### Direct Adapter Access
-
-```typescript
-const openai = ai.getAdapter("openai");
-console.log(openai.models); // Array of OpenAI models
-```
-
-## Benefits
-
-### 1. Compile-Time Safety
-
-**Before:**
-```typescript
-// Runtime error when deployed
-await ai.chat({
- provider: "anthropic",
- model: "gpt-4", // Oops! Wrong model
-});
-// Error: Model 'gpt-4' not found for provider 'anthropic'
-```
-
-**After:**
-```typescript
-// Compile-time error in your editor
-await ai.chat({
- adapter: "anthropic",
- model: "gpt-4", // TypeScript error immediately
-});
-// Error: Type '"gpt-4"' is not assignable to type 'claude-...'
-```
-
-### 2. IDE Autocomplete
-
-When you type `model:`, your IDE will show you **only** the models available for the selected adapter:
-
-- Select `openai` ā See GPT models
-- Select `anthropic` ā See Claude models
-
-### 3. Refactoring Safety
-
-If you switch adapters, TypeScript will immediately flag any incompatible models:
-
-```typescript
-// Change from OpenAI to Anthropic
-await ai.chat({
- adapter: "anthropic", // Changed this
- model: "gpt-4", // TypeScript immediately flags this as an error
- messages: [],
-});
-```
-
-### 4. Self-Documenting Code
-
-The types serve as documentation - you can see all available models without checking docs:
-
-```typescript
-// Hover over "model" to see all valid options
-ai.chat({ adapter: "openai", model: /* hover here */ });
-```
-
-## Creating Custom Adapters
-
-To create a custom adapter with type safety:
-
-```typescript
-import { BaseAdapter } from "@tanstack/ai";
-
-const MY_MODELS = ["my-model-1", "my-model-2", "my-model-3"] as const;
-
-export class MyAdapter extends BaseAdapter {
- name = "my-adapter";
- models = MY_MODELS;
-
- // Implement required methods...
-}
-```
-
-Then use it with full type safety:
-
-```typescript
-const ai = new AI({
- adapters: {
- "my-adapter": new MyAdapter({ apiKey: "..." }),
- },
-});
-
-// TypeScript now knows about "my-model-1", "my-model-2", etc.
-await ai.chat({
- adapter: "my-adapter",
- model: "my-model-1", // Autocomplete works!
- messages: [],
-});
-```
-
-## Examples
-
-See the `/examples` directory for complete working examples:
-
-- `model-safety-demo.ts` - Comprehensive demonstration of type safety
-- `type-safety-demo.ts` - Quick reference showing valid and invalid usage
-- `multi-adapter-example.ts` - Real-world multi-adapter usage
-
-## TypeScript Configuration
-
-This package requires TypeScript 4.5 or higher for full type inference support.
-
-## License
-
-MIT
+# Type-Safe Multi-Adapter AI API
+
+This package provides complete TypeScript type safety for working with multiple AI providers, ensuring that you can only use models that are supported by each adapter.
+
+## Features
+
+- ✅ **Adapter-specific model validation** - TypeScript prevents using GPT models with Anthropic and vice versa
+- ✅ **Full autocomplete support** - Your IDE suggests only valid models for the selected adapter
+- ✅ **Compile-time safety** - Catch model incompatibilities before runtime
+- ✅ **Multi-adapter support** - Use multiple AI providers in a single application
+- ✅ **Type inference** - Model types are automatically inferred from adapter configuration
+
+## Installation
+
+```bash
+npm install @tanstack/ai @tanstack/ai-openai @tanstack/ai-anthropic
+```
+
+## Basic Usage
+
+### Creating an AI instance with multiple adapters
+
+```typescript
+import { AI } from '@tanstack/ai'
+import { OpenAIAdapter } from '@tanstack/ai-openai'
+import { AnthropicAdapter } from '@tanstack/ai-anthropic'
+
+const ai = new AI({
+ adapters: {
+ openai: new OpenAIAdapter({
+ apiKey: process.env.OPENAI_API_KEY!,
+ }),
+ anthropic: new AnthropicAdapter({
+ apiKey: process.env.ANTHROPIC_API_KEY!,
+ }),
+ },
+})
+```
+
+### Type-safe model selection
+
+```typescript
+// ā VALID - OpenAI with GPT model
+await ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4', // TypeScript knows this is valid
+ messages: [{ role: 'user', content: 'Hello!' }],
+})
+
+// ā VALID - Anthropic with Claude model
+await ai.chat({
+ adapter: 'anthropic',
+ model: 'claude-3-5-sonnet-20241022', // TypeScript knows this is valid
+ messages: [{ role: 'user', content: 'Hello!' }],
+})
+
+// ā COMPILE ERROR - Wrong model for adapter
+await ai.chat({
+ adapter: 'anthropic',
+ model: 'gpt-4', // TypeScript error: "gpt-4" not valid for Anthropic!
+ messages: [{ role: 'user', content: 'Hello!' }],
+})
+
+// ā COMPILE ERROR - Wrong model for adapter
+await ai.chat({
+ adapter: 'openai',
+ model: 'claude-3-5-sonnet-20241022', // TypeScript error: Claude not valid for OpenAI!
+ messages: [{ role: 'user', content: 'Hello!' }],
+})
+```
+
+## Available Models
+
+### OpenAI Models
+
+```typescript
+type OpenAIModel =
+ | 'gpt-4'
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4o'
+ | 'gpt-4o-mini'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-instruct'
+ | 'text-embedding-ada-002'
+ | 'text-embedding-3-small'
+ | 'text-embedding-3-large'
+```
+
+### Anthropic Models
+
+```typescript
+type AnthropicModel =
+ | 'claude-3-5-sonnet-20241022'
+ | 'claude-3-5-sonnet-20240620'
+ | 'claude-3-opus-20240229'
+ | 'claude-3-sonnet-20240229'
+ | 'claude-3-haiku-20240307'
+ | 'claude-2.1'
+ | 'claude-2.0'
+ | 'claude-instant-1.2'
+```
+
+## API Methods
+
+All methods support the same type-safe adapter and model selection:
+
+### Chat Completion
+
+```typescript
+const result = await ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [
+ { role: 'system', content: 'You are a helpful assistant.' },
+ { role: 'user', content: 'What is TypeScript?' },
+ ],
+ temperature: 0.7,
+ maxTokens: 500,
+})
+```
+
+### Streaming Chat
+
+```typescript
+for await (const chunk of ai.streamChat({
+ adapter: 'anthropic',
+ model: 'claude-3-5-sonnet-20241022',
+ messages: [{ role: 'user', content: 'Count from 1 to 5' }],
+})) {
+ if (chunk.type === 'content') {
+ process.stdout.write(chunk.delta)
+ }
+}
+```
+
+### Text Generation
+
+```typescript
+const result = await ai.generateText({
+ adapter: 'openai',
+ model: 'gpt-3.5-turbo-instruct',
+ prompt: 'Write a haiku about TypeScript',
+ maxTokens: 100,
+})
+```
+
+### Summarization
+
+```typescript
+const result = await ai.summarize({
+ adapter: 'anthropic',
+ model: 'claude-3-haiku-20240307',
+ text: 'Long text to summarize...',
+ style: 'bullet-points',
+ maxLength: 200,
+})
+```
+
+### Embeddings
+
+```typescript
+const result = await ai.embed({
+ adapter: 'openai',
+ model: 'text-embedding-3-small',
+ input: 'Text to embed',
+})
+```
+
+## Advanced Features
+
+### Dynamic Adapter Addition
+
+```typescript
+const aiWithGemini = ai.addAdapter(
+ 'gemini',
+ new GeminiAdapter({ apiKey: '...' }),
+)
+
+// Now "gemini" is available with full type safety
+await aiWithGemini.chat({
+ adapter: 'gemini',
+ model: 'gemini-pro', // Types updated automatically
+ messages: [{ role: 'user', content: 'Hello!' }],
+})
+```
+
+### Getting Available Adapters
+
+```typescript
+console.log(ai.adapterNames) // ["openai", "anthropic"]
+```
+
+### Direct Adapter Access
+
+```typescript
+const openai = ai.getAdapter('openai')
+console.log(openai.models) // Array of OpenAI models
+```
+
+## Benefits
+
+### 1. Compile-Time Safety
+
+**Before:**
+
+```typescript
+// Runtime error when deployed
+await ai.chat({
+ provider: 'anthropic',
+ model: 'gpt-4', // Oops! Wrong model
+})
+// Error: Model 'gpt-4' not found for provider 'anthropic'
+```
+
+**After:**
+
+```typescript
+// Compile-time error in your editor
+await ai.chat({
+ adapter: 'anthropic',
+ model: 'gpt-4', // TypeScript error immediately
+})
+// Error: Type '"gpt-4"' is not assignable to type 'claude-...'
+```
+
+### 2. IDE Autocomplete
+
+When you type `model:`, your IDE will show you **only** the models available for the selected adapter:
+
+- Select `openai` ā See GPT models
+- Select `anthropic` ā See Claude models
+
+### 3. Refactoring Safety
+
+If you switch adapters, TypeScript will immediately flag any incompatible models:
+
+```typescript
+// Change from OpenAI to Anthropic
+await ai.chat({
+ adapter: 'anthropic', // Changed this
+ model: 'gpt-4', // TypeScript immediately flags this as an error
+ messages: [],
+})
+```
+
+### 4. Self-Documenting Code
+
+The types serve as documentation - you can see all available models without checking docs:
+
+```typescript
+// Hover over "model" to see all valid options
+ai.chat({ adapter: 'openai', model: /* hover here */ })
+```
+
+## Creating Custom Adapters
+
+To create a custom adapter with type safety:
+
+```typescript
+import { BaseAdapter } from '@tanstack/ai'
+
+const MY_MODELS = ['my-model-1', 'my-model-2', 'my-model-3'] as const
+
+export class MyAdapter extends BaseAdapter {
+ name = 'my-adapter'
+ models = MY_MODELS
+
+ // Implement required methods...
+}
+```
+
+Then use it with full type safety:
+
+```typescript
+const ai = new AI({
+ adapters: {
+ 'my-adapter': new MyAdapter({ apiKey: '...' }),
+ },
+})
+
+// TypeScript now knows about "my-model-1", "my-model-2", etc.
+await ai.chat({
+ adapter: 'my-adapter',
+ model: 'my-model-1', // Autocomplete works!
+ messages: [],
+})
+```
+
+## Examples
+
+See the `/examples` directory for complete working examples:
+
+- `model-safety-demo.ts` - Comprehensive demonstration of type safety
+- `type-safety-demo.ts` - Quick reference showing valid and invalid usage
+- `multi-adapter-example.ts` - Real-world multi-adapter usage
+
+## TypeScript Configuration
+
+This package requires TypeScript 4.5 or higher for full type inference support.
+
+## License
+
+MIT
diff --git a/ai-docs/UNIFIED_CHAT_API.md b/ai-docs/UNIFIED_CHAT_API.md
index cea718387..300fa93ea 100644
--- a/ai-docs/UNIFIED_CHAT_API.md
+++ b/ai-docs/UNIFIED_CHAT_API.md
@@ -1,389 +1,389 @@
-# Unified Chat API
-
-## Overview
-
-The chat API provides two methods for different use cases:
-
-- **`chat()`** - Returns `AsyncIterable` - streaming with **automatic tool execution loop**
-- **`chatCompletion()`** - Returns `Promise` - standard non-streaming chat with optional structured output
-
-### š Automatic Tool Execution in `chat()`
-
-**IMPORTANT:** The `chat()` method runs an automatic tool execution loop. When you provide tools with `execute` functions:
-
-1. **Model calls a tool** ā SDK executes it automatically
-2. **SDK emits chunks** for tool calls and results (`tool_call`, `tool_result`)
-3. **SDK adds results** to messages and continues conversation
-4. **Loop repeats** until stopped by `agentLoopStrategy` (default: `maxIterations(5)`)
-
-**You don't need to manually execute tools or manage conversation state** - the SDK handles everything internally!
-
-**š See also:** [Complete Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md)
-
-## Migration Guide
-
-### Before (Using `as` option)
-
-```typescript
-// For non-streaming
-const result = await ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
- as: "promise",
-});
-
-// For streaming
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
- as: "stream",
-});
-for await (const chunk of stream) {
- console.log(chunk);
-}
-
-// For HTTP response
-const response = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
- as: "response",
-});
-return response;
-```
-
-### After (Separate Methods)
-
-```typescript
-// For non-streaming - use chatCompletion()
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-
-// For streaming - use chat()
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-for await (const chunk of stream) {
- console.log(chunk);
-}
-
-// For HTTP response - use chat() + toStreamResponse()
-import { toStreamResponse } from "@tanstack/ai";
-
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-return toStreamResponse(stream);
-```
-
-## Usage Examples
-
-### 1. Promise Mode (chatCompletion)
-
-Standard non-streaming chat completion:
-
-```typescript
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [
- { role: "system", content: "You are a helpful assistant." },
- { role: "user", content: "What is TypeScript?" },
- ],
- temperature: 0.7,
-});
-
-console.log(result.content);
-console.log(`Tokens used: ${result.usage.totalTokens}`);
-```
-
-### 2. Stream Mode (chat)
-
-Streaming with automatic tool execution loop:
-
-```typescript
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Write a story" }],
- tools: [weatherTool], // Optional: tools are auto-executed
- agentLoopStrategy: maxIterations(5), // Optional: control loop
-});
-
-for await (const chunk of stream) {
- if (chunk.type === "content") {
- process.stdout.write(chunk.delta); // Stream text response
- } else if (chunk.type === "tool_call") {
- console.log(`ā Calling tool: ${chunk.toolCall.function.name}`);
- } else if (chunk.type === "tool_result") {
- console.log(`ā Tool result: ${chunk.content}`);
- } else if (chunk.type === "done") {
- console.log(`\nFinished: ${chunk.finishReason}`);
- console.log(`Tokens: ${chunk.usage?.totalTokens}`);
- }
-}
-```
-
-**Chunk Types:**
-
-- `content` - Text content from the model (use `chunk.delta` for streaming)
-- `tool_call` - Model is calling a tool (emitted by model, auto-executed by SDK)
-- `tool_result` - Tool execution result (emitted after SDK executes tool)
-- `done` - Stream complete (includes `finishReason` and token usage)
-- `error` - An error occurred
-
-### 3. HTTP Response Mode
-
-Perfect for API endpoints:
-
-```typescript
-import { toStreamResponse } from "@tanstack/ai";
-
-// TanStack Start API Route
-export const POST = async ({ request }: { request: Request }) => {
- const { messages } = await request.json();
-
- const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4o",
- messages,
- temperature: 0.7,
- });
-
- // Convert stream to Response with SSE headers
- return toStreamResponse(stream);
-};
-```
-
-## With Fallbacks
-
-Both methods support fallbacks:
-
-```typescript
-// Promise mode with fallbacks
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
- fallbacks: [
- { adapter: "anthropic", model: "claude-3-sonnet-20240229" },
- { adapter: "ollama", model: "llama2" },
- ],
-});
-
-// Stream mode with fallbacks
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
- fallbacks: [{ adapter: "anthropic", model: "claude-3-sonnet-20240229" }],
-});
-
-// HTTP response with fallbacks (seamless failover in HTTP streaming!)
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
- fallbacks: [{ adapter: "ollama", model: "llama2" }],
-});
-return toStreamResponse(stream);
-```
-
-## Tool Execution with Automatic Loop
-
-**The `chat()` method automatically executes tools in a loop** - no manual management needed!
-
-```typescript
-const tools = [
- {
- type: "function" as const,
- function: {
- name: "get_weather",
- description: "Get weather for a location",
- parameters: {
- type: "object",
- properties: {
- location: { type: "string" },
- },
- required: ["location"],
- },
- },
- execute: async (args: { location: string }) => {
- // This function is automatically called by the SDK
- const weather = await fetchWeatherAPI(args.location);
- return JSON.stringify(weather);
- },
- },
-];
-
-// Streaming chat with automatic tool execution
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "What's the weather in SF?" }],
- tools, // Tools with execute functions are auto-executed
- toolChoice: "auto",
- agentLoopStrategy: maxIterations(5), // Control loop behavior
-});
-
-for await (const chunk of stream) {
- if (chunk.type === "content") {
- process.stdout.write(chunk.delta); // Stream text response
- } else if (chunk.type === "tool_call") {
- // Model decided to call a tool - SDK will execute it automatically
- console.log(`ā Calling: ${chunk.toolCall.function.name}`);
- } else if (chunk.type === "tool_result") {
- // SDK executed the tool and got a result
- console.log(`ā Result: ${chunk.content}`);
- } else if (chunk.type === "done") {
- console.log(`Finished: ${chunk.finishReason}`);
- }
-}
-```
-
-**š What Happens Internally:**
-
-1. User asks: "What's the weather in SF?"
-2. Model decides to call `get_weather` tool
- - SDK emits `tool_call` chunk
-3. **SDK automatically executes** `tools[0].execute({ location: "SF" })`
- - SDK emits `tool_result` chunk
-4. SDK adds assistant message (with tool call) + tool result to messages
-5. **SDK automatically continues** conversation by calling model again
-6. Model responds: "The weather in SF is sunny, 72°F"
- - SDK emits `content` chunks
-7. SDK emits `done` chunk
-
-**Key Points:**
-
-- ā Tools are **automatically executed** by the SDK (you don't call `execute`)
-- ā Tool results are **automatically added** to messages
-- ā Conversation **automatically continues** after tool execution
-- ā Loop controlled by `agentLoopStrategy` (default: `maxIterations(5)`)
-- ā All you do is handle chunks for display
-- ā Custom strategies available for advanced control
-
-**Promise Mode (No Tool Execution):**
-
-The `chatCompletion()` method does NOT execute tools - it returns the model's response immediately:
-
-```typescript
-// chatCompletion does not execute tools
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "What's the weather in SF?" }],
- tools,
-});
-
-// If model wanted to call a tool, result.toolCalls will contain the calls
-// but they won't be executed. This is useful if you want manual control.
-if (result.toolCalls) {
- console.log("Model wants to call:", result.toolCalls);
- // You would execute manually and call chatCompletion again
-}
-```
-
-## Type Safety
-
-TypeScript automatically infers the correct return type:
-
-```typescript
-// Type: Promise
-const promise = ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [],
-});
-
-// Type: AsyncIterable
-const stream = ai.chat({ adapter: "openai", model: "gpt-4", messages: [] });
-```
-
-## Benefits
-
-1. **Clearer API**: Separate methods for different use cases
-2. **Consistent Interface**: Same options across both methods
-3. **HTTP Streaming Made Easy**: Use `toStreamResponse()` helper
-4. **Fallbacks Everywhere**: Both methods support the same fallback mechanism
-5. **Type Safety**: TypeScript infers the correct return type
-6. **Structured Outputs**: Available in `chatCompletion()` method
-
-## Real-World Example: TanStack Start API
-
-```typescript
-import { createAPIFileRoute } from "@tanstack/start/api";
-import { ai } from "~/lib/ai-client";
-import { toStreamResponse } from "@tanstack/ai";
-
-export const Route = createAPIFileRoute("/api/chat")({
- POST: async ({ request }) => {
- const { messages, tools } = await request.json();
-
- const stream = ai.chat({
- adapter: "openAi",
- model: "gpt-4o",
- messages,
- tools,
- toolChoice: "auto",
- maxIterations: 5,
- temperature: 0.7,
- fallbacks: [{ adapter: "ollama", model: "llama2" }],
- });
-
- return toStreamResponse(stream);
- },
-});
-```
-
-Client-side consumption:
-
-```typescript
-const response = await fetch("/api/chat", {
- method: "POST",
- body: JSON.stringify({ messages, tools }),
-});
-
-const reader = response.body!.getReader();
-const decoder = new TextDecoder();
-
-while (true) {
- const { done, value } = await reader.read();
- if (done) break;
-
- const text = decoder.decode(value);
- const lines = text.split("\n\n");
-
- for (const line of lines) {
- if (line.startsWith("data: ")) {
- const data = line.slice(6);
- if (data === "[DONE]") continue;
-
- const chunk = JSON.parse(data);
- if (chunk.type === "content") {
- console.log(chunk.delta); // Stream content to UI
- }
- }
- }
-}
-```
-
-## Summary
-
-The unified chat API provides:
-
-- **Two methods**: `chat()` for streaming, `chatCompletion()` for promises
-- **Same options** across both methods
-- **Built-in HTTP streaming** helper (`toStreamResponse`)
-- **Full fallback support** in both methods
-- **Type-safe** return types
-- **Simpler code** for common patterns
+# Unified Chat API
+
+## Overview
+
+The chat API provides two methods for different use cases:
+
+- **`chat()`** - Returns `AsyncIterable` - streaming with **automatic tool execution loop**
+- **`chatCompletion()`** - Returns `Promise` - standard non-streaming chat with optional structured output
+
+### š Automatic Tool Execution in `chat()`
+
+**IMPORTANT:** The `chat()` method runs an automatic tool execution loop. When you provide tools with `execute` functions:
+
+1. **Model calls a tool** ā SDK executes it automatically
+2. **SDK emits chunks** for tool calls and results (`tool_call`, `tool_result`)
+3. **SDK adds results** to messages and continues conversation
+4. **Loop repeats** until stopped by `agentLoopStrategy` (default: `maxIterations(5)`)
+
+**You don't need to manually execute tools or manage conversation state** - the SDK handles everything internally!
+
+**š See also:** [Complete Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md)
+
+## Migration Guide
+
+### Before (Using `as` option)
+
+```typescript
+// For non-streaming
+const result = await ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+ as: 'promise',
+})
+
+// For streaming
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+ as: 'stream',
+})
+for await (const chunk of stream) {
+ console.log(chunk)
+}
+
+// For HTTP response
+const response = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+ as: 'response',
+})
+return response
+```
+
+### After (Separate Methods)
+
+```typescript
+// For non-streaming - use chatCompletion()
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+
+// For streaming - use chat()
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+for await (const chunk of stream) {
+ console.log(chunk)
+}
+
+// For HTTP response - use chat() + toStreamResponse()
+import { toStreamResponse } from '@tanstack/ai'
+
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+return toStreamResponse(stream)
+```
+
+## Usage Examples
+
+### 1. Promise Mode (chatCompletion)
+
+Standard non-streaming chat completion:
+
+```typescript
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [
+ { role: 'system', content: 'You are a helpful assistant.' },
+ { role: 'user', content: 'What is TypeScript?' },
+ ],
+ temperature: 0.7,
+})
+
+console.log(result.content)
+console.log(`Tokens used: ${result.usage.totalTokens}`)
+```
+
+### 2. Stream Mode (chat)
+
+Streaming with automatic tool execution loop:
+
+```typescript
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Write a story' }],
+ tools: [weatherTool], // Optional: tools are auto-executed
+ agentLoopStrategy: maxIterations(5), // Optional: control loop
+})
+
+for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ process.stdout.write(chunk.delta) // Stream text response
+ } else if (chunk.type === 'tool_call') {
+ console.log(`ā Calling tool: ${chunk.toolCall.function.name}`)
+ } else if (chunk.type === 'tool_result') {
+ console.log(`ā Tool result: ${chunk.content}`)
+ } else if (chunk.type === 'done') {
+ console.log(`\nFinished: ${chunk.finishReason}`)
+ console.log(`Tokens: ${chunk.usage?.totalTokens}`)
+ }
+}
+```
+
+**Chunk Types:**
+
+- `content` - Text content from the model (use `chunk.delta` for streaming)
+- `tool_call` - Model is calling a tool (emitted by model, auto-executed by SDK)
+- `tool_result` - Tool execution result (emitted after SDK executes tool)
+- `done` - Stream complete (includes `finishReason` and token usage)
+- `error` - An error occurred
+
+### 3. HTTP Response Mode
+
+Perfect for API endpoints:
+
+```typescript
+import { toStreamResponse } from '@tanstack/ai'
+
+// TanStack Start API Route
+export const POST = async ({ request }: { request: Request }) => {
+ const { messages } = await request.json()
+
+ const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4o',
+ messages,
+ temperature: 0.7,
+ })
+
+ // Convert stream to Response with SSE headers
+ return toStreamResponse(stream)
+}
+```
+
+## With Fallbacks
+
+Both methods support fallbacks:
+
+```typescript
+// Promise mode with fallbacks
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+ fallbacks: [
+ { adapter: 'anthropic', model: 'claude-3-sonnet-20240229' },
+ { adapter: 'ollama', model: 'llama2' },
+ ],
+})
+
+// Stream mode with fallbacks
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+ fallbacks: [{ adapter: 'anthropic', model: 'claude-3-sonnet-20240229' }],
+})
+
+// HTTP response with fallbacks (seamless failover in HTTP streaming!)
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+ fallbacks: [{ adapter: 'ollama', model: 'llama2' }],
+})
+return toStreamResponse(stream)
+```
+
+## Tool Execution with Automatic Loop
+
+**The `chat()` method automatically executes tools in a loop** - no manual management needed!
+
+```typescript
+const tools = [
+ {
+ type: 'function' as const,
+ function: {
+ name: 'get_weather',
+ description: 'Get weather for a location',
+ parameters: {
+ type: 'object',
+ properties: {
+ location: { type: 'string' },
+ },
+ required: ['location'],
+ },
+ },
+ execute: async (args: { location: string }) => {
+ // This function is automatically called by the SDK
+ const weather = await fetchWeatherAPI(args.location)
+ return JSON.stringify(weather)
+ },
+ },
+]
+
+// Streaming chat with automatic tool execution
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: "What's the weather in SF?" }],
+ tools, // Tools with execute functions are auto-executed
+ toolChoice: 'auto',
+ agentLoopStrategy: maxIterations(5), // Control loop behavior
+})
+
+for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ process.stdout.write(chunk.delta) // Stream text response
+ } else if (chunk.type === 'tool_call') {
+ // Model decided to call a tool - SDK will execute it automatically
+ console.log(`ā Calling: ${chunk.toolCall.function.name}`)
+ } else if (chunk.type === 'tool_result') {
+ // SDK executed the tool and got a result
+ console.log(`ā Result: ${chunk.content}`)
+ } else if (chunk.type === 'done') {
+ console.log(`Finished: ${chunk.finishReason}`)
+ }
+}
+```
+
+**š What Happens Internally:**
+
+1. User asks: "What's the weather in SF?"
+2. Model decides to call `get_weather` tool
+ - SDK emits `tool_call` chunk
+3. **SDK automatically executes** `tools[0].execute({ location: "SF" })`
+ - SDK emits `tool_result` chunk
+4. SDK adds assistant message (with tool call) + tool result to messages
+5. **SDK automatically continues** conversation by calling model again
+6. Model responds: "The weather in SF is sunny, 72°F"
+ - SDK emits `content` chunks
+7. SDK emits `done` chunk
+
+**Key Points:**
+
+- ā Tools are **automatically executed** by the SDK (you don't call `execute`)
+- ā Tool results are **automatically added** to messages
+- ā Conversation **automatically continues** after tool execution
+- ā Loop controlled by `agentLoopStrategy` (default: `maxIterations(5)`)
+- ā All you do is handle chunks for display
+- ā Custom strategies available for advanced control
+
+**Promise Mode (No Tool Execution):**
+
+The `chatCompletion()` method does NOT execute tools - it returns the model's response immediately:
+
+```typescript
+// chatCompletion does not execute tools
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: "What's the weather in SF?" }],
+ tools,
+})
+
+// If model wanted to call a tool, result.toolCalls will contain the calls
+// but they won't be executed. This is useful if you want manual control.
+if (result.toolCalls) {
+ console.log('Model wants to call:', result.toolCalls)
+ // You would execute manually and call chatCompletion again
+}
+```
+
+## Type Safety
+
+TypeScript automatically infers the correct return type:
+
+```typescript
+// Type: Promise
+const promise = ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+})
+
+// Type: AsyncIterable
+const stream = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] })
+```
+
+## Benefits
+
+1. **Clearer API**: Separate methods for different use cases
+2. **Consistent Interface**: Same options across both methods
+3. **HTTP Streaming Made Easy**: Use `toStreamResponse()` helper
+4. **Fallbacks Everywhere**: Both methods support the same fallback mechanism
+5. **Type Safety**: TypeScript infers the correct return type
+6. **Structured Outputs**: Available in `chatCompletion()` method
+
+## Real-World Example: TanStack Start API
+
+```typescript
+import { createAPIFileRoute } from '@tanstack/start/api'
+import { ai } from '~/lib/ai-client'
+import { toStreamResponse } from '@tanstack/ai'
+
+export const Route = createAPIFileRoute('/api/chat')({
+ POST: async ({ request }) => {
+ const { messages, tools } = await request.json()
+
+ const stream = ai.chat({
+      adapter: 'openai',
+ model: 'gpt-4o',
+ messages,
+ tools,
+ toolChoice: 'auto',
+      agentLoopStrategy: maxIterations(5),
+ temperature: 0.7,
+ fallbacks: [{ adapter: 'ollama', model: 'llama2' }],
+ })
+
+ return toStreamResponse(stream)
+ },
+})
+```
+
+Client-side consumption:
+
+```typescript
+const response = await fetch('/api/chat', {
+ method: 'POST',
+ body: JSON.stringify({ messages, tools }),
+})
+
+const reader = response.body!.getReader()
+const decoder = new TextDecoder()
+
+while (true) {
+ const { done, value } = await reader.read()
+ if (done) break
+
+ const text = decoder.decode(value)
+ const lines = text.split('\n\n')
+
+ for (const line of lines) {
+ if (line.startsWith('data: ')) {
+ const data = line.slice(6)
+ if (data === '[DONE]') continue
+
+ const chunk = JSON.parse(data)
+ if (chunk.type === 'content') {
+ console.log(chunk.delta) // Stream content to UI
+ }
+ }
+ }
+}
+```
+
+## Summary
+
+The unified chat API provides:
+
+- **Two methods**: `chat()` for streaming, `chatCompletion()` for promises
+- **Same options** across both methods
+- **Built-in HTTP streaming** helper (`toStreamResponse`)
+- **Full fallback support** in both methods
+- **Type-safe** return types
+- **Simpler code** for common patterns
diff --git a/ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md b/ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md
index 440894e41..a75ad3c2b 100644
--- a/ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md
+++ b/ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md
@@ -1,246 +1,257 @@
-# Unified Chat API - Implementation Summary
-
-> **Note**: This document describes the historical implementation with the `as` option. The current API uses separate methods: `chat()` for streaming and `chatCompletion()` for promise-based completion. See `docs/UNIFIED_CHAT_API.md` for current API documentation.
-
-## Overview
-
-The chat API was previously unified using an `as` configuration option. The current implementation separates streaming and promise-based completion into distinct methods:
-
-- **`chat()`** - Always returns `AsyncIterable` (streaming)
-- **`chatCompletion()`** - Always returns `Promise` (promise-based)
-
-## Current API Design
-
-### Method Separation
-
-```typescript
-class AI {
- // Streaming method with automatic tool execution loop
- async *chat(options): AsyncIterable {
- // Manages tool execution internally using ToolCallManager
- const toolCallManager = new ToolCallManager(options.tools || []);
-
- while (iterationCount < maxIterations) {
- // Stream from adapter
- for await (const chunk of this.adapter.chatStream(options)) {
- yield chunk;
-
- // Track tool calls
- if (chunk.type === "tool_call") {
- toolCallManager.addToolCallChunk(chunk);
- }
- }
-
- // Execute tools if needed
- if (shouldExecuteTools && toolCallManager.hasToolCalls()) {
- const toolResults = yield* toolCallManager.executeTools(doneChunk);
- messages = [...messages, ...toolResults];
- continue; // Next iteration
- }
-
- break; // Done
- }
- }
-
- // Promise-based method (no tool execution loop)
- async chatCompletion(options): Promise {
- return this.adapter.chatCompletion(options);
- }
-}
-```
-
-### ToolCallManager Class
-
-The tool execution logic is extracted into a dedicated `ToolCallManager` class:
-
-```typescript
-class ToolCallManager {
- // Accumulate tool calls from streaming chunks
- addToolCallChunk(chunk): void;
-
- // Check if there are tool calls to execute
- hasToolCalls(): boolean;
-
- // Get all complete tool calls
- getToolCalls(): ToolCall[];
-
- // Execute tools and yield tool_result chunks
- async *executeTools(doneChunk): AsyncGenerator;
-
- // Clear for next iteration
- clear(): void;
-}
-```
-
-**Benefits:**
-- ā **Separation of concerns** - tool logic isolated from chat logic
-- ā **Testable** - ToolCallManager can be unit tested independently
-- ā **Maintainable** - changes to tool execution don't affect chat method
-- ā **Reusable** - can be used in other contexts if needed
-
-### Benefits of Separate Methods
-
-ā **Clearer API**: Method names indicate return type
-ā **Better Type Inference**: TypeScript knows exact return type without overloads
-ā **Simpler Implementation**: No need for discriminated unions
-ā **Easier to Use**: Less cognitive overhead
-
-## Usage Examples
-
-### 1. Promise Mode (chatCompletion)
-
-```typescript
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-```
-
-### 2. Stream Mode (chat)
-
-```typescript
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-
-for await (const chunk of stream) {
- console.log(chunk);
-}
-```
-
-### 3. HTTP Response Mode
-
-```typescript
-import { toStreamResponse } from "@tanstack/ai";
-
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-
-return toStreamResponse(stream);
-```
-
-## Historical Context
-
-The `as` option approach was implemented to unify `chat()` and `streamChat()` methods. However, separate methods provide better developer experience and type safety.
-
-### Migration Path
-
-See `docs/MIGRATION_UNIFIED_CHAT.md` for migration guide from the `as` option API to the current separate methods API.
-
-## Features Preserved
-
-ā **All features still supported**:
-- Discriminated union types for adapter-model pairs
-- Fallback mechanism (single-with-fallbacks or fallbacks-only)
-- **Automatic tool execution loop** (via `ToolCallManager`)
-- Error chunk detection for streaming
-- Type-safe model selection
-
-ā **No breaking changes** to core functionality:
-- Streaming behavior matches old `streamChat()` method
-- Promise behavior matches old `chat()` method
-- Error handling and fallbacks work identically
-- **Tool execution now handled by `ToolCallManager` class**
-
-## Files Changed
-
-### Core Implementation
-- ā `packages/ai/src/ai.ts`
- - Removed `as` option from `chat()` method
- - Made `chat()` streaming-only with automatic tool execution loop
- - Added `chatCompletion()` method for promise-based calls
- - Removed `streamToResponse()` private method (use `toStreamResponse()` from `stream-to-response.ts`)
- - Refactored to use `ToolCallManager` for tool execution
-
-- ā `packages/ai/src/tool-call-manager.ts` (NEW)
- - Encapsulates tool call accumulation, validation, and execution
- - Independently testable
- - Yields `tool_result` chunks during execution
- - Returns tool result messages for conversation history
-
-- ā `packages/ai/src/types.ts`
- - Added `ToolResultStreamChunk` type
- - Added `"tool_result"` to `StreamChunkType` union
- - Updated `StreamChunk` union to include `ToolResultStreamChunk`
-
-### Documentation
-- ā `docs/UNIFIED_CHAT_API.md` - Updated API documentation with tool execution details
-- ā `docs/MIGRATION_UNIFIED_CHAT.md` - Migration guide
-- ā `docs/UNIFIED_CHAT_QUICK_REFERENCE.md` - Quick reference updated
-- ā `docs/TOOL_EXECUTION_LOOP.md` (NEW) - Comprehensive tool execution guide
-- ā `README.md` - Updated with tool execution loop documentation
-- ā `examples/cli/README.md` - Updated with automatic tool execution details
-- ā `packages/ai-react/README.md` - Updated backend examples with tool execution
-- ā `packages/ai-client/README.md` - Added backend example with tool execution
-
-## Benefits of Current Approach
-
-1. **Simpler API Surface** - Two clear methods instead of one with options
-2. **Consistent Interface** - Same options across both methods
-3. **HTTP Streaming Made Easy** - Use `toStreamResponse()` helper
-4. **Better Developer Experience** - Clear intent with method names
-5. **Type Safety Maintained** - All discriminated unions still work
-6. **Backward Compatible Migration** - Easy to migrate from old API
-7. **Fallbacks Everywhere** - Both methods support same fallback mechanism
-8. **Automatic Tool Execution** - `chat()` handles tool calling in a loop via `ToolCallManager`
-9. **Testable Architecture** - Tool execution logic isolated in separate class
-10. **Clean Separation** - `chat()` for streaming+tools, `chatCompletion()` for promises+structured output
-
-## Testing Recommendations
-
-Test scenarios:
-1. ā Promise mode with primary adapter
-2. ā Promise mode with fallbacks
-3. ā Stream mode with primary adapter
-4. ā Stream mode with fallbacks
-5. ā HTTP response mode with primary adapter
-6. ā HTTP response mode with fallbacks
-7. ā Automatic tool execution in `chat()` (via `ToolCallManager`)
-8. ā Manual tool handling in `chatCompletion()`
-9. ā Error chunk detection triggers fallbacks
-10. ā Type inference for both methods
-11. ā Fallback-only mode (no primary adapter)
-12. ā `ToolCallManager` unit tests (accumulation, validation, execution)
-13. ā Multi-round tool execution (up to `maxIterations`)
-14. ā Tool execution error handling
-
-## Next Steps
-
-### For Users
-1. **Update method calls**:
- - `chat({ as: "promise" })` ā `chatCompletion()`
- - `chat({ as: "stream" })` ā `chat()`
- - `chat({ as: "response" })` ā `chat()` + `toStreamResponse()`
-2. **Update imports**: Add `toStreamResponse` import if needed
-3. **Test fallback behavior**: Verify seamless failover in all modes
-
-### Testing ToolCallManager
-
-The `ToolCallManager` class is independently testable. See `packages/ai/src/tool-call-manager.test.ts` for unit tests.
-
-Test scenarios:
-- ā Accumulating streaming tool call chunks
-- ā Filtering incomplete tool calls
-- ā Executing tools with valid arguments
-- ā Handling tool execution errors
-- ā Handling tools without execute functions
-- ā Multiple tool calls in one iteration
-- ā Clearing tool calls between iterations
-
-### Future Enhancements
-- Consider adding structured output support to streaming
-- Add streaming response mode to embeddings
-- Document SSE format for client-side consumption
-- Add examples for different frameworks (Express, Fastify, etc.)
-
-## Conclusion
-
-Separating `chat()` and `chatCompletion()` provides a cleaner, more intuitive interface while maintaining all existing functionality. The two-method design covers all common use cases with clear, type-safe APIs.
-
-**Key Achievement**: Clear separation of concerns with `chat()` for streaming and `chatCompletion()` for promises, eliminating the need for a configuration option.
+# Unified Chat API - Implementation Summary
+
+> **Note**: This document describes the historical implementation with the `as` option. The current API uses separate methods: `chat()` for streaming and `chatCompletion()` for promise-based completion. See `docs/UNIFIED_CHAT_API.md` for current API documentation.
+
+## Overview
+
+The chat API was previously unified using an `as` configuration option. The current implementation separates streaming and promise-based completion into distinct methods:
+
+- **`chat()`** - Always returns `AsyncIterable` (streaming)
+- **`chatCompletion()`** - Always returns `Promise` (promise-based)
+
+## Current API Design
+
+### Method Separation
+
+```typescript
+class AI {
+ // Streaming method with automatic tool execution loop
+ async *chat(options): AsyncIterable {
+ // Manages tool execution internally using ToolCallManager
+ const toolCallManager = new ToolCallManager(options.tools || [])
+
+ while (iterationCount < maxIterations) {
+ // Stream from adapter
+ for await (const chunk of this.adapter.chatStream(options)) {
+ yield chunk
+
+ // Track tool calls
+ if (chunk.type === 'tool_call') {
+ toolCallManager.addToolCallChunk(chunk)
+ }
+ }
+
+ // Execute tools if needed
+ if (shouldExecuteTools && toolCallManager.hasToolCalls()) {
+ const toolResults = yield* toolCallManager.executeTools(doneChunk)
+ messages = [...messages, ...toolResults]
+ continue // Next iteration
+ }
+
+ break // Done
+ }
+ }
+
+ // Promise-based method (no tool execution loop)
+ async chatCompletion(options): Promise {
+ return this.adapter.chatCompletion(options)
+ }
+}
+```
+
+### ToolCallManager Class
+
+The tool execution logic is extracted into a dedicated `ToolCallManager` class:
+
+```typescript
+class ToolCallManager {
+ // Accumulate tool calls from streaming chunks
+ addToolCallChunk(chunk): void
+
+ // Check if there are tool calls to execute
+ hasToolCalls(): boolean
+
+ // Get all complete tool calls
+ getToolCalls(): ToolCall[]
+
+ // Execute tools and yield tool_result chunks
+ async *executeTools(
+ doneChunk,
+ ): AsyncGenerator
+
+ // Clear for next iteration
+ clear(): void
+}
+```
+
+**Benefits:**
+
+- ✅ **Separation of concerns** - tool logic isolated from chat logic
+- ✅ **Testable** - ToolCallManager can be unit tested independently
+- ✅ **Maintainable** - changes to tool execution don't affect chat method
+- ✅ **Reusable** - can be used in other contexts if needed
+
+### Benefits of Separate Methods
+
+✅ **Clearer API**: Method names indicate return type
+✅ **Better Type Inference**: TypeScript knows exact return type without overloads
+✅ **Simpler Implementation**: No need for discriminated unions
+✅ **Easier to Use**: Less cognitive overhead
+
+## Usage Examples
+
+### 1. Promise Mode (chatCompletion)
+
+```typescript
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+```
+
+### 2. Stream Mode (chat)
+
+```typescript
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+
+for await (const chunk of stream) {
+ console.log(chunk)
+}
+```
+
+### 3. HTTP Response Mode
+
+```typescript
+import { toStreamResponse } from '@tanstack/ai'
+
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+
+return toStreamResponse(stream)
+```
+
+## Historical Context
+
+The `as` option approach was implemented to unify `chat()` and `streamChat()` methods. However, separate methods provide better developer experience and type safety.
+
+### Migration Path
+
+See `docs/MIGRATION_UNIFIED_CHAT.md` for migration guide from the `as` option API to the current separate methods API.
+
+## Features Preserved
+
+✅ **All features still supported**:
+
+- Discriminated union types for adapter-model pairs
+- Fallback mechanism (single-with-fallbacks or fallbacks-only)
+- **Automatic tool execution loop** (via `ToolCallManager`)
+- Error chunk detection for streaming
+- Type-safe model selection
+
+✅ **No breaking changes** to core functionality:
+
+- Streaming behavior matches old `streamChat()` method
+- Promise behavior matches old `chat()` method
+- Error handling and fallbacks work identically
+- **Tool execution now handled by `ToolCallManager` class**
+
+## Files Changed
+
+### Core Implementation
+
+- ✅ `packages/ai/src/ai.ts`
+ - Removed `as` option from `chat()` method
+ - Made `chat()` streaming-only with automatic tool execution loop
+ - Added `chatCompletion()` method for promise-based calls
+ - Removed `streamToResponse()` private method (use `toStreamResponse()` from `stream-to-response.ts`)
+ - Refactored to use `ToolCallManager` for tool execution
+
+- ✅ `packages/ai/src/tool-call-manager.ts` (NEW)
+ - Encapsulates tool call accumulation, validation, and execution
+ - Independently testable
+ - Yields `tool_result` chunks during execution
+ - Returns tool result messages for conversation history
+
+- ✅ `packages/ai/src/types.ts`
+ - Added `ToolResultStreamChunk` type
+ - Added `"tool_result"` to `StreamChunkType` union
+ - Updated `StreamChunk` union to include `ToolResultStreamChunk`
+
+### Documentation
+
+- ✅ `docs/UNIFIED_CHAT_API.md` - Updated API documentation with tool execution details
+- ✅ `docs/MIGRATION_UNIFIED_CHAT.md` - Migration guide
+- ✅ `docs/UNIFIED_CHAT_QUICK_REFERENCE.md` - Quick reference updated
+- ✅ `docs/TOOL_EXECUTION_LOOP.md` (NEW) - Comprehensive tool execution guide
+- ✅ `README.md` - Updated with tool execution loop documentation
+- ✅ `examples/cli/README.md` - Updated with automatic tool execution details
+- ✅ `packages/ai-react/README.md` - Updated backend examples with tool execution
+- ✅ `packages/ai-client/README.md` - Added backend example with tool execution
+
+## Benefits of Current Approach
+
+1. **Simpler API Surface** - Two clear methods instead of one with options
+2. **Consistent Interface** - Same options across both methods
+3. **HTTP Streaming Made Easy** - Use `toStreamResponse()` helper
+4. **Better Developer Experience** - Clear intent with method names
+5. **Type Safety Maintained** - All discriminated unions still work
+6. **Backward Compatible Migration** - Easy to migrate from old API
+7. **Fallbacks Everywhere** - Both methods support same fallback mechanism
+8. **Automatic Tool Execution** - `chat()` handles tool calling in a loop via `ToolCallManager`
+9. **Testable Architecture** - Tool execution logic isolated in separate class
+10. **Clean Separation** - `chat()` for streaming+tools, `chatCompletion()` for promises+structured output
+
+## Testing Recommendations
+
+Test scenarios:
+
+1. ✅ Promise mode with primary adapter
+2. ✅ Promise mode with fallbacks
+3. ✅ Stream mode with primary adapter
+4. ✅ Stream mode with fallbacks
+5. ✅ HTTP response mode with primary adapter
+6. ✅ HTTP response mode with fallbacks
+7. ✅ Automatic tool execution in `chat()` (via `ToolCallManager`)
+8. ✅ Manual tool handling in `chatCompletion()`
+9. ✅ Error chunk detection triggers fallbacks
+10. ✅ Type inference for both methods
+11. ✅ Fallback-only mode (no primary adapter)
+12. ✅ `ToolCallManager` unit tests (accumulation, validation, execution)
+13. ✅ Multi-round tool execution (up to `maxIterations`)
+14. ✅ Tool execution error handling
+
+## Next Steps
+
+### For Users
+
+1. **Update method calls**:
+   - `chat({ as: "promise" })` → `chatCompletion()`
+   - `chat({ as: "stream" })` → `chat()`
+   - `chat({ as: "response" })` → `chat()` + `toStreamResponse()`
+2. **Update imports**: Add `toStreamResponse` import if needed
+3. **Test fallback behavior**: Verify seamless failover in all modes
+
+### Testing ToolCallManager
+
+The `ToolCallManager` class is independently testable. See `packages/ai/src/tool-call-manager.test.ts` for unit tests.
+
+Test scenarios:
+
+- ✅ Accumulating streaming tool call chunks
+- ✅ Filtering incomplete tool calls
+- ✅ Executing tools with valid arguments
+- ✅ Handling tool execution errors
+- ✅ Handling tools without execute functions
+- ✅ Multiple tool calls in one iteration
+- ✅ Clearing tool calls between iterations
+
+### Future Enhancements
+
+- Consider adding structured output support to streaming
+- Add streaming response mode to embeddings
+- Document SSE format for client-side consumption
+- Add examples for different frameworks (Express, Fastify, etc.)
+
+## Conclusion
+
+Separating `chat()` and `chatCompletion()` provides a cleaner, more intuitive interface while maintaining all existing functionality. The two-method design covers all common use cases with clear, type-safe APIs.
+
+**Key Achievement**: Clear separation of concerns with `chat()` for streaming and `chatCompletion()` for promises, eliminating the need for a configuration option.
diff --git a/ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md b/ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md
index 6d9e0e85b..e0ec12357 100644
--- a/ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md
+++ b/ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md
@@ -1,329 +1,329 @@
-# Unified Chat API - Quick Reference
-
-> **š Automatic Tool Execution:** The `chat()` method runs an automatic tool execution loop. Tools with `execute` functions are automatically called, results are added to messages, and the conversation continues - all handled internally by the SDK!
->
-> **š See also:** [Complete Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md)
-
-## Two Methods for Different Use Cases
-
-```typescript
-// 1. CHATCOMPLETION - Returns Promise
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
-});
-
-// 2. CHAT - Returns AsyncIterable with automatic tool execution loop
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "Hello" }],
- tools: [weatherTool], // Optional: auto-executed when called
- agentLoopStrategy: maxIterations(5), // Optional: control loop
-});
-for await (const chunk of stream) {
- if (chunk.type === "content") process.stdout.write(chunk.delta);
- else if (chunk.type === "tool_call") console.log("Calling tool...");
- else if (chunk.type === "tool_result") console.log("Tool executed!");
-}
-```
-
-## Quick Comparison
-
-| Feature | chatCompletion | chat |
-| --------------------- | ------------------------------- | ---------------------------- |
-| **Return Type** | `Promise` | `AsyncIterable` |
-| **When to Use** | Need complete response | Real-time streaming |
-| **Async/Await** | ā Yes | ā Yes (for await) |
-| **Fallbacks** | ā Yes | ā Yes |
-| **Tool Execution** | ā No (manual) | ā **Automatic loop** |
-| **Type-Safe Models** | ā Yes | ā Yes |
-| **Structured Output** | ā Yes | ā No |
-
-## Common Patterns
-
-### API Endpoint (TanStack Start)
-
-```typescript
-import { toStreamResponse } from "@tanstack/ai";
-
-export const Route = createAPIFileRoute("/api/chat")({
- POST: async ({ request }) => {
- const { messages } = await request.json();
-
- const stream = ai.chat({
- adapter: "openAi",
- model: "gpt-4o",
- messages,
- fallbacks: [{ adapter: "ollama", model: "llama2" }],
- });
-
- return toStreamResponse(stream);
- },
-});
-```
-
-### CLI Application
-
-```typescript
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: userInput }],
-});
-
-for await (const chunk of stream) {
- if (chunk.type === "content") {
- process.stdout.write(chunk.delta);
- }
-}
-```
-
-### Batch Processing
-
-```typescript
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: document }],
-});
-
-await saveToDatabase(result.content);
-```
-
-## With Tools
-
-### Automatic Execution (chat)
-
-The `chat()` method **automatically executes tools in a loop**:
-
-```typescript
-const tools = [
- {
- type: "function" as const,
- function: {
- name: "get_weather",
- description: "Get weather for a location",
- parameters: {
- /* ... */
- },
- },
- execute: async (args: any) => {
- // SDK automatically calls this when model calls the tool
- return JSON.stringify({ temp: 72, condition: "sunny" });
- },
- },
-];
-
-// Stream mode with automatic tool execution
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "What's the weather in SF?" }],
- tools, // Tools with execute functions are auto-executed
- toolChoice: "auto",
- agentLoopStrategy: maxIterations(5), // Control loop behavior
-});
-
-for await (const chunk of stream) {
- if (chunk.type === "content") {
- process.stdout.write(chunk.delta);
- } else if (chunk.type === "tool_call") {
- console.log(`ā Calling: ${chunk.toolCall.function.name}`);
- } else if (chunk.type === "tool_result") {
- console.log(`ā Result: ${chunk.content}`);
- }
-}
-```
-
-**How it works:**
-
-1. Model decides to call a tool ā `tool_call` chunk
-2. SDK executes `tool.execute()` ā `tool_result` chunk
-3. SDK adds result to messages ā continues conversation
-4. Repeats until complete (up to `maxIterations`)
-
-### Manual Execution (chatCompletion)
-
-The `chatCompletion()` method does NOT execute tools automatically:
-
-```typescript
-// chatCompletion returns tool calls but doesn't execute them
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [{ role: "user", content: "What's the weather in SF?" }],
- tools,
-});
-
-// Check if model wants to call tools
-if (result.toolCalls) {
- console.log("Model wants to call:", result.toolCalls);
- // You must execute manually and call chatCompletion again
-}
-```
-
-## With Fallbacks
-
-Both methods support the same fallback mechanism:
-
-```typescript
-// Promise with fallbacks
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- fallbacks: [
- { adapter: "anthropic", model: "claude-3-sonnet-20240229" },
- { adapter: "ollama", model: "llama2" }
- ]
-});
-
-// Stream with fallbacks
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- fallbacks: [
- { adapter: "ollama", model: "llama2" }
- ]
-});
-
-// HTTP response with fallbacks (seamless HTTP failover!)
-import { toStreamResponse } from "@tanstack/ai";
-
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- fallbacks: [
- { adapter: "ollama", model: "llama2" }
- ]
-});
-return toStreamResponse(stream);
-```
-
-## Fallback-Only Mode
-
-No primary adapter, just try fallbacks in order:
-
-```typescript
-const result = await ai.chatCompletion({
- messages: [...],
- fallbacks: [
- { adapter: "openai", model: "gpt-4" },
- { adapter: "anthropic", model: "claude-3-sonnet-20240229" },
- { adapter: "ollama", model: "llama2" }
- ],
-});
-```
-
-## Migration from Old API
-
-### Before (using `as` option)
-
-```typescript
-// Non-streaming
-const result = await ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [],
- as: "promise",
-});
-
-// Streaming
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [],
- as: "stream",
-});
-
-// HTTP Response
-const response = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [],
- as: "response",
-});
-```
-
-### After (separate methods)
-
-```typescript
-// Non-streaming - use chatCompletion()
-const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [],
-});
-
-// Streaming - use chat()
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [],
-});
-
-// HTTP Response - use chat() + toStreamResponse()
-import { toStreamResponse } from "@tanstack/ai";
-
-const stream = ai.chat({
- adapter: "openai",
- model: "gpt-4",
- messages: [],
-});
-return toStreamResponse(stream);
-```
-
-## Type Inference
-
-TypeScript automatically infers the correct return type:
-
-```typescript
-// Type: Promise
-const promise = ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [],
-});
-
-// Type: AsyncIterable
-const stream = ai.chat({ adapter: "openai", model: "gpt-4", messages: [] });
-```
-
-## Error Handling
-
-Both methods throw errors if all adapters fail:
-
-```typescript
-try {
- const result = await ai.chatCompletion({
- adapter: "openai",
- model: "gpt-4",
- messages: [...],
- fallbacks: [{ adapter: "ollama", model: "llama2" }]
- });
-} catch (error: any) {
- console.error("All adapters failed:", error.message);
-}
-```
-
-## Cheat Sheet
-
-| What You Want | Use This | Example |
-| ----------------- | ------------------------------------ | ----------------------------------------------------- |
-| Complete response | `chatCompletion()` | `const result = await ai.chatCompletion({...})` |
-| Custom streaming | `chat()` | `for await (const chunk of ai.chat({...}))` |
-| API endpoint | `chat()` + `toStreamResponse()` | `return toStreamResponse(ai.chat({...}))` |
-| With fallbacks | Add `fallbacks: [...]` | `fallbacks: [{ adapter: "ollama", model: "llama2" }]` |
-| With tools | Add `tools: [...]` | `tools: [{...}, {...}], toolChoice: "auto"` |
-| Multiple adapters | Use `fallbacks` only | `fallbacks: [{ adapter: "a", model: "m1" }, {...}]` |
-| Structured output | Use `chatCompletion()` with `output` | `chatCompletion({..., output: schema })` |
-
-## Documentation
-
-- **Full API Docs**: `docs/UNIFIED_CHAT_API.md`
-- **Migration Guide**: `docs/MIGRATION_UNIFIED_CHAT.md`
-- **Implementation**: `docs/UNIFIED_CHAT_IMPLEMENTATION.md`
+# Unified Chat API - Quick Reference
+
+> **š Automatic Tool Execution:** The `chat()` method runs an automatic tool execution loop. Tools with `execute` functions are automatically called, results are added to messages, and the conversation continues - all handled internally by the SDK!
+>
+> **š See also:** [Complete Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md)
+
+## Two Methods for Different Use Cases
+
+```typescript
+// 1. CHATCOMPLETION - Returns Promise
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+})
+
+// 2. CHAT - Returns AsyncIterable with automatic tool execution loop
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello' }],
+ tools: [weatherTool], // Optional: auto-executed when called
+ agentLoopStrategy: maxIterations(5), // Optional: control loop
+})
+for await (const chunk of stream) {
+ if (chunk.type === 'content') process.stdout.write(chunk.delta)
+ else if (chunk.type === 'tool_call') console.log('Calling tool...')
+ else if (chunk.type === 'tool_result') console.log('Tool executed!')
+}
+```
+
+## Quick Comparison
+
+| Feature | chatCompletion | chat |
+| --------------------- | ------------------------------- | ---------------------------- |
+| **Return Type** | `Promise` | `AsyncIterable` |
+| **When to Use** | Need complete response | Real-time streaming |
+| **Async/Await**       | ✅ Yes                           | ✅ Yes (for await)            |
+| **Fallbacks**         | ✅ Yes                           | ✅ Yes                        |
+| **Tool Execution**    | ❌ No (manual)                   | ✅ **Automatic loop**         |
+| **Type-Safe Models**  | ✅ Yes                           | ✅ Yes                        |
+| **Structured Output** | ✅ Yes                           | ❌ No                         |
+
+## Common Patterns
+
+### API Endpoint (TanStack Start)
+
+```typescript
+import { toStreamResponse } from '@tanstack/ai'
+
+export const Route = createAPIFileRoute('/api/chat')({
+ POST: async ({ request }) => {
+ const { messages } = await request.json()
+
+ const stream = ai.chat({
+ adapter: 'openAi',
+ model: 'gpt-4o',
+ messages,
+ fallbacks: [{ adapter: 'ollama', model: 'llama2' }],
+ })
+
+ return toStreamResponse(stream)
+ },
+})
+```
+
+### CLI Application
+
+```typescript
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: userInput }],
+})
+
+for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ process.stdout.write(chunk.delta)
+ }
+}
+```
+
+### Batch Processing
+
+```typescript
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: document }],
+})
+
+await saveToDatabase(result.content)
+```
+
+## With Tools
+
+### Automatic Execution (chat)
+
+The `chat()` method **automatically executes tools in a loop**:
+
+```typescript
+const tools = [
+ {
+ type: 'function' as const,
+ function: {
+ name: 'get_weather',
+ description: 'Get weather for a location',
+ parameters: {
+ /* ... */
+ },
+ },
+ execute: async (args: any) => {
+ // SDK automatically calls this when model calls the tool
+ return JSON.stringify({ temp: 72, condition: 'sunny' })
+ },
+ },
+]
+
+// Stream mode with automatic tool execution
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: "What's the weather in SF?" }],
+ tools, // Tools with execute functions are auto-executed
+ toolChoice: 'auto',
+ agentLoopStrategy: maxIterations(5), // Control loop behavior
+})
+
+for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ process.stdout.write(chunk.delta)
+ } else if (chunk.type === 'tool_call') {
+ console.log(`ā Calling: ${chunk.toolCall.function.name}`)
+ } else if (chunk.type === 'tool_result') {
+ console.log(`ā Result: ${chunk.content}`)
+ }
+}
+```
+
+**How it works:**
+
+1. Model decides to call a tool → `tool_call` chunk
+2. SDK executes `tool.execute()` → `tool_result` chunk
+3. SDK adds result to messages → continues conversation
+4. Repeats until complete (up to `maxIterations`)
+
+### Manual Execution (chatCompletion)
+
+The `chatCompletion()` method does NOT execute tools automatically:
+
+```typescript
+// chatCompletion returns tool calls but doesn't execute them
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: "What's the weather in SF?" }],
+ tools,
+})
+
+// Check if model wants to call tools
+if (result.toolCalls) {
+ console.log('Model wants to call:', result.toolCalls)
+ // You must execute manually and call chatCompletion again
+}
+```
+
+## With Fallbacks
+
+Both methods support the same fallback mechanism:
+
+```typescript
+// Promise with fallbacks
+const result = await ai.chatCompletion({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ fallbacks: [
+ { adapter: "anthropic", model: "claude-3-sonnet-20240229" },
+ { adapter: "ollama", model: "llama2" }
+ ]
+});
+
+// Stream with fallbacks
+const stream = ai.chat({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ fallbacks: [
+ { adapter: "ollama", model: "llama2" }
+ ]
+});
+
+// HTTP response with fallbacks (seamless HTTP failover!)
+import { toStreamResponse } from "@tanstack/ai";
+
+const stream = ai.chat({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ fallbacks: [
+ { adapter: "ollama", model: "llama2" }
+ ]
+});
+return toStreamResponse(stream);
+```
+
+## Fallback-Only Mode
+
+No primary adapter, just try fallbacks in order:
+
+```typescript
+const result = await ai.chatCompletion({
+ messages: [...],
+ fallbacks: [
+ { adapter: "openai", model: "gpt-4" },
+ { adapter: "anthropic", model: "claude-3-sonnet-20240229" },
+ { adapter: "ollama", model: "llama2" }
+ ],
+});
+```
+
+## Migration from Old API
+
+### Before (using `as` option)
+
+```typescript
+// Non-streaming
+const result = await ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+ as: 'promise',
+})
+
+// Streaming
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+ as: 'stream',
+})
+
+// HTTP Response
+const response = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+ as: 'response',
+})
+```
+
+### After (separate methods)
+
+```typescript
+// Non-streaming - use chatCompletion()
+const result = await ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+})
+
+// Streaming - use chat()
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+})
+
+// HTTP Response - use chat() + toStreamResponse()
+import { toStreamResponse } from '@tanstack/ai'
+
+const stream = ai.chat({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+})
+return toStreamResponse(stream)
+```
+
+## Type Inference
+
+TypeScript automatically infers the correct return type:
+
+```typescript
+// Type: Promise
+const promise = ai.chatCompletion({
+ adapter: 'openai',
+ model: 'gpt-4',
+ messages: [],
+})
+
+// Type: AsyncIterable
+const stream = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] })
+```
+
+## Error Handling
+
+Both methods throw errors if all adapters fail:
+
+```typescript
+try {
+ const result = await ai.chatCompletion({
+ adapter: "openai",
+ model: "gpt-4",
+ messages: [...],
+ fallbacks: [{ adapter: "ollama", model: "llama2" }]
+ });
+} catch (error: any) {
+ console.error("All adapters failed:", error.message);
+}
+```
+
+## Cheat Sheet
+
+| What You Want | Use This | Example |
+| ----------------- | ------------------------------------ | ----------------------------------------------------- |
+| Complete response | `chatCompletion()` | `const result = await ai.chatCompletion({...})` |
+| Custom streaming | `chat()` | `for await (const chunk of ai.chat({...}))` |
+| API endpoint | `chat()` + `toStreamResponse()` | `return toStreamResponse(ai.chat({...}))` |
+| With fallbacks | Add `fallbacks: [...]` | `fallbacks: [{ adapter: "ollama", model: "llama2" }]` |
+| With tools | Add `tools: [...]` | `tools: [{...}, {...}], toolChoice: "auto"` |
+| Multiple adapters | Use `fallbacks` only | `fallbacks: [{ adapter: "a", model: "m1" }, {...}]` |
+| Structured output | Use `chatCompletion()` with `output` | `chatCompletion({..., output: schema })` |
+
+## Documentation
+
+- **Full API Docs**: `docs/UNIFIED_CHAT_API.md`
+- **Migration Guide**: `docs/MIGRATION_UNIFIED_CHAT.md`
+- **Implementation**: `docs/UNIFIED_CHAT_IMPLEMENTATION.md`
diff --git a/config.json b/config.json
new file mode 100644
index 000000000..174cf479e
--- /dev/null
+++ b/config.json
@@ -0,0 +1,14 @@
+{
+ "$schema": "https://unpkg.com/@changesets/config@3.1.1/schema.json",
+ "changelog": [
+ "@svitejs/changesets-changelog-github-compact",
+ { "repo": "TanStack/ai" }
+ ],
+ "commit": false,
+ "access": "public",
+ "baseBranch": "main",
+ "updateInternalDependencies": "patch",
+ "fixed": [],
+ "linked": [],
+ "ignore": []
+}
diff --git a/docs/reference/classes/BaseAdapter.md b/docs/reference/classes/BaseAdapter.md
new file mode 100644
index 000000000..7ea3273e8
--- /dev/null
+++ b/docs/reference/classes/BaseAdapter.md
@@ -0,0 +1,265 @@
+---
+id: BaseAdapter
+title: BaseAdapter
+---
+
+# Abstract Class: BaseAdapter\<TChatModels, TEmbeddingModels, TChatProviderOptions, TEmbeddingProviderOptions, TModelProviderOptionsByName\>
+
+Defined in: [base-adapter.ts:22](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L22)
+
+Base adapter class with support for endpoint-specific models and provider options.
+
+Generic parameters:
+- TChatModels: Models that support chat/text completion
+- TEmbeddingModels: Models that support embeddings
+- TChatProviderOptions: Provider-specific options for chat endpoint
+- TEmbeddingProviderOptions: Provider-specific options for embedding endpoint
+- TModelProviderOptionsByName: Provider-specific options for model by name
+
+## Type Parameters
+
+### TChatModels
+
+`TChatModels` *extends* `ReadonlyArray`\<`string`\> = `ReadonlyArray`\<`string`\>
+
+### TEmbeddingModels
+
+`TEmbeddingModels` *extends* `ReadonlyArray`\<`string`\> = `ReadonlyArray`\<`string`\>
+
+### TChatProviderOptions
+
+`TChatProviderOptions` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\>
+
+### TEmbeddingProviderOptions
+
+`TEmbeddingProviderOptions` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\>
+
+### TModelProviderOptionsByName
+
+`TModelProviderOptionsByName` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\>
+
+## Implements
+
+- [`AIAdapter`](../../interfaces/AIAdapter.md)\<`TChatModels`, `TEmbeddingModels`, `TChatProviderOptions`, `TEmbeddingProviderOptions`, `TModelProviderOptionsByName`\>
+
+## Constructors
+
+### Constructor
+
+```ts
+new BaseAdapter(config): BaseAdapter;
+```
+
+Defined in: [base-adapter.ts:49](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L49)
+
+#### Parameters
+
+##### config
+
+[`AIAdapterConfig`](../../interfaces/AIAdapterConfig.md) = `{}`
+
+#### Returns
+
+`BaseAdapter`\<`TChatModels`, `TEmbeddingModels`, `TChatProviderOptions`, `TEmbeddingProviderOptions`, `TModelProviderOptionsByName`\>
+
+## Properties
+
+### \_chatProviderOptions?
+
+```ts
+optional _chatProviderOptions: TChatProviderOptions;
+```
+
+Defined in: [base-adapter.ts:44](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L44)
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`_chatProviderOptions`](../../interfaces/AIAdapter.md#_chatprovideroptions)
+
+***
+
+### \_embeddingProviderOptions?
+
+```ts
+optional _embeddingProviderOptions: TEmbeddingProviderOptions;
+```
+
+Defined in: [base-adapter.ts:45](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L45)
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`_embeddingProviderOptions`](../../interfaces/AIAdapter.md#_embeddingprovideroptions)
+
+***
+
+### \_modelProviderOptionsByName
+
+```ts
+_modelProviderOptionsByName: TModelProviderOptionsByName;
+```
+
+Defined in: [base-adapter.ts:47](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L47)
+
+Type-only map from model name to its specific provider options.
+Used by the core AI types to narrow providerOptions based on the selected model.
+Must be provided by all adapters.
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`_modelProviderOptionsByName`](../../interfaces/AIAdapter.md#_modelprovideroptionsbyname)
+
+***
+
+### \_providerOptions?
+
+```ts
+optional _providerOptions: TChatProviderOptions;
+```
+
+Defined in: [base-adapter.ts:43](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L43)
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`_providerOptions`](../../interfaces/AIAdapter.md#_provideroptions)
+
+***
+
+### config
+
+```ts
+protected config: AIAdapterConfig;
+```
+
+Defined in: [base-adapter.ts:40](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L40)
+
+***
+
+### embeddingModels?
+
+```ts
+optional embeddingModels: TEmbeddingModels;
+```
+
+Defined in: [base-adapter.ts:39](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L39)
+
+Models that support embeddings
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`embeddingModels`](../../interfaces/AIAdapter.md#embeddingmodels)
+
+***
+
+### models
+
+```ts
+abstract models: TChatModels;
+```
+
+Defined in: [base-adapter.ts:38](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L38)
+
+Models that support chat/text completion
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`models`](../../interfaces/AIAdapter.md#models)
+
+***
+
+### name
+
+```ts
+abstract name: string;
+```
+
+Defined in: [base-adapter.ts:37](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L37)
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`name`](../../interfaces/AIAdapter.md#name)
+
+## Methods
+
+### chatStream()
+
+```ts
+abstract chatStream(options): AsyncIterable;
+```
+
+Defined in: [base-adapter.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L53)
+
+#### Parameters
+
+##### options
+
+[`ChatOptions`](../../interfaces/ChatOptions.md)
+
+#### Returns
+
+`AsyncIterable`\<[`StreamChunk`](../../type-aliases/StreamChunk.md)\>
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`chatStream`](../../interfaces/AIAdapter.md#chatstream)
+
+***
+
+### createEmbeddings()
+
+```ts
+abstract createEmbeddings(options): Promise;
+```
+
+Defined in: [base-adapter.ts:58](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L58)
+
+#### Parameters
+
+##### options
+
+[`EmbeddingOptions`](../../interfaces/EmbeddingOptions.md)
+
+#### Returns
+
+`Promise`\<[`EmbeddingResult`](../../interfaces/EmbeddingResult.md)\>
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`createEmbeddings`](../../interfaces/AIAdapter.md#createembeddings)
+
+***
+
+### generateId()
+
+```ts
+protected generateId(): string;
+```
+
+Defined in: [base-adapter.ts:60](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L60)
+
+#### Returns
+
+`string`
+
+***
+
+### summarize()
+
+```ts
+abstract summarize(options): Promise;
+```
+
+Defined in: [base-adapter.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L55)
+
+#### Parameters
+
+##### options
+
+[`SummarizationOptions`](../../interfaces/SummarizationOptions.md)
+
+#### Returns
+
+`Promise`\<[`SummarizationResult`](../../interfaces/SummarizationResult.md)\>
+
+#### Implementation of
+
+[`AIAdapter`](../../interfaces/AIAdapter.md).[`summarize`](../../interfaces/AIAdapter.md#summarize)
diff --git a/docs/reference/classes/ToolCallManager.md b/docs/reference/classes/ToolCallManager.md
new file mode 100644
index 000000000..1d713c081
--- /dev/null
+++ b/docs/reference/classes/ToolCallManager.md
@@ -0,0 +1,190 @@
+---
+id: ToolCallManager
+title: ToolCallManager
+---
+
+# Class: ToolCallManager
+
+Defined in: [tools/tool-calls.ts:41](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L41)
+
+Manages tool call accumulation and execution for the chat() method's automatic tool execution loop.
+
+Responsibilities:
+- Accumulates streaming tool call chunks (ID, name, arguments)
+- Validates tool calls (filters out incomplete ones)
+- Executes tool `execute` functions with parsed arguments
+- Emits `tool_result` chunks for client visibility
+- Returns tool result messages for conversation history
+
+This class is used internally by the AI.chat() method to handle the automatic
+tool execution loop. It can also be used independently for custom tool execution logic.
+
+## Example
+
+```typescript
+const manager = new ToolCallManager(tools);
+
+// During streaming, accumulate tool calls
+for await (const chunk of stream) {
+ if (chunk.type === "tool_call") {
+ manager.addToolCallChunk(chunk);
+ }
+}
+
+// After stream completes, execute tools
+if (manager.hasToolCalls()) {
+ const toolResults = yield* manager.executeTools(doneChunk);
+ messages = [...messages, ...toolResults];
+ manager.clear();
+}
+```
+
+## Constructors
+
+### Constructor
+
+```ts
+new ToolCallManager(tools): ToolCallManager;
+```
+
+Defined in: [tools/tool-calls.ts:45](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L45)
+
+#### Parameters
+
+##### tools
+
+readonly [`Tool`](../../interfaces/Tool.md)[]
+
+#### Returns
+
+`ToolCallManager`
+
+## Methods
+
+### addToolCallChunk()
+
+```ts
+addToolCallChunk(chunk): void;
+```
+
+Defined in: [tools/tool-calls.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L53)
+
+Add a tool call chunk to the accumulator
+Handles streaming tool calls by accumulating arguments
+
+#### Parameters
+
+##### chunk
+
+###### index
+
+`number`
+
+###### toolCall
+
+\{
+ `function`: \{
+ `arguments`: `string`;
+ `name`: `string`;
+ \};
+ `id`: `string`;
+ `type`: `"function"`;
+\}
+
+###### toolCall.function
+
+\{
+ `arguments`: `string`;
+ `name`: `string`;
+\}
+
+###### toolCall.function.arguments
+
+`string`
+
+###### toolCall.function.name
+
+`string`
+
+###### toolCall.id
+
+`string`
+
+###### toolCall.type
+
+`"function"`
+
+#### Returns
+
+`void`
+
+***
+
+### clear()
+
+```ts
+clear(): void;
+```
+
+Defined in: [tools/tool-calls.ts:171](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L171)
+
+Clear the tool calls map for the next iteration
+
+#### Returns
+
+`void`
+
+***
+
+### executeTools()
+
+```ts
+executeTools(doneChunk): AsyncGenerator;
+```
+
+Defined in: [tools/tool-calls.ts:111](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L111)
+
+Execute all tool calls and return tool result messages
+Also yields tool_result chunks for streaming
+
+#### Parameters
+
+##### doneChunk
+
+[`DoneStreamChunk`](../../interfaces/DoneStreamChunk.md)
+
+#### Returns
+
+`AsyncGenerator`\<[`ToolResultStreamChunk`](../../interfaces/ToolResultStreamChunk.md), [`ModelMessage`](../../interfaces/ModelMessage.md)[], `void`\>
+
+***
+
+### getToolCalls()
+
+```ts
+getToolCalls(): ToolCall[];
+```
+
+Defined in: [tools/tool-calls.ts:101](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L101)
+
+Get all complete tool calls (filtered for valid ID and name)
+
+#### Returns
+
+[`ToolCall`](../../interfaces/ToolCall.md)[]
+
+***
+
+### hasToolCalls()
+
+```ts
+hasToolCalls(): boolean;
+```
+
+Defined in: [tools/tool-calls.ts:94](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L94)
+
+Check if there are any complete tool calls to execute
+
+#### Returns
+
+`boolean`
diff --git a/docs/reference/functions/chat.md b/docs/reference/functions/chat.md
new file mode 100644
index 000000000..9dd56efa8
--- /dev/null
+++ b/docs/reference/functions/chat.md
@@ -0,0 +1,55 @@
+---
+id: chat
+title: chat
+---
+
+# Function: chat()
+
+```ts
+function chat(options): AsyncIterable;
+```
+
+Defined in: [core/chat.ts:738](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/core/chat.ts#L738)
+
+Standalone chat streaming function with type inference from adapter
+Returns an async iterable of StreamChunks for streaming responses
+Includes automatic tool execution loop
+
+## Type Parameters
+
+### TAdapter
+
+`TAdapter` *extends* [`AIAdapter`](../../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`\>
+
+### TModel
+
+`TModel` *extends* `any`
+
+## Parameters
+
+### options
+
+`Omit`\<[`ChatStreamOptionsUnion`](../../type-aliases/ChatStreamOptionsUnion.md)\<`TAdapter`\>, `"model"` \| `"providerOptions"` \| `"adapter"`\> & `object`
+
+Chat options
+
+## Returns
+
+`AsyncIterable`\<[`StreamChunk`](../../type-aliases/StreamChunk.md)\>
+
+## Example
+
+```typescript
+const stream = chat({
+ adapter: openai(),
+ model: 'gpt-4o',
+ messages: [{ role: 'user', content: 'Hello!' }],
+ tools: [weatherTool], // Optional: auto-executed when called
+});
+
+for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ console.log(chunk.delta);
+ }
+}
+```
diff --git a/docs/reference/functions/chatOptions.md b/docs/reference/functions/chatOptions.md
new file mode 100644
index 000000000..90385ac78
--- /dev/null
+++ b/docs/reference/functions/chatOptions.md
@@ -0,0 +1,32 @@
+---
+id: chatOptions
+title: chatOptions
+---
+
+# Function: chatOptions()
+
+```ts
+function chatOptions(options): Omit<ChatStreamOptionsUnion<TAdapter>, "model" | "providerOptions" | "messages" | "abortController"> & object;
+```
+
+Defined in: [utilities/chat-options.ts:3](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/chat-options.ts#L3)
+
+## Type Parameters
+
+### TAdapter
+
+`TAdapter` *extends* [`AIAdapter`](../../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`\>
+
+### TModel
+
+`TModel` *extends* `any`
+
+## Parameters
+
+### options
+
+`Omit`\<[`ChatStreamOptionsUnion`](../../type-aliases/ChatStreamOptionsUnion.md)\<`TAdapter`\>, `"model"` \| `"providerOptions"` \| `"messages"` \| `"abortController"`\> & `object`
+
+## Returns
+
+`Omit`\<[`ChatStreamOptionsUnion`](../../type-aliases/ChatStreamOptionsUnion.md)\<`TAdapter`\>, `"model"` \| `"providerOptions"` \| `"messages"` \| `"abortController"`\> & `object`
diff --git a/docs/reference/functions/combineStrategies.md b/docs/reference/functions/combineStrategies.md
new file mode 100644
index 000000000..4821bbef9
--- /dev/null
+++ b/docs/reference/functions/combineStrategies.md
@@ -0,0 +1,44 @@
+---
+id: combineStrategies
+title: combineStrategies
+---
+
+# Function: combineStrategies()
+
+```ts
+function combineStrategies(strategies): AgentLoopStrategy;
+```
+
+Defined in: [utilities/agent-loop-strategies.ts:79](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/agent-loop-strategies.ts#L79)
+
+Creates a strategy that combines multiple strategies with AND logic
+All strategies must return true to continue
+
+## Parameters
+
+### strategies
+
+[`AgentLoopStrategy`](../../type-aliases/AgentLoopStrategy.md)[]
+
+Array of strategies to combine
+
+## Returns
+
+[`AgentLoopStrategy`](../../type-aliases/AgentLoopStrategy.md)
+
+AgentLoopStrategy that continues only if all strategies return true
+
+## Example
+
+```typescript
+const stream = chat({
+ adapter: openai(),
+ model: "gpt-4o",
+ messages: [...],
+ tools: [weatherTool],
+ agentLoopStrategy: combineStrategies([
+ maxIterations(10),
+ ({ messages }) => messages.length < 100,
+ ]),
+});
+```
diff --git a/docs/reference/functions/embedding.md b/docs/reference/functions/embedding.md
new file mode 100644
index 000000000..37b8806f0
--- /dev/null
+++ b/docs/reference/functions/embedding.md
@@ -0,0 +1,30 @@
+---
+id: embedding
+title: embedding
+---
+
+# Function: embedding()
+
+```ts
+function embedding(options): Promise;
+```
+
+Defined in: [core/embedding.ts:11](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/core/embedding.ts#L11)
+
+Standalone embedding function with type inference from adapter
+
+## Type Parameters
+
+### TAdapter
+
+`TAdapter` *extends* [`AIAdapter`](../../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`\>
+
+## Parameters
+
+### options
+
+`Omit`\<[`EmbeddingOptions`](../../interfaces/EmbeddingOptions.md), `"model"`\> & `object`
+
+## Returns
+
+`Promise`\<[`EmbeddingResult`](../../interfaces/EmbeddingResult.md)\>
diff --git a/docs/reference/functions/maxIterations.md b/docs/reference/functions/maxIterations.md
new file mode 100644
index 000000000..696ccfad5
--- /dev/null
+++ b/docs/reference/functions/maxIterations.md
@@ -0,0 +1,40 @@
+---
+id: maxIterations
+title: maxIterations
+---
+
+# Function: maxIterations()
+
+```ts
+function maxIterations(max): AgentLoopStrategy;
+```
+
+Defined in: [utilities/agent-loop-strategies.ts:20](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/agent-loop-strategies.ts#L20)
+
+Creates a strategy that continues for a maximum number of iterations
+
+## Parameters
+
+### max
+
+`number`
+
+Maximum number of iterations to allow
+
+## Returns
+
+[`AgentLoopStrategy`](../../type-aliases/AgentLoopStrategy.md)
+
+AgentLoopStrategy that stops after max iterations
+
+## Example
+
+```typescript
+const stream = chat({
+ adapter: openai(),
+ model: "gpt-4o",
+ messages: [...],
+ tools: [weatherTool],
+ agentLoopStrategy: maxIterations(3), // Max 3 iterations
+});
+```
diff --git a/docs/reference/functions/summarize.md b/docs/reference/functions/summarize.md
new file mode 100644
index 000000000..2a0c35d25
--- /dev/null
+++ b/docs/reference/functions/summarize.md
@@ -0,0 +1,30 @@
+---
+id: summarize
+title: summarize
+---
+
+# Function: summarize()
+
+```ts
+function summarize(options): Promise;
+```
+
+Defined in: [core/summarize.ts:11](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/core/summarize.ts#L11)
+
+Standalone summarize function with type inference from adapter
+
+## Type Parameters
+
+### TAdapter
+
+`TAdapter` *extends* [`AIAdapter`](../../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`\>
+
+## Parameters
+
+### options
+
+`Omit`\<[`SummarizationOptions`](../../interfaces/SummarizationOptions.md), `"model"`\> & `object`
+
+## Returns
+
+`Promise`\<[`SummarizationResult`](../../interfaces/SummarizationResult.md)\>
diff --git a/docs/reference/functions/toServerSentEventsStream.md b/docs/reference/functions/toServerSentEventsStream.md
new file mode 100644
index 000000000..7cbeffe57
--- /dev/null
+++ b/docs/reference/functions/toServerSentEventsStream.md
@@ -0,0 +1,47 @@
+---
+id: toServerSentEventsStream
+title: toServerSentEventsStream
+---
+
+# Function: toServerSentEventsStream()
+
+```ts
+function toServerSentEventsStream(stream, abortController?): ReadableStream<Uint8Array>;
+```
+
+Defined in: [utilities/stream-to-response.ts:22](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/stream-to-response.ts#L22)
+
+Convert a StreamChunk async iterable to a ReadableStream in Server-Sent Events format
+
+This creates a ReadableStream that emits chunks in SSE format:
+- Each chunk is prefixed with "data: "
+- Each chunk is followed by "\n\n"
+- Stream ends with "data: [DONE]\n\n"
+
+## Parameters
+
+### stream
+
+`AsyncIterable`\<[`StreamChunk`](../../type-aliases/StreamChunk.md)\>
+
+AsyncIterable of StreamChunks from chat()
+
+### abortController?
+
+`AbortController`
+
+Optional AbortController to abort when stream is cancelled
+
+## Returns
+
+`ReadableStream`\<`Uint8Array`\<`ArrayBufferLike`\>\>
+
+ReadableStream in Server-Sent Events format
+
+## Example
+
+```typescript
+const stream = chat({ adapter: openai(), model: "gpt-4o", messages: [...] });
+const readableStream = toServerSentEventsStream(stream);
+// Use with Response, or any API that accepts ReadableStream
+```
diff --git a/docs/reference/functions/toStreamResponse.md b/docs/reference/functions/toStreamResponse.md
new file mode 100644
index 000000000..883a9e222
--- /dev/null
+++ b/docs/reference/functions/toStreamResponse.md
@@ -0,0 +1,51 @@
+---
+id: toStreamResponse
+title: toStreamResponse
+---
+
+# Function: toStreamResponse()
+
+```ts
+function toStreamResponse(stream, init?): Response;
+```
+
+Defined in: [utilities/stream-to-response.ts:102](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/stream-to-response.ts#L102)
+
+Create a streaming HTTP response from a StreamChunk async iterable
+Includes proper headers for Server-Sent Events
+
+## Parameters
+
+### stream
+
+`AsyncIterable`\<[`StreamChunk`](../../type-aliases/StreamChunk.md)\>
+
+AsyncIterable of StreamChunks from chat()
+
+### init?
+
+`ResponseInit` & `object`
+
+Optional Response initialization options
+
+## Returns
+
+`Response`
+
+Response object with SSE headers and streaming body
+
+## Example
+
+```typescript
+export async function POST(request: Request) {
+ const { messages } = await request.json();
+ const abortController = new AbortController();
+ const stream = chat({
+ adapter: openai(),
+ model: "gpt-4o",
+ messages,
+ options: { abortSignal: abortController.signal }
+ });
+  return toStreamResponse(stream, { abortController });
+}
+```
diff --git a/docs/reference/functions/tool.md b/docs/reference/functions/tool.md
new file mode 100644
index 000000000..f5fb6c7be
--- /dev/null
+++ b/docs/reference/functions/tool.md
@@ -0,0 +1,108 @@
+---
+id: tool
+title: tool
+---
+
+# Function: tool()
+
+```ts
+function tool(config): Tool;
+```
+
+Defined in: [tools/tool-utils.ts:70](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-utils.ts#L70)
+
+Helper to define a tool with enforced type safety.
+Automatically infers the execute function argument types from the parameters schema.
+User must provide the full Tool structure with type: "function" and function: {...}
+
+## Type Parameters
+
+### TProps
+
+`TProps` *extends* `Record`\<`string`, `any`\>
+
+### TRequired
+
+`TRequired` *extends* readonly `string`[] \| `undefined`
+
+## Parameters
+
+### config
+
+#### execute
+
+(`args`) => `string` \| `Promise`\<`string`\>
+
+#### function
+
+\{
+ `description`: `string`;
+ `name`: `string`;
+ `parameters`: \{
+ `properties`: `TProps`;
+ `required?`: `TRequired`;
+ `type`: `"object"`;
+ \};
+\}
+
+#### function.description
+
+`string`
+
+#### function.name
+
+`string`
+
+#### function.parameters
+
+\{
+ `properties`: `TProps`;
+ `required?`: `TRequired`;
+ `type`: `"object"`;
+\}
+
+#### function.parameters.properties
+
+`TProps`
+
+#### function.parameters.required?
+
+`TRequired`
+
+#### function.parameters.type
+
+`"object"`
+
+#### type
+
+`"function"`
+
+## Returns
+
+[`Tool`](../../interfaces/Tool.md)
+
+## Example
+
+```typescript
+const tools = {
+ myTool: tool({
+ type: "function",
+ function: {
+ name: "myTool",
+ description: "My tool description",
+ parameters: {
+ type: "object",
+ properties: {
+ id: { type: "string", description: "The ID" },
+ optional: { type: "number", description: "Optional param" },
+ },
+ required: ["id"],
+ },
+ },
+ execute: async (args) => {
+ // ā args is automatically typed as { id: string; optional?: number }
+ return args.id;
+ },
+ }),
+};
+```
diff --git a/docs/reference/functions/untilFinishReason.md b/docs/reference/functions/untilFinishReason.md
new file mode 100644
index 000000000..02f697ba9
--- /dev/null
+++ b/docs/reference/functions/untilFinishReason.md
@@ -0,0 +1,40 @@
+---
+id: untilFinishReason
+title: untilFinishReason
+---
+
+# Function: untilFinishReason()
+
+```ts
+function untilFinishReason(stopReasons): AgentLoopStrategy;
+```
+
+Defined in: [utilities/agent-loop-strategies.ts:41](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/agent-loop-strategies.ts#L41)
+
+Creates a strategy that continues until a specific finish reason is encountered
+
+## Parameters
+
+### stopReasons
+
+`string`[]
+
+Finish reasons that should stop the loop
+
+## Returns
+
+[`AgentLoopStrategy`](../../type-aliases/AgentLoopStrategy.md)
+
+AgentLoopStrategy that stops on specific finish reasons
+
+## Example
+
+```typescript
+const stream = chat({
+ adapter: openai(),
+ model: "gpt-4o",
+ messages: [...],
+ tools: [weatherTool],
+ agentLoopStrategy: untilFinishReason(["stop", "length"]),
+});
+```
diff --git a/docs/reference/index.md b/docs/reference/index.md
new file mode 100644
index 000000000..a1632d557
--- /dev/null
+++ b/docs/reference/index.md
@@ -0,0 +1,62 @@
+---
+id: "@tanstack/ai"
+title: "@tanstack/ai"
+---
+
+# @tanstack/ai
+
+## Classes
+
+- [BaseAdapter](../classes/BaseAdapter.md)
+- [ToolCallManager](../classes/ToolCallManager.md)
+
+## Interfaces
+
+- [AgentLoopState](../interfaces/AgentLoopState.md)
+- [AIAdapter](../interfaces/AIAdapter.md)
+- [AIAdapterConfig](../interfaces/AIAdapterConfig.md)
+- [ApprovalRequestedStreamChunk](../interfaces/ApprovalRequestedStreamChunk.md)
+- [BaseStreamChunk](../interfaces/BaseStreamChunk.md)
+- [ChatCompletionChunk](../interfaces/ChatCompletionChunk.md)
+- [ChatOptions](../interfaces/ChatOptions.md)
+- [ContentStreamChunk](../interfaces/ContentStreamChunk.md)
+- [DoneStreamChunk](../interfaces/DoneStreamChunk.md)
+- [EmbeddingOptions](../interfaces/EmbeddingOptions.md)
+- [EmbeddingResult](../interfaces/EmbeddingResult.md)
+- [ErrorStreamChunk](../interfaces/ErrorStreamChunk.md)
+- [ModelMessage](../interfaces/ModelMessage.md)
+- [ResponseFormat](../interfaces/ResponseFormat.md)
+- [SummarizationOptions](../interfaces/SummarizationOptions.md)
+- [SummarizationResult](../interfaces/SummarizationResult.md)
+- [ThinkingStreamChunk](../interfaces/ThinkingStreamChunk.md)
+- [Tool](../interfaces/Tool.md)
+- [ToolCall](../interfaces/ToolCall.md)
+- [ToolCallStreamChunk](../interfaces/ToolCallStreamChunk.md)
+- [ToolConfig](../interfaces/ToolConfig.md)
+- [ToolInputAvailableStreamChunk](../interfaces/ToolInputAvailableStreamChunk.md)
+- [ToolResultStreamChunk](../interfaces/ToolResultStreamChunk.md)
+
+## Type Aliases
+
+- [AgentLoopStrategy](../type-aliases/AgentLoopStrategy.md)
+- [ChatStreamOptionsUnion](../type-aliases/ChatStreamOptionsUnion.md)
+- [ExtractModelsFromAdapter](../type-aliases/ExtractModelsFromAdapter.md)
+- [StreamChunk](../type-aliases/StreamChunk.md)
+- [StreamChunkType](../type-aliases/StreamChunkType.md)
+
+## Variables
+
+- [aiEventClient](../variables/aiEventClient.md)
+
+## Functions
+
+- [chat](../functions/chat.md)
+- [chatOptions](../functions/chatOptions.md)
+- [combineStrategies](../functions/combineStrategies.md)
+- [embedding](../functions/embedding.md)
+- [maxIterations](../functions/maxIterations.md)
+- [summarize](../functions/summarize.md)
+- [tool](../functions/tool.md)
+- [toServerSentEventsStream](../functions/toServerSentEventsStream.md)
+- [toStreamResponse](../functions/toStreamResponse.md)
+- [untilFinishReason](../functions/untilFinishReason.md)
diff --git a/docs/reference/interfaces/AIAdapter.md b/docs/reference/interfaces/AIAdapter.md
new file mode 100644
index 000000000..6595d6dc9
--- /dev/null
+++ b/docs/reference/interfaces/AIAdapter.md
@@ -0,0 +1,182 @@
+---
+id: AIAdapter
+title: AIAdapter
+---
+
+# Interface: AIAdapter\<TChatModels, TEmbeddingModels, TChatProviderOptions, TEmbeddingProviderOptions, TModelProviderOptionsByName\>
+
+Defined in: [types.ts:425](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L425)
+
+AI adapter interface with support for endpoint-specific models and provider options.
+
+Generic parameters:
+- TChatModels: Models that support chat/text completion
+- TEmbeddingModels: Models that support embeddings
+- TChatProviderOptions: Provider-specific options for chat endpoint
+- TEmbeddingProviderOptions: Provider-specific options for embedding endpoint
+- TModelProviderOptionsByName: Provider-specific options for model by name
+
+## Type Parameters
+
+### TChatModels
+
+`TChatModels` *extends* `ReadonlyArray`\<`string`\> = `ReadonlyArray`\<`string`\>
+
+### TEmbeddingModels
+
+`TEmbeddingModels` *extends* `ReadonlyArray`\<`string`\> = `ReadonlyArray`\<`string`\>
+
+### TChatProviderOptions
+
+`TChatProviderOptions` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\>
+
+### TEmbeddingProviderOptions
+
+`TEmbeddingProviderOptions` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\>
+
+### TModelProviderOptionsByName
+
+`TModelProviderOptionsByName` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\>
+
+## Properties
+
+### \_chatProviderOptions?
+
+```ts
+optional _chatProviderOptions: TChatProviderOptions;
+```
+
+Defined in: [types.ts:441](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L441)
+
+***
+
+### \_embeddingProviderOptions?
+
+```ts
+optional _embeddingProviderOptions: TEmbeddingProviderOptions;
+```
+
+Defined in: [types.ts:442](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L442)
+
+***
+
+### \_modelProviderOptionsByName
+
+```ts
+_modelProviderOptionsByName: TModelProviderOptionsByName;
+```
+
+Defined in: [types.ts:448](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L448)
+
+Type-only map from model name to its specific provider options.
+Used by the core AI types to narrow providerOptions based on the selected model.
+Must be provided by all adapters.
+
+***
+
+### \_providerOptions?
+
+```ts
+optional _providerOptions: TChatProviderOptions;
+```
+
+Defined in: [types.ts:440](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L440)
+
+***
+
+### chatStream()
+
+```ts
+chatStream: (options) => AsyncIterable;
+```
+
+Defined in: [types.ts:451](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L451)
+
+#### Parameters
+
+##### options
+
+[`ChatOptions`](../ChatOptions.md)\<`string`, `TChatProviderOptions`\>
+
+#### Returns
+
+`AsyncIterable`\<[`StreamChunk`](../../type-aliases/StreamChunk.md)\>
+
+***
+
+### createEmbeddings()
+
+```ts
+createEmbeddings: (options) => Promise;
+```
+
+Defined in: [types.ts:459](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L459)
+
+#### Parameters
+
+##### options
+
+[`EmbeddingOptions`](../EmbeddingOptions.md)
+
+#### Returns
+
+`Promise`\<[`EmbeddingResult`](../EmbeddingResult.md)\>
+
+***
+
+### embeddingModels?
+
+```ts
+optional embeddingModels: TEmbeddingModels;
+```
+
+Defined in: [types.ts:437](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L437)
+
+Models that support embeddings
+
+***
+
+### models
+
+```ts
+models: TChatModels;
+```
+
+Defined in: [types.ts:434](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L434)
+
+Models that support chat/text completion
+
+***
+
+### name
+
+```ts
+name: string;
+```
+
+Defined in: [types.ts:432](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L432)
+
+***
+
+### summarize()
+
+```ts
+summarize: (options) => Promise;
+```
+
+Defined in: [types.ts:456](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L456)
+
+#### Parameters
+
+##### options
+
+[`SummarizationOptions`](../SummarizationOptions.md)
+
+#### Returns
+
+`Promise`\<[`SummarizationResult`](../SummarizationResult.md)\>
diff --git a/docs/reference/interfaces/AIAdapterConfig.md b/docs/reference/interfaces/AIAdapterConfig.md
new file mode 100644
index 000000000..aed5908ba
--- /dev/null
+++ b/docs/reference/interfaces/AIAdapterConfig.md
@@ -0,0 +1,58 @@
+---
+id: AIAdapterConfig
+title: AIAdapterConfig
+---
+
+# Interface: AIAdapterConfig
+
+Defined in: [types.ts:462](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L462)
+
+## Properties
+
+### apiKey?
+
+```ts
+optional apiKey: string;
+```
+
+Defined in: [types.ts:463](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L463)
+
+***
+
+### baseUrl?
+
+```ts
+optional baseUrl: string;
+```
+
+Defined in: [types.ts:464](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L464)
+
+***
+
+### headers?
+
+```ts
+optional headers: Record;
+```
+
+Defined in: [types.ts:467](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L467)
+
+***
+
+### maxRetries?
+
+```ts
+optional maxRetries: number;
+```
+
+Defined in: [types.ts:466](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L466)
+
+***
+
+### timeout?
+
+```ts
+optional timeout: number;
+```
+
+Defined in: [types.ts:465](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L465)
diff --git a/docs/reference/interfaces/AgentLoopState.md b/docs/reference/interfaces/AgentLoopState.md
new file mode 100644
index 000000000..14a78b4f6
--- /dev/null
+++ b/docs/reference/interfaces/AgentLoopState.md
@@ -0,0 +1,46 @@
+---
+id: AgentLoopState
+title: AgentLoopState
+---
+
+# Interface: AgentLoopState
+
+Defined in: [types.ts:205](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L205)
+
+State passed to agent loop strategy for determining whether to continue
+
+## Properties
+
+### finishReason
+
+```ts
+finishReason: string | null;
+```
+
+Defined in: [types.ts:211](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L211)
+
+Finish reason from the last response
+
+***
+
+### iterationCount
+
+```ts
+iterationCount: number;
+```
+
+Defined in: [types.ts:207](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L207)
+
+Current iteration count (0-indexed)
+
+***
+
+### messages
+
+```ts
+messages: ModelMessage[];
+```
+
+Defined in: [types.ts:209](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L209)
+
+Current messages array
diff --git a/docs/reference/interfaces/ApprovalRequestedStreamChunk.md b/docs/reference/interfaces/ApprovalRequestedStreamChunk.md
new file mode 100644
index 000000000..0d8e25f04
--- /dev/null
+++ b/docs/reference/interfaces/ApprovalRequestedStreamChunk.md
@@ -0,0 +1,120 @@
+---
+id: ApprovalRequestedStreamChunk
+title: ApprovalRequestedStreamChunk
+---
+
+# Interface: ApprovalRequestedStreamChunk
+
+Defined in: [types.ts:323](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L323)
+
+## Extends
+
+- [`BaseStreamChunk`](../BaseStreamChunk.md)
+
+## Properties
+
+### approval
+
+```ts
+approval: object;
+```
+
+Defined in: [types.ts:328](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L328)
+
+#### id
+
+```ts
+id: string;
+```
+
+#### needsApproval
+
+```ts
+needsApproval: true;
+```
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`id`](../BaseStreamChunk.md#id)
+
+***
+
+### input
+
+```ts
+input: any;
+```
+
+Defined in: [types.ts:327](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L327)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`model`](../BaseStreamChunk.md#model)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`timestamp`](../BaseStreamChunk.md#timestamp)
+
+***
+
+### toolCallId
+
+```ts
+toolCallId: string;
+```
+
+Defined in: [types.ts:325](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L325)
+
+***
+
+### toolName
+
+```ts
+toolName: string;
+```
+
+Defined in: [types.ts:326](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L326)
+
+***
+
+### type
+
+```ts
+type: "approval-requested";
+```
+
+Defined in: [types.ts:324](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L324)
+
+#### Overrides
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`type`](../BaseStreamChunk.md#type)
diff --git a/docs/reference/interfaces/BaseStreamChunk.md b/docs/reference/interfaces/BaseStreamChunk.md
new file mode 100644
index 000000000..f8fc91435
--- /dev/null
+++ b/docs/reference/interfaces/BaseStreamChunk.md
@@ -0,0 +1,59 @@
+---
+id: BaseStreamChunk
+title: BaseStreamChunk
+---
+
+# Interface: BaseStreamChunk
+
+Defined in: [types.ts:272](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L272)
+
+## Extended by
+
+- [`ContentStreamChunk`](../ContentStreamChunk.md)
+- [`ToolCallStreamChunk`](../ToolCallStreamChunk.md)
+- [`ToolResultStreamChunk`](../ToolResultStreamChunk.md)
+- [`DoneStreamChunk`](../DoneStreamChunk.md)
+- [`ErrorStreamChunk`](../ErrorStreamChunk.md)
+- [`ApprovalRequestedStreamChunk`](../ApprovalRequestedStreamChunk.md)
+- [`ToolInputAvailableStreamChunk`](../ToolInputAvailableStreamChunk.md)
+- [`ThinkingStreamChunk`](../ThinkingStreamChunk.md)
+
+## Properties
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+***
+
+### type
+
+```ts
+type: StreamChunkType;
+```
+
+Defined in: [types.ts:273](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L273)
diff --git a/docs/reference/interfaces/ChatCompletionChunk.md b/docs/reference/interfaces/ChatCompletionChunk.md
new file mode 100644
index 000000000..ffe878907
--- /dev/null
+++ b/docs/reference/interfaces/ChatCompletionChunk.md
@@ -0,0 +1,86 @@
+---
+id: ChatCompletionChunk
+title: ChatCompletionChunk
+---
+
+# Interface: ChatCompletionChunk
+
+Defined in: [types.ts:362](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L362)
+
+## Properties
+
+### content
+
+```ts
+content: string;
+```
+
+Defined in: [types.ts:365](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L365)
+
+***
+
+### finishReason?
+
+```ts
+optional finishReason: "stop" | "length" | "content_filter" | null;
+```
+
+Defined in: [types.ts:367](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L367)
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:363](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L363)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:364](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L364)
+
+***
+
+### role?
+
+```ts
+optional role: "assistant";
+```
+
+Defined in: [types.ts:366](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L366)
+
+***
+
+### usage?
+
+```ts
+optional usage: object;
+```
+
+Defined in: [types.ts:368](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L368)
+
+#### completionTokens
+
+```ts
+completionTokens: number;
+```
+
+#### promptTokens
+
+```ts
+promptTokens: number;
+```
+
+#### totalTokens
+
+```ts
+totalTokens: number;
+```
diff --git a/docs/reference/interfaces/ChatOptions.md b/docs/reference/interfaces/ChatOptions.md
new file mode 100644
index 000000000..5866ecd14
--- /dev/null
+++ b/docs/reference/interfaces/ChatOptions.md
@@ -0,0 +1,145 @@
+---
+id: ChatOptions
+title: ChatOptions
+---
+
+# Interface: ChatOptions\<TModel, TProviderOptionsSuperset, TOutput, TProviderOptionsForModel\>
+
+Defined in: [types.ts:231](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L231)
+
+Options passed into the SDK and further piped to the AI provider.
+
+## Type Parameters
+
+### TModel
+
+`TModel` *extends* `string` = `string`
+
+### TProviderOptionsSuperset
+
+`TProviderOptionsSuperset` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\>
+
+### TOutput
+
+`TOutput` *extends* [`ResponseFormat`](../ResponseFormat.md)\<`any`\> \| `undefined` = `undefined`
+
+### TProviderOptionsForModel
+
+`TProviderOptionsForModel` = `TProviderOptionsSuperset`
+
+## Properties
+
+### abortController?
+
+```ts
+optional abortController: AbortController;
+```
+
+Defined in: [types.ts:259](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L259)
+
+AbortController for request cancellation.
+
+Allows you to cancel an in-progress request using an AbortController.
+Useful for implementing timeouts or user-initiated cancellations.
+
+#### Example
+
+```ts
+const abortController = new AbortController();
+setTimeout(() => abortController.abort(), 5000); // Cancel after 5 seconds
+await chat({ ..., abortController });
+```
+
+#### See
+
+https://developer.mozilla.org/en-US/docs/Web/API/AbortController
+
+***
+
+### agentLoopStrategy?
+
+```ts
+optional agentLoopStrategy: AgentLoopStrategy;
+```
+
+Defined in: [types.ts:241](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L241)
+
+***
+
+### messages
+
+```ts
+messages: ModelMessage[];
+```
+
+Defined in: [types.ts:238](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L238)
+
+***
+
+### model
+
+```ts
+model: TModel;
+```
+
+Defined in: [types.ts:237](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L237)
+
+***
+
+### options?
+
+```ts
+optional options: CommonOptions;
+```
+
+Defined in: [types.ts:242](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L242)
+
+***
+
+### output?
+
+```ts
+optional output: TOutput;
+```
+
+Defined in: [types.ts:245](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L245)
+
+***
+
+### providerOptions?
+
+```ts
+optional providerOptions: TProviderOptionsForModel;
+```
+
+Defined in: [types.ts:243](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L243)
+
+***
+
+### request?
+
+```ts
+optional request: Request | RequestInit;
+```
+
+Defined in: [types.ts:244](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L244)
+
+***
+
+### systemPrompts?
+
+```ts
+optional systemPrompts: string[];
+```
+
+Defined in: [types.ts:240](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L240)
+
+***
+
+### tools?
+
+```ts
+optional tools: Tool[];
+```
+
+Defined in: [types.ts:239](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L239)
diff --git a/docs/reference/interfaces/ContentStreamChunk.md b/docs/reference/interfaces/ContentStreamChunk.md
new file mode 100644
index 000000000..9f953f9a6
--- /dev/null
+++ b/docs/reference/interfaces/ContentStreamChunk.md
@@ -0,0 +1,98 @@
+---
+id: ContentStreamChunk
+title: ContentStreamChunk
+---
+
+# Interface: ContentStreamChunk
+
+Defined in: [types.ts:279](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L279)
+
+## Extends
+
+- [`BaseStreamChunk`](../BaseStreamChunk.md)
+
+## Properties
+
+### content
+
+```ts
+content: string;
+```
+
+Defined in: [types.ts:282](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L282)
+
+***
+
+### delta
+
+```ts
+delta: string;
+```
+
+Defined in: [types.ts:281](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L281)
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`id`](../BaseStreamChunk.md#id)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`model`](../BaseStreamChunk.md#model)
+
+***
+
+### role?
+
+```ts
+optional role: "assistant";
+```
+
+Defined in: [types.ts:283](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L283)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`timestamp`](../BaseStreamChunk.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "content";
+```
+
+Defined in: [types.ts:280](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L280)
+
+#### Overrides
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`type`](../BaseStreamChunk.md#type)
diff --git a/docs/reference/interfaces/DoneStreamChunk.md b/docs/reference/interfaces/DoneStreamChunk.md
new file mode 100644
index 000000000..51689b8a5
--- /dev/null
+++ b/docs/reference/interfaces/DoneStreamChunk.md
@@ -0,0 +1,106 @@
+---
+id: DoneStreamChunk
+title: DoneStreamChunk
+---
+
+# Interface: DoneStreamChunk
+
+Defined in: [types.ts:305](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L305)
+
+## Extends
+
+- [`BaseStreamChunk`](../BaseStreamChunk.md)
+
+## Properties
+
+### finishReason
+
+```ts
+finishReason: "stop" | "length" | "content_filter" | "tool_calls" | null;
+```
+
+Defined in: [types.ts:307](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L307)
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`id`](../BaseStreamChunk.md#id)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`model`](../BaseStreamChunk.md#model)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`timestamp`](../BaseStreamChunk.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "done";
+```
+
+Defined in: [types.ts:306](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L306)
+
+#### Overrides
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`type`](../BaseStreamChunk.md#type)
+
+***
+
+### usage?
+
+```ts
+optional usage: object;
+```
+
+Defined in: [types.ts:308](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L308)
+
+#### completionTokens
+
+```ts
+completionTokens: number;
+```
+
+#### promptTokens
+
+```ts
+promptTokens: number;
+```
+
+#### totalTokens
+
+```ts
+totalTokens: number;
+```
diff --git a/docs/reference/interfaces/EmbeddingOptions.md b/docs/reference/interfaces/EmbeddingOptions.md
new file mode 100644
index 000000000..0c73ef4da
--- /dev/null
+++ b/docs/reference/interfaces/EmbeddingOptions.md
@@ -0,0 +1,38 @@
+---
+id: EmbeddingOptions
+title: EmbeddingOptions
+---
+
+# Interface: EmbeddingOptions
+
+Defined in: [types.ts:394](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L394)
+
+## Properties
+
+### dimensions?
+
+```ts
+optional dimensions: number;
+```
+
+Defined in: [types.ts:397](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L397)
+
+***
+
+### input
+
+```ts
+input: string | string[];
+```
+
+Defined in: [types.ts:396](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L396)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:395](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L395)
diff --git a/docs/reference/interfaces/EmbeddingResult.md b/docs/reference/interfaces/EmbeddingResult.md
new file mode 100644
index 000000000..f1ec170c0
--- /dev/null
+++ b/docs/reference/interfaces/EmbeddingResult.md
@@ -0,0 +1,60 @@
+---
+id: EmbeddingResult
+title: EmbeddingResult
+---
+
+# Interface: EmbeddingResult
+
+Defined in: [types.ts:400](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L400)
+
+## Properties
+
+### embeddings
+
+```ts
+embeddings: number[][];
+```
+
+Defined in: [types.ts:403](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L403)
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:401](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L401)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:402](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L402)
+
+***
+
+### usage
+
+```ts
+usage: object;
+```
+
+Defined in: [types.ts:404](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L404)
+
+#### promptTokens
+
+```ts
+promptTokens: number;
+```
+
+#### totalTokens
+
+```ts
+totalTokens: number;
+```
diff --git a/docs/reference/interfaces/ErrorStreamChunk.md b/docs/reference/interfaces/ErrorStreamChunk.md
new file mode 100644
index 000000000..a75aba122
--- /dev/null
+++ b/docs/reference/interfaces/ErrorStreamChunk.md
@@ -0,0 +1,90 @@
+---
+id: ErrorStreamChunk
+title: ErrorStreamChunk
+---
+
+# Interface: ErrorStreamChunk
+
+Defined in: [types.ts:315](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L315)
+
+## Extends
+
+- [`BaseStreamChunk`](../BaseStreamChunk.md)
+
+## Properties
+
+### error
+
+```ts
+error: object;
+```
+
+Defined in: [types.ts:317](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L317)
+
+#### code?
+
+```ts
+optional code: string;
+```
+
+#### message
+
+```ts
+message: string;
+```
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`id`](../BaseStreamChunk.md#id)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`model`](../BaseStreamChunk.md#model)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`timestamp`](../BaseStreamChunk.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "error";
+```
+
+Defined in: [types.ts:316](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L316)
+
+#### Overrides
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`type`](../BaseStreamChunk.md#type)
diff --git a/docs/reference/interfaces/ModelMessage.md b/docs/reference/interfaces/ModelMessage.md
new file mode 100644
index 000000000..403690993
--- /dev/null
+++ b/docs/reference/interfaces/ModelMessage.md
@@ -0,0 +1,58 @@
+---
+id: ModelMessage
+title: ModelMessage
+---
+
+# Interface: ModelMessage
+
+Defined in: [types.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L12)
+
+## Properties
+
+### content
+
+```ts
+content: string | null;
+```
+
+Defined in: [types.ts:14](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L14)
+
+***
+
+### name?
+
+```ts
+optional name: string;
+```
+
+Defined in: [types.ts:15](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L15)
+
+***
+
+### role
+
+```ts
+role: "system" | "user" | "assistant" | "tool";
+```
+
+Defined in: [types.ts:13](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L13)
+
+***
+
+### toolCallId?
+
+```ts
+optional toolCallId: string;
+```
+
+Defined in: [types.ts:17](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L17)
+
+***
+
+### toolCalls?
+
+```ts
+optional toolCalls: ToolCall[];
+```
+
+Defined in: [types.ts:16](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L16)
diff --git a/docs/reference/interfaces/ResponseFormat.md b/docs/reference/interfaces/ResponseFormat.md
new file mode 100644
index 000000000..62f53a2f4
--- /dev/null
+++ b/docs/reference/interfaces/ResponseFormat.md
@@ -0,0 +1,151 @@
+---
+id: ResponseFormat
+title: ResponseFormat
+---
+
+# Interface: ResponseFormat\<TData\>
+
+Defined in: [types.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L121)
+
+Structured output format specification.
+
+Constrains the model's output to match a specific JSON structure.
+Useful for extracting structured data, form filling, or ensuring consistent response formats.
+
+## See
+
+ - https://platform.openai.com/docs/guides/structured-outputs
+ - https://sdk.vercel.ai/docs/ai-sdk-core/structured-outputs
+
+## Type Parameters
+
+### TData
+
+`TData` = `any`
+
+TypeScript type of the expected data structure (for type safety)
+
+## Properties
+
+### \_\_data?
+
+```ts
+optional __data: TData;
+```
+
+Defined in: [types.ts:199](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L199)
+
+**`Internal`**
+
+Type-only property to carry the inferred data type.
+
+This is never set at runtime - it only exists for TypeScript type inference.
+Allows the SDK to know what type to expect when parsing the response.
+
+***
+
+### json\_schema?
+
+```ts
+optional json_schema: object;
+```
+
+Defined in: [types.ts:138](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L138)
+
+JSON schema specification (required when type is "json_schema").
+
+Defines the exact structure the model's output must conform to.
+OpenAI's structured outputs will guarantee the output matches this schema.
+
+#### description?
+
+```ts
+optional description: string;
+```
+
+Optional description of what the schema represents.
+
+Helps document the purpose of this structured output.
+
+##### Example
+
+```ts
+"User profile information including name, email, and preferences"
+```
+
+#### name
+
+```ts
+name: string;
+```
+
+Unique name for the schema.
+
+Used to identify the schema in logs and debugging.
+Should be descriptive (e.g., "user_profile", "search_results").
+
+#### schema
+
+```ts
+schema: Record;
+```
+
+JSON Schema definition for the expected output structure.
+
+Must be a valid JSON Schema (draft 2020-12 or compatible).
+The model's output will be validated against this schema.
+
+##### See
+
+https://json-schema.org/
+
+##### Example
+
+```ts
+{
+  type: "object",
+  properties: {
+    name: { type: "string" },
+    age: { type: "number" },
+    email: { type: "string", format: "email" }
+  },
+  required: ["name", "email"],
+  additionalProperties: false
+}
+
+#### strict?
+
+```ts
+optional strict: boolean;
+```
+
+Whether to enforce strict schema validation.
+
+When true (recommended), the model guarantees output will match the schema exactly.
+When false, the model will "best effort" match the schema.
+
+Default: true (for providers that support it)
+
+##### See
+
+https://platform.openai.com/docs/guides/structured-outputs#strict-mode
+
+***
+
+### type
+
+```ts
+type: "json_object" | "json_schema";
+```
+
+Defined in: [types.ts:130](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L130)
+
+Type of structured output.
+
+- "json_object": Forces the model to output valid JSON (any structure)
+- "json_schema": Validates output against a provided JSON Schema (strict structure)
+
+#### See
+
+https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format
diff --git a/docs/reference/interfaces/SummarizationOptions.md b/docs/reference/interfaces/SummarizationOptions.md
new file mode 100644
index 000000000..dcd67c898
--- /dev/null
+++ b/docs/reference/interfaces/SummarizationOptions.md
@@ -0,0 +1,58 @@
+---
+id: SummarizationOptions
+title: SummarizationOptions
+---
+
+# Interface: SummarizationOptions
+
+Defined in: [types.ts:375](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L375)
+
+## Properties
+
+### focus?
+
+```ts
+optional focus: string[];
+```
+
+Defined in: [types.ts:380](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L380)
+
+***
+
+### maxLength?
+
+```ts
+optional maxLength: number;
+```
+
+Defined in: [types.ts:378](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L378)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:376](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L376)
+
+***
+
+### style?
+
+```ts
+optional style: "bullet-points" | "paragraph" | "concise";
+```
+
+Defined in: [types.ts:379](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L379)
+
+***
+
+### text
+
+```ts
+text: string;
+```
+
+Defined in: [types.ts:377](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L377)
diff --git a/docs/reference/interfaces/SummarizationResult.md b/docs/reference/interfaces/SummarizationResult.md
new file mode 100644
index 000000000..a926eb936
--- /dev/null
+++ b/docs/reference/interfaces/SummarizationResult.md
@@ -0,0 +1,66 @@
+---
+id: SummarizationResult
+title: SummarizationResult
+---
+
+# Interface: SummarizationResult
+
+Defined in: [types.ts:383](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L383)
+
+## Properties
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:384](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L384)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:385](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L385)
+
+***
+
+### summary
+
+```ts
+summary: string;
+```
+
+Defined in: [types.ts:386](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L386)
+
+***
+
+### usage
+
+```ts
+usage: object;
+```
+
+Defined in: [types.ts:387](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L387)
+
+#### completionTokens
+
+```ts
+completionTokens: number;
+```
+
+#### promptTokens
+
+```ts
+promptTokens: number;
+```
+
+#### totalTokens
+
+```ts
+totalTokens: number;
+```
diff --git a/docs/reference/interfaces/ThinkingStreamChunk.md b/docs/reference/interfaces/ThinkingStreamChunk.md
new file mode 100644
index 000000000..91a5fd21c
--- /dev/null
+++ b/docs/reference/interfaces/ThinkingStreamChunk.md
@@ -0,0 +1,88 @@
+---
+id: ThinkingStreamChunk
+title: ThinkingStreamChunk
+---
+
+# Interface: ThinkingStreamChunk
+
+Defined in: [types.ts:341](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L341)
+
+## Extends
+
+- [`BaseStreamChunk`](../BaseStreamChunk.md)
+
+## Properties
+
+### content
+
+```ts
+content: string;
+```
+
+Defined in: [types.ts:344](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L344)
+
+***
+
+### delta?
+
+```ts
+optional delta: string;
+```
+
+Defined in: [types.ts:343](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L343)
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`id`](../BaseStreamChunk.md#id)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`model`](../BaseStreamChunk.md#model)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`timestamp`](../BaseStreamChunk.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "thinking";
+```
+
+Defined in: [types.ts:342](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L342)
+
+#### Overrides
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`type`](../BaseStreamChunk.md#type)
diff --git a/docs/reference/interfaces/Tool.md b/docs/reference/interfaces/Tool.md
new file mode 100644
index 000000000..a0668f0fc
--- /dev/null
+++ b/docs/reference/interfaces/Tool.md
@@ -0,0 +1,168 @@
+---
+id: Tool
+title: Tool
+---
+
+# Interface: Tool
+
+Defined in: [types.ts:29](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L29)
+
+Tool/Function definition for function calling.
+
+Tools allow the model to interact with external systems, APIs, or perform computations.
+The model will decide when to call tools based on the user's request and the tool descriptions.
+
+## See
+
+ - https://platform.openai.com/docs/guides/function-calling
+ - https://docs.anthropic.com/claude/docs/tool-use
+
+## Properties
+
+### execute()?
+
+```ts
+optional execute: (args) => string | Promise<string>;
+```
+
+Defined in: [types.ts:99](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L99)
+
+Optional function to execute when the model calls this tool.
+
+If provided, the SDK will automatically execute the function with the model's arguments
+and feed the result back to the model. This enables autonomous tool use loops.
+
+Returns the result as a string (or Promise) to send back to the model.
+
+#### Parameters
+
+##### args
+
+`any`
+
+The arguments parsed from the model's tool call (matches the parameters schema)
+
+#### Returns
+
+`string` \| `Promise`\<`string`\>
+
+Result string to send back to the model
+
+#### Example
+
+```ts
+execute: async (args) => {
+ const weather = await fetchWeather(args.location);
+ return JSON.stringify(weather);
+}
+```
+
+***
+
+### function
+
+```ts
+function: object;
+```
+
+Defined in: [types.ts:40](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L40)
+
+Function definition and metadata.
+
+#### description
+
+```ts
+description: string;
+```
+
+Clear description of what the function does.
+
+This is crucial - the model uses this to decide when to call the function.
+Be specific about what the function does, what parameters it needs, and what it returns.
+
+##### Example
+
+```ts
+"Get the current weather in a given location. Returns temperature, conditions, and forecast."
+```
+
+#### name
+
+```ts
+name: string;
+```
+
+Unique name of the function (used by the model to call it).
+
+Should be descriptive and follow naming conventions (e.g., snake_case or camelCase).
+Must be unique within the tools array.
+
+##### Example
+
+```ts
+"get_weather", "search_database", "sendEmail"
+```
+
+#### parameters
+
+```ts
+parameters: Record;
+```
+
+JSON Schema describing the function's parameters.
+
+Defines the structure and types of arguments the function accepts.
+The model will generate arguments matching this schema.
+
+##### See
+
+https://json-schema.org/
+
+##### Example
+
+```ts
+{
+  type: "object",
+  properties: {
+    location: { type: "string", description: "City name or coordinates" },
+    unit: { type: "string", enum: ["celsius", "fahrenheit"] }
+  },
+  required: ["location"]
+}
+```
+
+***
+
+### metadata?
+
+```ts
+optional metadata: Record;
+```
+
+Defined in: [types.ts:103](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L103)
+
+***
+
+### needsApproval?
+
+```ts
+optional needsApproval: boolean;
+```
+
+Defined in: [types.ts:101](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L101)
+
+If true, tool execution requires user approval before running. Works with both server and client tools.
+
+***
+
+### type
+
+```ts
+type: "function";
+```
+
+Defined in: [types.ts:35](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L35)
+
+Type of tool - currently only "function" is supported.
+
+Future versions may support additional tool types.
diff --git a/docs/reference/interfaces/ToolCall.md b/docs/reference/interfaces/ToolCall.md
new file mode 100644
index 000000000..4843850df
--- /dev/null
+++ b/docs/reference/interfaces/ToolCall.md
@@ -0,0 +1,50 @@
+---
+id: ToolCall
+title: ToolCall
+---
+
+# Interface: ToolCall
+
+Defined in: [types.ts:3](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L3)
+
+## Properties
+
+### function
+
+```ts
+function: object;
+```
+
+Defined in: [types.ts:6](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L6)
+
+#### arguments
+
+```ts
+arguments: string;
+```
+
+#### name
+
+```ts
+name: string;
+```
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:4](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L4)
+
+***
+
+### type
+
+```ts
+type: "function";
+```
+
+Defined in: [types.ts:5](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L5)
diff --git a/docs/reference/interfaces/ToolCallStreamChunk.md b/docs/reference/interfaces/ToolCallStreamChunk.md
new file mode 100644
index 000000000..92507c372
--- /dev/null
+++ b/docs/reference/interfaces/ToolCallStreamChunk.md
@@ -0,0 +1,118 @@
+---
+id: ToolCallStreamChunk
+title: ToolCallStreamChunk
+---
+
+# Interface: ToolCallStreamChunk
+
+Defined in: [types.ts:286](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L286)
+
+## Extends
+
+- [`BaseStreamChunk`](../BaseStreamChunk.md)
+
+## Properties
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`id`](../BaseStreamChunk.md#id)
+
+***
+
+### index
+
+```ts
+index: number;
+```
+
+Defined in: [types.ts:296](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L296)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`model`](../BaseStreamChunk.md#model)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`timestamp`](../BaseStreamChunk.md#timestamp)
+
+***
+
+### toolCall
+
+```ts
+toolCall: object;
+```
+
+Defined in: [types.ts:288](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L288)
+
+#### function
+
+```ts
+function: object;
+```
+
+##### function.arguments
+
+```ts
+arguments: string;
+```
+
+##### function.name
+
+```ts
+name: string;
+```
+
+#### id
+
+```ts
+id: string;
+```
+
+#### type
+
+```ts
+type: "function";
+```
+
+***
+
+### type
+
+```ts
+type: "tool_call";
+```
+
+Defined in: [types.ts:287](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L287)
+
+#### Overrides
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`type`](../BaseStreamChunk.md#type)
diff --git a/docs/reference/interfaces/ToolConfig.md b/docs/reference/interfaces/ToolConfig.md
new file mode 100644
index 000000000..0cecdd3d6
--- /dev/null
+++ b/docs/reference/interfaces/ToolConfig.md
@@ -0,0 +1,14 @@
+---
+id: ToolConfig
+title: ToolConfig
+---
+
+# Interface: ToolConfig
+
+Defined in: [types.ts:106](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L106)
+
+## Indexable
+
+```ts
+[key: string]: Tool
+```
diff --git a/docs/reference/interfaces/ToolInputAvailableStreamChunk.md b/docs/reference/interfaces/ToolInputAvailableStreamChunk.md
new file mode 100644
index 000000000..c3f294788
--- /dev/null
+++ b/docs/reference/interfaces/ToolInputAvailableStreamChunk.md
@@ -0,0 +1,98 @@
+---
+id: ToolInputAvailableStreamChunk
+title: ToolInputAvailableStreamChunk
+---
+
+# Interface: ToolInputAvailableStreamChunk
+
+Defined in: [types.ts:334](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L334)
+
+## Extends
+
+- [`BaseStreamChunk`](../BaseStreamChunk.md)
+
+## Properties
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`id`](../BaseStreamChunk.md#id)
+
+***
+
+### input
+
+```ts
+input: any;
+```
+
+Defined in: [types.ts:338](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L338)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`model`](../BaseStreamChunk.md#model)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`timestamp`](../BaseStreamChunk.md#timestamp)
+
+***
+
+### toolCallId
+
+```ts
+toolCallId: string;
+```
+
+Defined in: [types.ts:336](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L336)
+
+***
+
+### toolName
+
+```ts
+toolName: string;
+```
+
+Defined in: [types.ts:337](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L337)
+
+***
+
+### type
+
+```ts
+type: "tool-input-available";
+```
+
+Defined in: [types.ts:335](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L335)
+
+#### Overrides
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`type`](../BaseStreamChunk.md#type)
diff --git a/docs/reference/interfaces/ToolResultStreamChunk.md b/docs/reference/interfaces/ToolResultStreamChunk.md
new file mode 100644
index 000000000..58085881e
--- /dev/null
+++ b/docs/reference/interfaces/ToolResultStreamChunk.md
@@ -0,0 +1,88 @@
+---
+id: ToolResultStreamChunk
+title: ToolResultStreamChunk
+---
+
+# Interface: ToolResultStreamChunk
+
+Defined in: [types.ts:299](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L299)
+
+## Extends
+
+- [`BaseStreamChunk`](../BaseStreamChunk.md)
+
+## Properties
+
+### content
+
+```ts
+content: string;
+```
+
+Defined in: [types.ts:302](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L302)
+
+***
+
+### id
+
+```ts
+id: string;
+```
+
+Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`id`](../BaseStreamChunk.md#id)
+
+***
+
+### model
+
+```ts
+model: string;
+```
+
+Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`model`](../BaseStreamChunk.md#model)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276)
+
+#### Inherited from
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`timestamp`](../BaseStreamChunk.md#timestamp)
+
+***
+
+### toolCallId
+
+```ts
+toolCallId: string;
+```
+
+Defined in: [types.ts:301](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L301)
+
+***
+
+### type
+
+```ts
+type: "tool_result";
+```
+
+Defined in: [types.ts:300](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L300)
+
+#### Overrides
+
+[`BaseStreamChunk`](../BaseStreamChunk.md).[`type`](../BaseStreamChunk.md#type)
diff --git a/docs/reference/protocol.md b/docs/reference/protocol.md
deleted file mode 100644
index 351d3194f..000000000
--- a/docs/reference/protocol.md
+++ /dev/null
@@ -1,415 +0,0 @@
-# Stream Protocol
-
-This document describes the structure of chunks sent from `@tanstack/ai` to `@tanstack/ai-client`, regardless of the transport mechanism (SSE, HTTP stream, direct stream, etc.).
-
-## Overview
-
-The protocol is based on a stream of JSON objects, where each object represents a chunk of data. All chunks share a common base structure and are distinguished by their `type` field.
-
-## Base Structure
-
-All chunks extend a base structure with the following required fields:
-
-```typescript
-interface BaseStreamChunk {
- type: StreamChunkType;
- id: string; // Unique identifier for this chunk
- model: string; // Model name that generated this chunk
- timestamp: number; // Unix timestamp in milliseconds
-}
-```
-
-## Chunk Types
-
-### 1. Content Chunk
-
-Represents incremental text content from the AI model.
-
-```typescript
-interface ContentStreamChunk extends BaseStreamChunk {
- type: "content";
- delta?: string; // The incremental content token (preferred)
- content: string; // Full accumulated content so far
- role?: "assistant";
-}
-```
-
-**Example:**
-
-```json
-{
- "type": "content",
- "id": "chunk_abc123",
- "model": "gpt-4",
- "timestamp": 1699123456789,
- "delta": "Hello",
- "content": "Hello",
- "role": "assistant"
-}
-```
-
-**Notes:**
-
-- `delta` is preferred over `content` for incremental updates
-- `content` represents the full accumulated text up to this point
-- The client should prefer `delta` when both are present
-
-### 2. Tool Call Chunk
-
-Represents incremental tool call arguments being streamed.
-
-```typescript
-interface ToolCallStreamChunk extends BaseStreamChunk {
- type: "tool_call";
- toolCall: {
- id: string; // Unique identifier for this tool call
- type: "function";
- function: {
- name: string; // Name of the function/tool
- arguments: string; // Incremental JSON arguments (may be incomplete)
- };
- };
- index: number; // Zero-based index of this tool call in the current response
-}
-```
-
-**Example:**
-
-```json
-{
- "type": "tool_call",
- "id": "chunk_def456",
- "model": "gpt-4",
- "timestamp": 1699123456790,
- "toolCall": {
- "id": "call_xyz789",
- "type": "function",
- "function": {
- "name": "get_weather",
- "arguments": "{\"location\": \"San"
- }
- },
- "index": 0
-}
-```
-
-**Notes:**
-
-- `arguments` is a JSON string that may be incomplete (partial JSON)
-- Multiple chunks may be sent for the same tool call as arguments are streamed
-- The client should accumulate and parse the arguments incrementally
-
-### 3. Tool Result Chunk
-
-Represents the result of a tool execution.
-
-```typescript
-interface ToolResultStreamChunk extends BaseStreamChunk {
- type: "tool_result";
- toolCallId: string; // ID of the tool call this result belongs to
- content: string; // Result content (typically JSON stringified)
-}
-```
-
-**Example:**
-
-```json
-{
- "type": "tool_result",
- "id": "chunk_ghi012",
- "model": "gpt-4",
- "timestamp": 1699123456791,
- "toolCallId": "call_xyz789",
- "content": "{\"temperature\": 72, \"condition\": \"sunny\"}"
-}
-```
-
-### 4. Done Chunk
-
-Indicates the stream has completed.
-
-```typescript
-interface DoneStreamChunk extends BaseStreamChunk {
- type: "done";
- finishReason: "stop" | "length" | "content_filter" | "tool_calls" | null;
- usage?: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
-}
-```
-
-**Example:**
-
-```json
-{
- "type": "done",
- "id": "chunk_jkl345",
- "model": "gpt-4",
- "timestamp": 1699123456792,
- "finishReason": "stop",
- "usage": {
- "promptTokens": 150,
- "completionTokens": 75,
- "totalTokens": 225
- }
-}
-```
-
-**Notes:**
-
-- `finishReason: "tool_calls"` indicates the model wants to make tool calls
-- `finishReason: "stop"` indicates normal completion
-- `finishReason: "length"` indicates the response was truncated due to token limits
-- `usage` is optional and may not be present in all cases
-
-### 5. Error Chunk
-
-Indicates an error occurred during streaming.
-
-```typescript
-interface ErrorStreamChunk extends BaseStreamChunk {
- type: "error";
- error: {
- message: string;
- code?: string;
- };
-}
-```
-
-**Example:**
-
-```json
-{
- "type": "error",
- "id": "chunk_mno678",
- "model": "gpt-4",
- "timestamp": 1699123456793,
- "error": {
- "message": "Rate limit exceeded",
- "code": "rate_limit_exceeded"
- }
-}
-```
-
-**Notes:**
-
-- When an error chunk is received, the stream should be considered terminated
-- The client should handle the error and stop processing further chunks
-
-### 6. Approval Requested Chunk
-
-Indicates a tool call requires user approval before execution.
-
-```typescript
-interface ApprovalRequestedStreamChunk extends BaseStreamChunk {
- type: "approval-requested";
- toolCallId: string; // ID of the tool call requiring approval
- toolName: string; // Name of the tool
- input: any; // Parsed input arguments for the tool
- approval: {
- id: string; // Unique approval request ID
- needsApproval: true;
- };
-}
-```
-
-**Example:**
-
-```json
-{
- "type": "approval-requested",
- "id": "chunk_pqr901",
- "model": "gpt-4",
- "timestamp": 1699123456794,
- "toolCallId": "call_xyz789",
- "toolName": "send_email",
- "input": {
- "to": "user@example.com",
- "subject": "Important Update",
- "body": "Your request has been processed."
- },
- "approval": {
- "id": "approval_abc123",
- "needsApproval": true
- }
-}
-```
-
-**Notes:**
-
-- This chunk is emitted when a tool has `needsApproval: true` in its definition
-- The client should pause execution and wait for user approval
-- The approval ID is used to respond to the approval request
-
-### 7. Tool Input Available Chunk
-
-Indicates a tool call's input is available for client-side execution.
-
-```typescript
-interface ToolInputAvailableStreamChunk extends BaseStreamChunk {
- type: "tool-input-available";
- toolCallId: string; // ID of the tool call
- toolName: string; // Name of the tool
- input: any; // Parsed input arguments for the tool
-}
-```
-
-**Example:**
-
-```json
-{
- "type": "tool-input-available",
- "id": "chunk_stu234",
- "model": "gpt-4",
- "timestamp": 1699123456795,
- "toolCallId": "call_xyz789",
- "toolName": "update_ui",
- "input": {
- "component": "status",
- "value": "completed"
- }
-}
-```
-
-**Notes:**
-
-- This chunk is emitted for client-side tools (tools without server-side execution)
-- The client should execute the tool locally and return the result
-- This is separate from approval-requested - a tool can be client-side without requiring approval
-
-### 8. Thinking Chunk
-
-Represents "thinking" or reasoning content from models that support it (e.g., Claude's thinking mode).
-
-```typescript
-interface ThinkingStreamChunk extends BaseStreamChunk {
- type: "thinking";
- delta?: string; // The incremental thinking token (preferred)
- content: string; // Full accumulated thinking content so far
-}
-```
-
-**Example:**
-
-```json
-{
- "type": "thinking",
- "id": "chunk_vwx567",
- "model": "claude-3-opus",
- "timestamp": 1699123456796,
- "delta": "Let me",
- "content": "Let me"
-}
-```
-
-**Notes:**
-
-- Similar to content chunks, `delta` is preferred over `content`
-- This represents internal reasoning that may not be shown to the user
-- Not all models support thinking chunks
-
-## Complete Type Definition
-
-```typescript
-type StreamChunkType =
- | "content"
- | "tool_call"
- | "tool_result"
- | "done"
- | "error"
- | "approval-requested"
- | "tool-input-available"
- | "thinking";
-
-type StreamChunk =
- | ContentStreamChunk
- | ToolCallStreamChunk
- | ToolResultStreamChunk
- | DoneStreamChunk
- | ErrorStreamChunk
- | ApprovalRequestedStreamChunk
- | ToolInputAvailableStreamChunk
- | ThinkingStreamChunk;
-```
-
-## Transport Mechanisms
-
-The protocol is transport-agnostic. Chunks can be sent via:
-
-1. **Server-Sent Events (SSE)**: Each chunk is sent as `data: \n\n`
-2. **HTTP Stream**: Newline-delimited JSON (NDJSON)
-3. **Direct Stream**: AsyncIterable of chunk objects
-
-### SSE Format
-
-```
-data: {"type":"content","id":"chunk_1","model":"gpt-4","timestamp":1699123456789,"delta":"Hello","content":"Hello"}
-
-data: {"type":"content","id":"chunk_2","model":"gpt-4","timestamp":1699123456790,"delta":" world","content":"Hello world"}
-
-data: [DONE]
-```
-
-### NDJSON Format
-
-```
-{"type":"content","id":"chunk_1","model":"gpt-4","timestamp":1699123456789,"delta":"Hello","content":"Hello"}
-{"type":"content","id":"chunk_2","model":"gpt-4","timestamp":1699123456790,"delta":" world","content":"Hello world"}
-```
-
-## Chunk Flow
-
-### Typical Text Response
-
-1. Multiple `content` chunks (with `delta` and `content`)
-2. One `done` chunk (with `finishReason: "stop"`)
-
-### Tool Call Flow
-
-1. Multiple `tool_call` chunks (incremental arguments)
-2. One `done` chunk (with `finishReason: "tool_calls"`)
-3. Either:
- - `approval-requested` chunk (if tool needs approval)
- - `tool-input-available` chunk (if client-side tool)
- - `tool_result` chunk (if server executed)
-4. Continue with more content or another tool call cycle
-
-### Error Flow
-
-1. Any number of chunks
-2. One `error` chunk
-3. Stream terminates
-
-## Client Processing
-
-The `@tanstack/ai-client` package processes these chunks through:
-
-1. **Connection Adapter**: Receives chunks from transport
-2. **Stream Parser**: Converts adapter format to processor format (if needed)
-3. **Stream Processor**: Accumulates state, tracks tool calls, emits events
-4. **Chat Client**: Manages message state and UI updates
-
-The processor handles:
-
-- Accumulating text content from `delta` or `content` fields
-- Tracking tool call state (awaiting-input, input-streaming, input-complete)
-- Parsing partial JSON arguments incrementally
-- Emitting lifecycle events for tool calls
-- Managing parallel tool calls
-
-## Best Practices
-
-1. **Always prefer `delta` over `content`** when both are present
-2. **Handle partial JSON** in tool call arguments gracefully
-3. **Track tool call state** using the `id` field, not the `index`
-4. **Handle errors gracefully** - an error chunk terminates the stream
-5. **Respect approval flow** - wait for user approval when `approval-requested` is received
-6. **Use timestamps** for debugging and ordering chunks if needed
-
-## See Also
-
-- [Chat Client API](/docs/api/ai-client.md) - How to use the client
-- [Streaming Guide](/docs/guides/streaming.md) - Streaming patterns and examples
-- [Tool Registry](/docs/guides/tool-registry.md) - Tool execution and approval
diff --git a/docs/reference/type-aliases/AgentLoopStrategy.md b/docs/reference/type-aliases/AgentLoopStrategy.md
new file mode 100644
index 000000000..371cb3c32
--- /dev/null
+++ b/docs/reference/type-aliases/AgentLoopStrategy.md
@@ -0,0 +1,35 @@
+---
+id: AgentLoopStrategy
+title: AgentLoopStrategy
+---
+
+# Type Alias: AgentLoopStrategy()
+
+```ts
+type AgentLoopStrategy = (state) => boolean;
+```
+
+Defined in: [types.ts:226](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L226)
+
+Strategy function that determines whether the agent loop should continue
+
+## Parameters
+
+### state
+
+[`AgentLoopState`](../../interfaces/AgentLoopState.md)
+
+Current state of the agent loop
+
+## Returns
+
+`boolean`
+
+true to continue looping, false to stop
+
+## Example
+
+```typescript
+// Continue for up to 5 iterations
+const strategy: AgentLoopStrategy = ({ iterationCount }) => iterationCount < 5;
+```
diff --git a/docs/reference/type-aliases/ChatStreamOptionsUnion.md b/docs/reference/type-aliases/ChatStreamOptionsUnion.md
new file mode 100644
index 000000000..3599785a3
--- /dev/null
+++ b/docs/reference/type-aliases/ChatStreamOptionsUnion.md
@@ -0,0 +1,18 @@
+---
+id: ChatStreamOptionsUnion
+title: ChatStreamOptionsUnion
+---
+
+# Type Alias: ChatStreamOptionsUnion\<TAdapter\>
+
+```ts
+type ChatStreamOptionsUnion = TAdapter extends AIAdapter ? Models[number] extends infer TModel ? TModel extends string ? Omit & object : never : never : never;
+```
+
+Defined in: [types.ts:470](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L470)
+
+## Type Parameters
+
+### TAdapter
+
+`TAdapter` *extends* [`AIAdapter`](../../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`\>
diff --git a/docs/reference/type-aliases/ExtractModelsFromAdapter.md b/docs/reference/type-aliases/ExtractModelsFromAdapter.md
new file mode 100644
index 000000000..1e5ab2524
--- /dev/null
+++ b/docs/reference/type-aliases/ExtractModelsFromAdapter.md
@@ -0,0 +1,18 @@
+---
+id: ExtractModelsFromAdapter
+title: ExtractModelsFromAdapter
+---
+
+# Type Alias: ExtractModelsFromAdapter\<T\>
+
+```ts
+type ExtractModelsFromAdapter = T extends AIAdapter ? M[number] : never;
+```
+
+Defined in: [types.ts:494](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L494)
+
+## Type Parameters
+
+### T
+
+`T`
diff --git a/docs/reference/type-aliases/StreamChunk.md b/docs/reference/type-aliases/StreamChunk.md
new file mode 100644
index 000000000..758a39737
--- /dev/null
+++ b/docs/reference/type-aliases/StreamChunk.md
@@ -0,0 +1,22 @@
+---
+id: StreamChunk
+title: StreamChunk
+---
+
+# Type Alias: StreamChunk
+
+```ts
+type StreamChunk =
+ | ContentStreamChunk
+ | ToolCallStreamChunk
+ | ToolResultStreamChunk
+ | DoneStreamChunk
+ | ErrorStreamChunk
+ | ApprovalRequestedStreamChunk
+ | ToolInputAvailableStreamChunk
+ | ThinkingStreamChunk;
+```
+
+Defined in: [types.ts:350](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L350)
+
+Chunk returned by the SDK during streaming chat completions.
diff --git a/docs/reference/type-aliases/StreamChunkType.md b/docs/reference/type-aliases/StreamChunkType.md
new file mode 100644
index 000000000..1fface013
--- /dev/null
+++ b/docs/reference/type-aliases/StreamChunkType.md
@@ -0,0 +1,20 @@
+---
+id: StreamChunkType
+title: StreamChunkType
+---
+
+# Type Alias: StreamChunkType
+
+```ts
+type StreamChunkType =
+ | "content"
+ | "tool_call"
+ | "tool_result"
+ | "done"
+ | "error"
+ | "approval-requested"
+ | "tool-input-available"
+ | "thinking";
+```
+
+Defined in: [types.ts:262](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L262)
diff --git a/docs/reference/variables/aiEventClient.md b/docs/reference/variables/aiEventClient.md
new file mode 100644
index 000000000..80b4d1405
--- /dev/null
+++ b/docs/reference/variables/aiEventClient.md
@@ -0,0 +1,12 @@
+---
+id: aiEventClient
+title: aiEventClient
+---
+
+# Variable: aiEventClient
+
+```ts
+const aiEventClient: AiEventClient;
+```
+
+Defined in: [event-client.ts:357](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/event-client.ts#L357)
diff --git a/eslint.config.js b/eslint.config.js
new file mode 100644
index 000000000..95261a887
--- /dev/null
+++ b/eslint.config.js
@@ -0,0 +1,25 @@
+// @ts-check
+
+// @ts-ignore Needed due to moduleResolution Node vs Bundler
+import { tanstackConfig } from '@tanstack/config/eslint'
+import unusedImports from 'eslint-plugin-unused-imports'
+
+/** @type {import('eslint').Linter.FlatConfig[]} */
+const config = [
+ ...tanstackConfig,
+ {
+ name: 'tanstack/temp',
+ plugins: {
+ 'unused-imports': unusedImports,
+ },
+ rules: {
+ 'no-case-declarations': 'off',
+ 'no-shadow': 'off',
+ 'unused-imports/no-unused-imports': 'warn',
+ 'pnpm/enforce-catalog': 'off',
+ 'pnpm/json-enforce-catalog': 'off',
+ },
+ },
+]
+
+export default config
diff --git a/examples/README.md b/examples/README.md
index 749f38756..e04570d09 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,437 +1,465 @@
-# TanStack AI Examples
-
-This directory contains comprehensive examples demonstrating TanStack AI across multiple languages and frameworks.
-
-## Quick Start
-
-Choose an example based on your use case:
-
-- **Want a full-stack TypeScript app?** ā [TanStack Chat (ts-chat)](#tanstack-chat-ts-chat)
-- **Want a simple CLI tool?** ā [CLI Example](#cli-example)
-- **Need a vanilla JS frontend?** ā [Vanilla Chat](#vanilla-chat)
-- **Building a Python backend?** ā [Python FastAPI Server](#python-fastapi-server)
-- **Building a PHP backend?** ā [PHP Slim Framework Server](#php-slim-framework-server)
-
-## TypeScript Examples
-
-### TanStack Chat (ts-chat)
-
-A full-featured chat application built with the TanStack ecosystem.
-
-**Tech Stack:**
-- TanStack Start (full-stack React framework)
-- TanStack Router (type-safe routing)
-- TanStack Store (state management)
-- `@tanstack/ai` (AI backend)
-- `@tanstack/ai-react` (React hooks)
-- `@tanstack/ai-client` (headless client)
-
-**Features:**
-- ā Real-time streaming with OpenAI GPT-4o
-- ā Automatic tool execution loop
-- ā Rich markdown rendering
-- ā Conversation management
-- ā Modern UI with Tailwind CSS
-
-**Getting Started:**
-```bash
-cd examples/ts-chat
-pnpm install
-cp env.example .env
-# Edit .env and add your OPENAI_API_KEY
-pnpm start
-```
-
-š [Full Documentation](ts-chat/README.md)
-
----
-
-### CLI Example
-
-An interactive command-line interface for AI interactions.
-
-**Features:**
-- ā Multi-provider support (OpenAI, Anthropic, Ollama, Gemini)
-- ā Interactive chat with streaming
-- ā Automatic tool/function calling
-- ā Smart API key management
-- ā Debug mode for development
-
-**Getting Started:**
-```bash
-cd examples/cli
-pnpm install
-pnpm dev chat --provider openai
-```
-
-**Available Commands:**
-- `chat` - Interactive chat with streaming
-- `generate` - One-shot text generation
-- `summarize` - Text summarization
-- `embed` - Generate embeddings
-
-š [Full Documentation](cli/README.md)
-
----
-
-### Vanilla Chat
-
-A framework-free chat application using pure JavaScript and `@tanstack/ai-client`.
-
-**Tech Stack:**
-- Vanilla JavaScript (no frameworks!)
-- `@tanstack/ai-client` (headless client)
-- Vite (dev server)
-- Connects to Python FastAPI backend
-
-**Features:**
-- ā Pure vanilla JavaScript
-- ā Real-time streaming messages
-- ā Beautiful, responsive UI
-- ā No framework dependencies
-
-**Getting Started:**
-```bash
-# Start the Python backend first
-cd examples/python-fastapi
-python anthropic-server.py
-
-# Then start the frontend
-cd examples/vanilla-chat
-pnpm install
-pnpm dev
-```
-
-Open `http://localhost:3000`
-
-š [Full Documentation](vanilla-chat/README.md)
-
----
-
-## Python Examples
-
-### Python FastAPI Server
-
-A FastAPI server that streams AI responses in Server-Sent Events (SSE) format, compatible with TanStack AI clients.
-
-**Features:**
-- ā FastAPI with SSE streaming
-- ā Converts Anthropic/OpenAI events to `StreamChunk` format
-- ā Compatible with `@tanstack/ai-client`
-- ā Tool call support
-- ā Type-safe with Pydantic
-
-**Getting Started:**
-```bash
-cd examples/python-fastapi
-
-# Create virtual environment
-python3 -m venv venv
-source venv/bin/activate # On Windows: venv\Scripts\activate
-
-# Install dependencies
-pip install -r requirements.txt
-
-# Set up environment
-cp env.example .env
-# Edit .env and add your ANTHROPIC_API_KEY or OPENAI_API_KEY
-
-# Run the server
-python anthropic-server.py # or openai-server.py
-```
-
-**API Endpoints:**
-- `POST /chat` - Stream chat responses in SSE format
-- `GET /health` - Health check
-
-**Usage with TypeScript Client:**
-```typescript
-import { ChatClient, fetchServerSentEvents } from "@tanstack/ai-client";
-
-const client = new ChatClient({
- connection: fetchServerSentEvents("http://localhost:8000/chat"),
-});
-
-await client.sendMessage("Hello!");
-```
-
-š [Full Documentation](python-fastapi/README.md)
-
----
-
-## PHP Examples
-
-### PHP Slim Framework Server
-
-A PHP Slim Framework server that streams AI responses in SSE format, with support for both Anthropic and OpenAI.
-
-**Features:**
-- ā Slim Framework with SSE streaming
-- ā Converts Anthropic/OpenAI events to `StreamChunk` format
-- ā Compatible with `@tanstack/ai-client`
-- ā Tool call support
-- ā PHP 8.1+ with type safety
-
-**Getting Started:**
-```bash
-cd examples/php-slim
-
-# Install dependencies
-composer install
-
-# Set up environment
-cp env.example .env
-# Edit .env and add your ANTHROPIC_API_KEY and/or OPENAI_API_KEY
-
-# Run the server
-composer start-anthropic # Runs on port 8000
-# or
-composer start-openai # Runs on port 8001
-```
-
-**API Endpoints:**
-- `POST /chat` - Stream chat responses in SSE format
-- `GET /health` - Health check
-
-**Usage with TypeScript Client:**
-```typescript
-import { ChatClient, fetchServerSentEvents } from "@tanstack/ai-client";
-
-const client = new ChatClient({
- connection: fetchServerSentEvents("http://localhost:8000/chat"),
-});
-
-await client.sendMessage("Hello!");
-```
-
-š [Full Documentation](php-slim/README.md)
-
----
-
-## Architecture Patterns
-
-### Full-Stack TypeScript
-
-Use TanStack AI end-to-end in TypeScript:
-
-```
-Frontend (React)
- ā (useChat hook)
-@tanstack/ai-react
- ā (ChatClient)
-@tanstack/ai-client
- ā (SSE/HTTP)
-Backend (TanStack Start API Route)
- ā (chat() function)
-@tanstack/ai
- ā (adapter)
-AI Provider (OpenAI/Anthropic/etc.)
-```
-
-**Example:** [TanStack Chat (ts-chat)](ts-chat/README.md)
-
-### Multi-Language Backend
-
-Use Python or PHP for the backend, TypeScript for the frontend:
-
-```
-Frontend (Vanilla JS/React/Vue/etc.)
- ā (ChatClient)
-@tanstack/ai-client
- ā (SSE/HTTP)
-Backend (Python FastAPI or PHP Slim)
- ā (tanstack-ai or tanstack/ai)
-Stream Conversion & Message Formatting
- ā (provider SDK)
-AI Provider (OpenAI/Anthropic/etc.)
-```
-
-**Examples:**
-- [Python FastAPI](python-fastapi/README.md) + [Vanilla Chat](vanilla-chat/README.md)
-- [PHP Slim](php-slim/README.md) + any frontend with `@tanstack/ai-client`
-
-### CLI Tool
-
-Use TanStack AI in command-line applications:
-
-```
-CLI
- ā (chat() function)
-@tanstack/ai
- ā (adapter)
-AI Provider (OpenAI/Anthropic/Ollama/Gemini)
-```
-
-**Example:** [CLI Example](cli/README.md)
-
----
-
-## Common Patterns
-
-### Server-Sent Events (SSE) Streaming
-
-All examples use SSE for real-time streaming:
-
-**Backend (TypeScript):**
-```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
-import { openai } from "@tanstack/ai-openai";
-
-const stream = chat({
- adapter: openai(),
- model: "gpt-4o",
- messages,
-});
-
-return toStreamResponse(stream);
-```
-
-**Backend (Python):**
-```python
-from tanstack_ai import StreamChunkConverter, format_sse_chunk
-
-async for event in anthropic_stream:
- chunks = await converter.convert_event(event)
- for chunk in chunks:
- yield format_sse_chunk(chunk)
-```
-
-**Backend (PHP):**
-```php
-use TanStack\AI\StreamChunkConverter;
-use TanStack\AI\SSEFormatter;
-
-foreach ($anthropicStream as $event) {
- $chunks = $converter->convertEvent($event);
- foreach ($chunks as $chunk) {
- echo SSEFormatter::formatChunk($chunk);
- }
-}
-```
-
-**Frontend:**
-```typescript
-import { ChatClient, fetchServerSentEvents } from "@tanstack/ai-client";
-
-const client = new ChatClient({
- connection: fetchServerSentEvents("/api/chat"),
-});
-```
-
-### Automatic Tool Execution
-
-The TypeScript backend (`@tanstack/ai`) automatically handles tool execution:
-
-```typescript
-import { chat, tool } from "@tanstack/ai";
-
-const weatherTool = tool({
- function: {
- name: "getWeather",
- description: "Get weather for a location",
- parameters: { /* ... */ },
- },
- execute: async (args) => {
- // This is called automatically by the SDK
- return JSON.stringify({ temp: 72, condition: "sunny" });
- },
-});
-
-const stream = chat({
- adapter: openai(),
- model: "gpt-4o",
- messages,
- tools: [weatherTool], // SDK executes these automatically
-});
-```
-
-Clients receive:
-- `content` chunks - text from the model
-- `tool_call` chunks - when the model calls a tool
-- `tool_result` chunks - results from tool execution
-- `done` chunk - conversation complete
-
----
-
-## Development Tips
-
-### Running Multiple Examples
-
-You can run backend and frontend examples together:
-
-```bash
-# Terminal 1: Start Python backend
-cd examples/python-fastapi
-python anthropic-server.py
-
-# Terminal 2: Start vanilla frontend
-cd examples/vanilla-chat
-pnpm dev
-
-# Terminal 3: Start ts-chat (full-stack)
-cd examples/ts-chat
-pnpm start
-```
-
-### Environment Variables
-
-Each example has an `env.example` file. Copy it to `.env` and add your API keys:
-
-```bash
-# TypeScript examples
-OPENAI_API_KEY=sk-...
-ANTHROPIC_API_KEY=sk-ant-...
-
-# Python examples
-ANTHROPIC_API_KEY=sk-ant-...
-OPENAI_API_KEY=sk-...
-
-# PHP examples
-ANTHROPIC_API_KEY=sk-ant-...
-OPENAI_API_KEY=sk-...
-```
-
-### Building for Production
-
-**TypeScript:**
-```bash
-pnpm build
-```
-
-**Python:**
-```bash
-# Use a production ASGI server
-uvicorn anthropic-server:app --host 0.0.0.0 --port 8000
-```
-
-**PHP:**
-```bash
-# Use a production web server (Apache, Nginx, etc.)
-# See php-slim/README.md for deployment details
-```
-
----
-
-## Contributing
-
-When adding new examples:
-
-1. **Create a README.md** with setup instructions
-2. **Add an env.example** file with required environment variables
-3. **Document the tech stack** and key features
-4. **Include usage examples** with code snippets
-5. **Update this README** to list your example
-
----
-
-## Learn More
-
-- š [Main README](../README.md) - Project overview
-- š [Documentation](../docs/) - Comprehensive guides
-- š [TypeScript Packages](../packages/typescript/) - Core libraries
-- š [Python Package](../packages/python/tanstack-ai/) - Python utilities
-- š [PHP Package](../packages/php/tanstack-ai/) - PHP utilities
-
----
-
-Built with ā¤ļø by the TanStack community
+# TanStack AI Examples
+
+This directory contains comprehensive examples demonstrating TanStack AI across multiple languages and frameworks.
+
+## Quick Start
+
+Choose an example based on your use case:
+
+- **Want a full-stack TypeScript app?** ā [TanStack Chat (ts-chat)](#tanstack-chat-ts-chat)
+- **Want a simple CLI tool?** ā [CLI Example](#cli-example)
+- **Need a vanilla JS frontend?** ā [Vanilla Chat](#vanilla-chat)
+- **Building a Python backend?** ā [Python FastAPI Server](#python-fastapi-server)
+- **Building a PHP backend?** ā [PHP Slim Framework Server](#php-slim-framework-server)
+
+## TypeScript Examples
+
+### TanStack Chat (ts-chat)
+
+A full-featured chat application built with the TanStack ecosystem.
+
+**Tech Stack:**
+
+- TanStack Start (full-stack React framework)
+- TanStack Router (type-safe routing)
+- TanStack Store (state management)
+- `@tanstack/ai` (AI backend)
+- `@tanstack/ai-react` (React hooks)
+- `@tanstack/ai-client` (headless client)
+
+**Features:**
+
+- ā Real-time streaming with OpenAI GPT-4o
+- ā Automatic tool execution loop
+- ā Rich markdown rendering
+- ā Conversation management
+- ā Modern UI with Tailwind CSS
+
+**Getting Started:**
+
+```bash
+cd examples/ts-chat
+pnpm install
+cp env.example .env
+# Edit .env and add your OPENAI_API_KEY
+pnpm start
+```
+
+š [Full Documentation](ts-chat/README.md)
+
+---
+
+### CLI Example
+
+An interactive command-line interface for AI interactions.
+
+**Features:**
+
+- ā Multi-provider support (OpenAI, Anthropic, Ollama, Gemini)
+- ā Interactive chat with streaming
+- ā Automatic tool/function calling
+- ā Smart API key management
+- ā Debug mode for development
+
+**Getting Started:**
+
+```bash
+cd examples/cli
+pnpm install
+pnpm dev chat --provider openai
+```
+
+**Available Commands:**
+
+- `chat` - Interactive chat with streaming
+- `generate` - One-shot text generation
+- `summarize` - Text summarization
+- `embed` - Generate embeddings
+
+š [Full Documentation](cli/README.md)
+
+---
+
+### Vanilla Chat
+
+A framework-free chat application using pure JavaScript and `@tanstack/ai-client`.
+
+**Tech Stack:**
+
+- Vanilla JavaScript (no frameworks!)
+- `@tanstack/ai-client` (headless client)
+- Vite (dev server)
+- Connects to Python FastAPI backend
+
+**Features:**
+
+- ā Pure vanilla JavaScript
+- ā Real-time streaming messages
+- ā Beautiful, responsive UI
+- ā No framework dependencies
+
+**Getting Started:**
+
+```bash
+# Start the Python backend first
+cd examples/python-fastapi
+python anthropic-server.py
+
+# Then, in a separate terminal (from the repo root), start the frontend
+cd examples/vanilla-chat
+pnpm install
+pnpm dev
+```
+
+Open `http://localhost:3000`
+
+š [Full Documentation](vanilla-chat/README.md)
+
+---
+
+## Python Examples
+
+### Python FastAPI Server
+
+A FastAPI server that streams AI responses in Server-Sent Events (SSE) format, compatible with TanStack AI clients.
+
+**Features:**
+
+- ā FastAPI with SSE streaming
+- ā Converts Anthropic/OpenAI events to `StreamChunk` format
+- ā Compatible with `@tanstack/ai-client`
+- ā Tool call support
+- ā Type-safe with Pydantic
+
+**Getting Started:**
+
+```bash
+cd examples/python-fastapi
+
+# Create virtual environment
+python3 -m venv venv
+source venv/bin/activate # On Windows: venv\Scripts\activate
+
+# Install dependencies
+pip install -r requirements.txt
+
+# Set up environment
+cp env.example .env
+# Edit .env and add your ANTHROPIC_API_KEY or OPENAI_API_KEY
+
+# Run the server
+python anthropic-server.py # or openai-server.py
+```
+
+**API Endpoints:**
+
+- `POST /chat` - Stream chat responses in SSE format
+- `GET /health` - Health check
+
+**Usage with TypeScript Client:**
+
+```typescript
+import { ChatClient, fetchServerSentEvents } from '@tanstack/ai-client'
+
+const client = new ChatClient({
+ connection: fetchServerSentEvents('http://localhost:8000/chat'),
+})
+
+await client.sendMessage('Hello!')
+```
+
+š [Full Documentation](python-fastapi/README.md)
+
+---
+
+## PHP Examples
+
+### PHP Slim Framework Server
+
+A PHP Slim Framework server that streams AI responses in SSE format, with support for both Anthropic and OpenAI.
+
+**Features:**
+
+- ā Slim Framework with SSE streaming
+- ā Converts Anthropic/OpenAI events to `StreamChunk` format
+- ā Compatible with `@tanstack/ai-client`
+- ā Tool call support
+- ā PHP 8.1+ with type safety
+
+**Getting Started:**
+
+```bash
+cd examples/php-slim
+
+# Install dependencies
+composer install
+
+# Set up environment
+cp env.example .env
+# Edit .env and add your ANTHROPIC_API_KEY and/or OPENAI_API_KEY
+
+# Run the server
+composer start-anthropic # Runs on port 8000
+# or
+composer start-openai # Runs on port 8001
+```
+
+**API Endpoints:**
+
+- `POST /chat` - Stream chat responses in SSE format
+- `GET /health` - Health check
+
+**Usage with TypeScript Client:**
+
+```typescript
+import { ChatClient, fetchServerSentEvents } from '@tanstack/ai-client'
+
+const client = new ChatClient({
+ connection: fetchServerSentEvents('http://localhost:8000/chat'),
+})
+
+await client.sendMessage('Hello!')
+```
+
+š [Full Documentation](php-slim/README.md)
+
+---
+
+## Architecture Patterns
+
+### Full-Stack TypeScript
+
+Use TanStack AI end-to-end in TypeScript:
+
+```
+Frontend (React)
+ ā (useChat hook)
+@tanstack/ai-react
+ ā (ChatClient)
+@tanstack/ai-client
+ ā (SSE/HTTP)
+Backend (TanStack Start API Route)
+ ā (chat() function)
+@tanstack/ai
+ ā (adapter)
+AI Provider (OpenAI/Anthropic/etc.)
+```
+
+**Example:** [TanStack Chat (ts-chat)](ts-chat/README.md)
+
+### Multi-Language Backend
+
+Use Python or PHP for the backend, TypeScript for the frontend:
+
+```
+Frontend (Vanilla JS/React/Vue/etc.)
+ ā (ChatClient)
+@tanstack/ai-client
+ ā (SSE/HTTP)
+Backend (Python FastAPI or PHP Slim)
+ ā (tanstack-ai or tanstack/ai)
+Stream Conversion & Message Formatting
+ ā (provider SDK)
+AI Provider (OpenAI/Anthropic/etc.)
+```
+
+**Examples:**
+
+- [Python FastAPI](python-fastapi/README.md) + [Vanilla Chat](vanilla-chat/README.md)
+- [PHP Slim](php-slim/README.md) + any frontend with `@tanstack/ai-client`
+
+### CLI Tool
+
+Use TanStack AI in command-line applications:
+
+```
+CLI
+ ā (chat() function)
+@tanstack/ai
+ ā (adapter)
+AI Provider (OpenAI/Anthropic/Ollama/Gemini)
+```
+
+**Example:** [CLI Example](cli/README.md)
+
+---
+
+## Common Patterns
+
+### Server-Sent Events (SSE) Streaming
+
+All examples use SSE for real-time streaming:
+
+**Backend (TypeScript):**
+
+```typescript
+import { chat, toStreamResponse } from '@tanstack/ai'
+import { openai } from '@tanstack/ai-openai'
+
+const stream = chat({
+ adapter: openai(),
+ model: 'gpt-4o',
+ messages,
+})
+
+return toStreamResponse(stream)
+```
+
+**Backend (Python):**
+
+```python
+from tanstack_ai import StreamChunkConverter, format_sse_chunk
+
+async for event in anthropic_stream:
+ chunks = await converter.convert_event(event)
+ for chunk in chunks:
+ yield format_sse_chunk(chunk)
+```
+
+**Backend (PHP):**
+
+```php
+use TanStack\AI\StreamChunkConverter;
+use TanStack\AI\SSEFormatter;
+
+foreach ($anthropicStream as $event) {
+ $chunks = $converter->convertEvent($event);
+ foreach ($chunks as $chunk) {
+ echo SSEFormatter::formatChunk($chunk);
+ }
+}
+```
+
+**Frontend:**
+
+```typescript
+import { ChatClient, fetchServerSentEvents } from '@tanstack/ai-client'
+
+const client = new ChatClient({
+ connection: fetchServerSentEvents('/api/chat'),
+})
+```
+
+### Automatic Tool Execution
+
+The TypeScript backend (`@tanstack/ai`) automatically handles tool execution:
+
+```typescript
+import { chat, tool } from '@tanstack/ai'
+
+const weatherTool = tool({
+ function: {
+ name: 'getWeather',
+ description: 'Get weather for a location',
+ parameters: {
+ /* ... */
+ },
+ },
+ execute: async (args) => {
+ // This is called automatically by the SDK
+ return JSON.stringify({ temp: 72, condition: 'sunny' })
+ },
+})
+
+const stream = chat({
+ adapter: openai(),
+ model: 'gpt-4o',
+ messages,
+ tools: [weatherTool], // SDK executes these automatically
+})
+```
+
+Clients receive:
+
+- `content` chunks - text from the model
+- `tool_call` chunks - when the model calls a tool
+- `tool_result` chunks - results from tool execution
+- `done` chunk - conversation complete
+
+---
+
+## Development Tips
+
+### Running Multiple Examples
+
+You can run backend and frontend examples together:
+
+```bash
+# Terminal 1: Start Python backend
+cd examples/python-fastapi
+python anthropic-server.py
+
+# Terminal 2: Start vanilla frontend
+cd examples/vanilla-chat
+pnpm dev
+
+# Terminal 3: Start ts-chat (full-stack)
+cd examples/ts-chat
+pnpm start
+```
+
+### Environment Variables
+
+Each example has an `env.example` file. Copy it to `.env` and add your API keys:
+
+```bash
+# TypeScript examples
+OPENAI_API_KEY=sk-...
+ANTHROPIC_API_KEY=sk-ant-...
+
+# Python examples
+ANTHROPIC_API_KEY=sk-ant-...
+OPENAI_API_KEY=sk-...
+
+# PHP examples
+ANTHROPIC_API_KEY=sk-ant-...
+OPENAI_API_KEY=sk-...
+```
+
+### Building for Production
+
+**TypeScript:**
+
+```bash
+pnpm build
+```
+
+**Python:**
+
+```bash
+# Use a production ASGI server.
+# Note: uvicorn import strings cannot contain hyphens, so rename
+# anthropic-server.py to anthropic_server.py (or keep using
+# `python anthropic-server.py`) before running:
+uvicorn anthropic_server:app --host 0.0.0.0 --port 8000
+```
+
+**PHP:**
+
+```bash
+# Use a production web server (Apache, Nginx, etc.)
+# See php-slim/README.md for deployment details
+```
+
+---
+
+## Contributing
+
+When adding new examples:
+
+1. **Create a README.md** with setup instructions
+2. **Add an env.example** file with required environment variables
+3. **Document the tech stack** and key features
+4. **Include usage examples** with code snippets
+5. **Update this README** to list your example
+
+---
+
+## Learn More
+
+- š [Main README](../README.md) - Project overview
+- š [Documentation](../docs/) - Comprehensive guides
+- š [TypeScript Packages](../packages/typescript/) - Core libraries
+- š [Python Package](../packages/python/tanstack-ai/) - Python utilities
+- š [PHP Package](../packages/php/tanstack-ai/) - PHP utilities
+
+---
+
+Built with ā¤ļø by the TanStack community
diff --git a/examples/php-slim/README.md b/examples/php-slim/README.md
index b2ea4ea8f..2d74876d6 100644
--- a/examples/php-slim/README.md
+++ b/examples/php-slim/README.md
@@ -45,26 +45,31 @@ cp env.example .env
4. **Run the server:**
**For Anthropic:**
+
```bash
php -S 0.0.0.0:8000 -t public public/anthropic-server.php
```
Or using Composer:
+
```bash
composer start-anthropic
```
**For OpenAI:**
+
```bash
php -S 0.0.0.0:8001 -t public public/openai-server.php
```
Or using Composer:
+
```bash
composer start-openai
```
The servers will start on:
+
- Anthropic: `http://localhost:8000`
- OpenAI: `http://localhost:8001`
@@ -108,13 +113,13 @@ Health check endpoint.
This server is compatible with the TypeScript TanStack AI client:
```typescript
-import { ChatClient, fetchServerSentEvents } from "@tanstack/ai-client";
+import { ChatClient, fetchServerSentEvents } from '@tanstack/ai-client'
const client = new ChatClient({
- connection: fetchServerSentEvents("http://localhost:8000/chat"),
-});
+ connection: fetchServerSentEvents('http://localhost:8000/chat'),
+})
-await client.sendMessage("Hello!");
+await client.sendMessage('Hello!')
```
## StreamChunk Format
@@ -131,6 +136,7 @@ See `packages/typescript/ai/src/types.ts` for the full TypeScript type definitio
## Supported Providers
The converter currently supports:
+
- ā **Anthropic** (Claude models) - fully implemented
- ā **OpenAI** (GPT models) - fully implemented
@@ -169,6 +175,7 @@ The converter package is installed as a local dependency, making it easy to deve
To use the local `tanstack/ai` package during development:
1. Add to `composer.json`:
+
```json
{
"repositories": [
@@ -184,7 +191,7 @@ To use the local `tanstack/ai` package during development:
```
2. Run:
+
```bash
composer update tanstack/ai
```
-
diff --git a/examples/php-slim/composer.json b/examples/php-slim/composer.json
index c7d997e9b..5422a972c 100644
--- a/examples/php-slim/composer.json
+++ b/examples/php-slim/composer.json
@@ -1,40 +1,40 @@
{
- "name": "tanstack/ai-php-example",
- "description": "PHP Slim Framework example for TanStack AI",
- "type": "project",
- "repositories": [
- {
- "type": "path",
- "url": "../../packages/php/tanstack-ai"
- }
- ],
- "require": {
- "php": ">=8.1",
- "slim/slim": "^4.12",
- "slim/psr7": "^1.6",
- "vlucas/phpdotenv": "^5.5",
- "monolog/monolog": "^3.0",
- "anthropic-ai/sdk": "^0.3.0",
- "openai-php/client": "^0.10.0",
- "tanstack/ai": "@dev",
- "symfony/http-client": "^7.3"
- },
- "require-dev": {
- "slim/psr7": "^1.6"
- },
- "autoload": {
- "psr-4": {
- "TanStack\\AI\\Example\\": "src/"
- }
- },
- "scripts": {
- "start": "php -S 0.0.0.0:8000 -t public public/index.php",
- "start-anthropic": "php -S 0.0.0.0:8000 -t public public/anthropic-server.php",
- "start-openai": "php -S 0.0.0.0:8001 -t public public/openai-server.php"
- },
- "config": {
- "allow-plugins": {
- "php-http/discovery": true
- }
+ "name": "tanstack/ai-php-example",
+ "description": "PHP Slim Framework example for TanStack AI",
+ "type": "project",
+ "repositories": [
+ {
+ "type": "path",
+ "url": "../../packages/php/tanstack-ai"
}
+ ],
+ "require": {
+ "php": ">=8.1",
+ "slim/slim": "^4.12",
+ "slim/psr7": "^1.6",
+ "vlucas/phpdotenv": "^5.5",
+ "monolog/monolog": "^3.0",
+ "anthropic-ai/sdk": "^0.3.0",
+ "openai-php/client": "^0.10.0",
+ "tanstack/ai": "@dev",
+ "symfony/http-client": "^7.3"
+ },
+ "require-dev": {
+ "slim/psr7": "^1.6"
+ },
+ "autoload": {
+ "psr-4": {
+ "TanStack\\AI\\Example\\": "src/"
+ }
+ },
+ "scripts": {
+ "start": "php -S 0.0.0.0:8000 -t public public/index.php",
+ "start-anthropic": "php -S 0.0.0.0:8000 -t public public/anthropic-server.php",
+ "start-openai": "php -S 0.0.0.0:8001 -t public public/openai-server.php"
+ },
+ "config": {
+ "allow-plugins": {
+ "php-http/discovery": true
+ }
+ }
}
diff --git a/examples/php-slim/package.json b/examples/php-slim/package.json
new file mode 100644
index 000000000..d8747401d
--- /dev/null
+++ b/examples/php-slim/package.json
@@ -0,0 +1,5 @@
+{
+ "name": "php-slim",
+ "version": "0.0.0",
+ "private": true
+}
diff --git a/examples/python-fastapi/README.md b/examples/python-fastapi/README.md
index 55104bb8f..a7cd78cf0 100644
--- a/examples/python-fastapi/README.md
+++ b/examples/python-fastapi/README.md
@@ -34,7 +34,6 @@ python3 -m venv venv
```
3. **Activate the virtual environment:**
-
- **On macOS/Linux:**
```bash
@@ -129,13 +128,13 @@ Health check endpoint.
This server is compatible with the TypeScript TanStack AI client:
```typescript
-import { ChatClient, fetchServerSentEvents } from "@tanstack/ai-client";
+import { ChatClient, fetchServerSentEvents } from '@tanstack/ai-client'
const client = new ChatClient({
- connection: fetchServerSentEvents("http://localhost:8000/chat"),
-});
+ connection: fetchServerSentEvents('http://localhost:8000/chat'),
+})
-await client.sendMessage("Hello!");
+await client.sendMessage('Hello!')
```
## StreamChunk Format
@@ -152,6 +151,7 @@ See `packages/typescript/ai/src/types.ts` for the full TypeScript type definitio
## Supported Providers
The converter currently supports:
+
- ā **Anthropic** (Claude models) - fully implemented
- ā **OpenAI** (GPT models) - converter implemented, ready to use
diff --git a/examples/python-fastapi/package.json b/examples/python-fastapi/package.json
new file mode 100644
index 000000000..ec87def56
--- /dev/null
+++ b/examples/python-fastapi/package.json
@@ -0,0 +1,5 @@
+{
+ "name": "python-fastapi",
+ "version": "0.0.0",
+ "private": true
+}
diff --git a/examples/ts-chat/.cta.json b/examples/ts-chat/.cta.json
index 2371ddc70..8142313ea 100644
--- a/examples/ts-chat/.cta.json
+++ b/examples/ts-chat/.cta.json
@@ -8,10 +8,5 @@
"git": true,
"version": 1,
"framework": "react-cra",
- "chosenAddOns": [
- "nitro",
- "start",
- "tanchat",
- "store"
- ]
-}
\ No newline at end of file
+ "chosenAddOns": ["nitro", "start", "tanchat", "store"]
+}
diff --git a/examples/ts-chat/README.md b/examples/ts-chat/README.md
index 0db61b98a..c2671c43d 100644
--- a/examples/ts-chat/README.md
+++ b/examples/ts-chat/README.md
@@ -94,7 +94,7 @@ Now that you have two routes you can use a `Link` component to navigate between
To use SPA (Single Page Application) navigation you will need to import the `Link` component from `@tanstack/react-router`.
```tsx
-import { Link } from "@tanstack/react-router";
+import { Link } from '@tanstack/react-router'
```
Then anywhere in your JSX you can use it like so:
@@ -114,10 +114,10 @@ In the File Based Routing setup the layout is located in `src/routes/__root.tsx`
Here is an example layout that includes a header:
```tsx
-import { Outlet, createRootRoute } from "@tanstack/react-router";
-import { TanStackRouterDevtools } from "@tanstack/react-router-devtools";
+import { Outlet, createRootRoute } from '@tanstack/react-router'
+import { TanStackRouterDevtools } from '@tanstack/react-router-devtools'
-import { Link } from "@tanstack/react-router";
+import { Link } from '@tanstack/react-router'
export const Route = createRootRoute({
component: () => (
@@ -132,7 +132,7 @@ export const Route = createRootRoute({
>
),
-});
+})
```
The `` component is not required so you can remove it if you don't want it in your layout.
@@ -148,26 +148,26 @@ For example:
```tsx
const peopleRoute = createRoute({
getParentRoute: () => rootRoute,
- path: "/people",
+ path: '/people',
loader: async () => {
- const response = await fetch("https://swapi.dev/api/people");
+ const response = await fetch('https://swapi.dev/api/people')
return response.json() as Promise<{
results: {
- name: string;
- }[];
- }>;
+ name: string
+ }[]
+ }>
},
component: () => {
- const data = peopleRoute.useLoaderData();
+ const data = peopleRoute.useLoaderData()
return (
{data.results.map((person) => (
{person.name}
))}
- );
+ )
},
-});
+})
```
Loaders simplify your data fetching logic dramatically. Check out more information in the [Loader documentation](https://tanstack.com/router/latest/docs/framework/react/guide/data-loading#loader-parameters).
@@ -185,29 +185,29 @@ pnpm add @tanstack/react-query @tanstack/react-query-devtools
Next we'll need to create a query client and provider. We recommend putting those in `main.tsx`.
```tsx
-import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
+import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
// ...
-const queryClient = new QueryClient();
+const queryClient = new QueryClient()
// ...
if (!rootElement.innerHTML) {
- const root = ReactDOM.createRoot(rootElement);
+ const root = ReactDOM.createRoot(rootElement)
root.render(
-
- );
+ ,
+ )
}
```
You can also add TanStack Query Devtools to the root route (optional).
```tsx
-import { ReactQueryDevtools } from "@tanstack/react-query-devtools";
+import { ReactQueryDevtools } from '@tanstack/react-query-devtools'
const rootRoute = createRootRoute({
component: () => (
@@ -217,25 +217,25 @@ const rootRoute = createRootRoute({
>
),
-});
+})
```
Now you can use `useQuery` to fetch your data.
```tsx
-import { useQuery } from "@tanstack/react-query";
+import { useQuery } from '@tanstack/react-query'
-import "./App.css";
+import './App.css'
function App() {
const { data } = useQuery({
- queryKey: ["people"],
+ queryKey: ['people'],
queryFn: () =>
- fetch("https://swapi.dev/api/people")
+ fetch('https://swapi.dev/api/people')
.then((res) => res.json())
.then((data) => data.results as { name: string }[]),
initialData: [],
- });
+ })
return (
@@ -245,10 +245,10 @@ function App() {
))}
- );
+ )
}
-export default App;
+export default App
```
You can find out everything you need to know on how to use React-Query in the [React-Query documentation](https://tanstack.com/query/latest/docs/framework/react/overview).
@@ -266,24 +266,24 @@ pnpm add @tanstack/store
Now let's create a simple counter in the `src/App.tsx` file as a demonstration.
```tsx
-import { useStore } from "@tanstack/react-store";
-import { Store } from "@tanstack/store";
-import "./App.css";
+import { useStore } from '@tanstack/react-store'
+import { Store } from '@tanstack/store'
+import './App.css'
-const countStore = new Store(0);
+const countStore = new Store(0)
function App() {
- const count = useStore(countStore);
+ const count = useStore(countStore)
return (
- );
+ )
}
-export default App;
+export default App
```
One of the many nice features of TanStack Store is the ability to derive state from other state. That derived state will update when the base state updates.
@@ -291,21 +291,21 @@ One of the many nice features of TanStack Store is the ability to derive state f
Let's check this out by doubling the count using derived state.
```tsx
-import { useStore } from "@tanstack/react-store";
-import { Store, Derived } from "@tanstack/store";
-import "./App.css";
+import { useStore } from '@tanstack/react-store'
+import { Store, Derived } from '@tanstack/store'
+import './App.css'
-const countStore = new Store(0);
+const countStore = new Store(0)
const doubledStore = new Derived({
fn: () => countStore.state * 2,
deps: [countStore],
-});
-doubledStore.mount();
+})
+doubledStore.mount()
function App() {
- const count = useStore(countStore);
- const doubledCount = useStore(doubledStore);
+ const count = useStore(countStore)
+ const doubledCount = useStore(doubledStore)
return (