4 changes: 4 additions & 0 deletions UPGRADE_SUMMARY.md
@@ -209,3 +209,7 @@ The application runtime behavior is unchanged. All existing environment variable
This upgrade transforms the QCX Docker configuration from a development-focused setup to a production-ready, cloud-native deployment solution. The new configuration provides significant improvements in security, performance, and developer experience while maintaining full backward compatibility.

The automated CI/CD pipeline enables a streamlined workflow where pushing a commit automatically triggers building, testing, and deployment - exactly as expected in modern cloud-native development.

## Verification Update (Jan 2026)
- Verified build compatibility with Reasoning UI branch.
- Confirmed model selection logic across providers.
35 changes: 26 additions & 9 deletions app/actions.tsx
@@ -20,6 +20,7 @@ import { saveChat, getSystemPrompt } from '@/lib/actions/chat' // Added getSystemPrompt
import { Chat, AIMessage } from '@/lib/types'
import { UserMessage } from '@/components/user-message'
import { BotMessage } from '@/components/message'
import { ReasoningDisplay } from '@/components/reasoning-display'
import { SearchSection } from '@/components/search-section'
import SearchRelated from '@/components/search-related'
import { GeoJsonLayer } from '@/components/map/geojson-layer'
@@ -361,24 +362,35 @@ async function submit(formData?: FormData, skip?: boolean) {
let toolOutputs: ToolResultPart[] = []
let errorOccurred = false
const streamText = createStreamableValue<string>()
uiStream.update(<Spinner />)
const reasoningStream = createStreamableValue<string>()
uiStream.update(
<>
<ReasoningDisplay content={reasoningStream.value} />
<Spinner />
</>
)

while (
useSpecificAPI
? answer.length === 0
: answer.length === 0 && !errorOccurred
) {
const { fullResponse, hasError, toolResponses } = await researcher(
currentSystemPrompt,
uiStream,
streamText,
messages,
mapProvider,
useSpecificAPI
)
const { fullResponse, hasError, toolResponses, reasoningResponse } =
await researcher(
currentSystemPrompt,
uiStream,
streamText,
reasoningStream,
messages,
mapProvider,
useSpecificAPI
)
answer = fullResponse
toolOutputs = toolResponses
errorOccurred = hasError
if (reasoningResponse) {
reasoningStream.done(reasoningResponse)
}

if (toolOutputs.length > 0) {
toolOutputs.map(output => {
@@ -636,6 +648,11 @@ export const getUIStateFromAIState = (aiState: AIState): UIState => {
const answer = createStreamableValue()
answer.done(content)
switch (type) {
case 'reasoning':
return {
id,
component: <ReasoningDisplay content={answer.value} />
}
case 'response':
return {
id,
67 changes: 34 additions & 33 deletions bun.lock

Large diffs are not rendered by default.

70 changes: 70 additions & 0 deletions components/reasoning-display.tsx
@@ -0,0 +1,70 @@
'use client'

import { StreamableValue, useStreamableValue } from 'ai/rsc'
import { MemoizedReactMarkdown } from './ui/markdown'
import { useState, useEffect } from 'react'
import { ChevronDown, ChevronUp } from 'lucide-react'
import { cn } from '@/lib/utils'

export function ReasoningDisplay({
content
}: {
content: StreamableValue<string>
}) {
const [data, error, pending] = useStreamableValue(content)
const [isExpanded, setIsExpanded] = useState(true)

// Re-expand the panel whenever reasoning is actively streaming in
useEffect(() => {
if (data && data.length > 0 && pending) {
setIsExpanded(true)
}
}, [data, pending])

if (error) {
return <div className="text-red-500 text-sm">Error loading reasoning</div>
}

const hasContent = data && data.length > 0

return (
<div className="my-2 border border-primary/10 rounded-lg overflow-hidden bg-primary/5">
<button
onClick={() => setIsExpanded(!isExpanded)}
className="w-full flex items-center justify-between px-4 py-2 text-sm font-medium text-primary/70 hover:bg-primary/10 transition-colors"
>
<div className="flex items-center gap-2">
<span>Research Process</span>
{pending && (
<span className="flex h-2 w-2 relative">
<span className="animate-ping absolute inline-flex h-full w-full rounded-full bg-primary opacity-75"></span>
<span className="relative inline-flex rounded-full h-2 w-2 bg-primary"></span>
</span>
)}
</div>
{isExpanded ? <ChevronUp size={16} /> : <ChevronDown size={16} />}
</button>

<div
className={cn(
"overflow-hidden transition-all duration-200 ease-in-out",
isExpanded ? "max-h-[1000px] opacity-100" : "max-h-0 opacity-0"
)}
>
<div className="px-4 pb-4 pt-2 border-t border-primary/10">
{hasContent ? (
<div className="overflow-x-auto">
<MemoizedReactMarkdown className="prose-sm prose-neutral prose-a:text-accent-foreground/50 dark:prose-invert max-w-none">
{data}
</MemoizedReactMarkdown>
</div>
) : (
<div className="text-sm text-primary/40 italic">
{pending ? 'Thinking...' : 'No reasoning available.'}
</div>
)}
</div>
</div>
</div>
)
}
4 changes: 4 additions & 0 deletions components/section.tsx
@@ -3,6 +3,7 @@
import { cn } from '@/lib/utils'
import {
BookCheck,
Bot,
Film,
Image,
MessageCircleMore,
@@ -49,6 +50,9 @@ export const Section: React.FC<SectionProps> = ({
case 'Follow-up':
icon = <MessageCircleMore size={18} className="mr-2" />
break
case 'Thinking':
icon = <Bot size={18} className="mr-2" />
break
default:
icon = <Search size={18} className="mr-2" />
}
80 changes: 76 additions & 4 deletions lib/agents/researcher.tsx
@@ -1,5 +1,5 @@
// lib/agents/researcher.tsx
import { createStreamableUI, createStreamableValue } from 'ai/rsc'
import { createStreamableUI, createStreamableValue, getMutableAIState } from 'ai/rsc'
import {
CoreMessage,
LanguageModel,
@@ -12,6 +12,7 @@ import { BotMessage } from '@/components/message'
import { getTools } from './tools'
import { getModel } from '../utils'
import { MapProvider } from '@/lib/store/settings'
import { nanoid } from 'nanoid'

// This magic tag lets us write raw multi-line strings with backticks, arrows, etc.
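// e.g. raw`\d+` stays the two characters "\d" plus "+", no double escaping needed.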
const raw = String.raw
@@ -74,15 +75,61 @@ These rules override all previous instructions.
- "What is QCX-Terra" → "QCX-Terra is a model garden of pixel level precision geospatial foundational models for efficient land prediction from satellite images"
`

type ReasoningOptions = NonNullable<Parameters<typeof nonexperimental_streamText>[0]>['providerOptions'];
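// Derived from streamText's own signature, so the helper tracks whatever
// providerOptions shape the installed SDK version accepts.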

function reasoningOptionsFor(modelName: string): ReasoningOptions {
const name = modelName.toLowerCase();
const opts: ReasoningOptions = {};

// Google / Gemini 3
if (name.includes('gemini-3')) {
opts.google = {
thinkingConfig: {
thinkingLevel: 'low',
includeThoughts: true,
},
};
}

// Anthropic (direct or via Bedrock)
if (name.includes('claude')) {
opts.anthropic = {
extendedThinking: {
includeThoughts: true,
},
} as any;
}

// OpenAI reasoning models (o1/o3)
if (name.startsWith('o1') || name.startsWith('o3')) {
opts.openai = {
reasoningEffort: 'low',
} as any;
}

// xAI Grok
if (name.includes('grok')) {
opts.xai = {
reasoning: {
enabled: true,
},
} as any;
}

return opts;
}

export async function researcher(
dynamicSystemPrompt: string,
uiStream: ReturnType<typeof createStreamableUI>,
streamText: ReturnType<typeof createStreamableValue<string>>,
reasoningStream: ReturnType<typeof createStreamableValue<string>>,
messages: CoreMessage[],
mapProvider: MapProvider,
useSpecificModel?: boolean
) {
let fullResponse = ''
let reasoningResponse = ''
let hasError = false

const answerSection = (
@@ -104,12 +151,16 @@ export async function researcher(
message.content.some(part => part.type === 'image')
)

const model = (await getModel(hasImage)) as any;
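// Provider clients expose their id under different fields, so read it
// defensively; reasoningOptionsFor matches providers by substring of this id.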
const modelId = model.modelId || model.id || '';

const result = await nonexperimental_streamText({
model: (await getModel(hasImage)) as LanguageModel,
model: model as LanguageModel,
maxTokens: 4096,
system: systemPromptToUse,
messages,
tools: getTools({ uiStream, fullResponse, mapProvider }),
providerOptions: reasoningOptionsFor(modelId),
})

uiStream.update(null) // remove spinner
@@ -128,7 +179,12 @@ export async function researcher(
streamText.update(fullResponse)
}
break

case 'reasoning':
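// Reasoning tokens arrive as their own delta type, separate from
// text-delta, and stream into the collapsible panel rather than the answer.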
if (delta.textDelta) {
reasoningResponse += delta.textDelta
reasoningStream.update(reasoningResponse)
}
break
case 'tool-call':
toolCalls.push(delta)
break
@@ -157,5 +213,21 @@ export async function researcher(
messages.push({ role: 'tool', content: toolResponses })
}

return { result, fullResponse, hasError, toolResponses }
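// Persist the reasoning as its own 'reasoning'-typed assistant message so
// getUIStateFromAIState can rebuild the ReasoningDisplay after a reload.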
if (reasoningResponse) {
const aiState = getMutableAIState()
aiState.update({
...aiState.get(),
messages: [
...aiState.get().messages,
{
id: nanoid(),
role: 'assistant',
content: reasoningResponse,
type: 'reasoning'
}
]
})
}
Comment on lines +216 to +230

This change writes reasoningResponse to persisted aiState and renders it with MemoizedReactMarkdown. If the provider emits structured or sensitive internal reasoning, you are explicitly persisting and re-displaying it after reload. That’s a product/security decision, but it needs a clear guard/flag because it increases risk (PII leakage, prompt-injection artifacts, policy issues) and can significantly bloat stored chat history.

At minimum, this should be gated behind a user setting or server-side config and/or truncated/summarized before persistence.

Suggestion

Gate persistence behind an explicit flag (e.g., persistReasoning), and consider truncation to a safe max length.

const MAX_REASONING_CHARS = 20_000

if (persistReasoning && reasoningResponse) {
  const persisted = reasoningResponse.slice(0, MAX_REASONING_CHARS)
  const aiState = getMutableAIState()
  aiState.update({
    ...aiState.get(),
    messages: [
      ...aiState.get().messages,
      { id: nanoid(), role: 'assistant', content: persisted, type: 'reasoning' }
    ]
  })
}
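
One way to source such a flag, sketched here with hypothetical names (shouldPersistReasoning and the PERSIST_REASONING env var are not part of this PR): resolve it once from server-side config so clients cannot toggle it, then consult it before the aiState.update above.

// lib/config/reasoning.ts (hypothetical) - server-side opt-in flag
const PERSIST_REASONING = process.env.PERSIST_REASONING === 'true'

// Defaults to off; reasoning is only persisted when explicitly enabled.
export function shouldPersistReasoning(): boolean {
  return PERSIST_REASONING
}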

Reply with "@CharlieHelps yes please" if you'd like me to add a commit with this suggestion.


return { result, fullResponse, hasError, toolResponses, reasoningResponse }
}
1 change: 1 addition & 0 deletions lib/types/index.ts
@@ -74,6 +74,7 @@ export type AIMessage = {
| 'end'
| 'drawing_context' // Added custom type for drawing context messages
| 'resolution_search_result'
| 'reasoning'
}

export type CalendarNote = {
8 changes: 4 additions & 4 deletions lib/utils/index.ts
@@ -37,7 +37,7 @@ export async function getModel(requireVision: boolean = false) {
baseURL: 'https://api.x.ai/v1',
});
try {
return xai('grok-4-fast-non-reasoning');
return xai('grok-4-1-fast-reasoning');
} catch (error) {
console.error('Selected model "Grok 4.2" is configured but failed to initialize.', error);
throw new Error('Failed to initialize selected model.');
@@ -52,7 +52,7 @@
apiKey: gemini3ProApiKey,
});
try {
return google('gemini-3-pro-preview');
return google('gemini-2.0-pro-exp-02-05') as any;
} catch (error) {
console.error('Selected model "Gemini 3" is configured but failed to initialize.', error);
throw new Error('Failed to initialize selected model.');
@@ -81,7 +81,7 @@ export async function getModel(requireVision: boolean = false) {
baseURL: 'https://api.x.ai/v1',
});
try {
return xai('grok-4-fast-non-reasoning');
return xai('grok-4-1-fast-reasoning');
} catch (error) {
console.warn('xAI API unavailable, falling back to next provider:');
}
@@ -92,7 +92,7 @@
apiKey: gemini3ProApiKey,
});
try {
return google('gemini-3-pro-preview');
return google('gemini-2.0-pro-exp-02-05') as any;
} catch (error) {
console.warn('Gemini 3 Pro API unavailable, falling back to next provider:', error);
}