Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 11 additions & 2 deletions src/askui/models/anthropic/messages_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
TextBlockParam,
ThinkingConfigParam,
ToolChoiceParam,
ToolResultBlockParam,
ToolUseBlockParam,
)
from askui.models.shared.messages_api import MessagesApi
Expand All @@ -52,8 +53,9 @@ def from_content_block(block: ContentBlockParam) -> BetaContentBlockParam:
"""Convert an internal content block to an Anthropic API-compatible dict.

Uses `model_dump()` to produce plain dicts compatible with Anthropic's
TypedDicts. Strips ``visual_representation`` from `ToolUseBlockParam`
as it is not accepted by the API.
TypedDicts. Strips internal-only fields that are not accepted by the API:
- ``visual_representation`` from `ToolUseBlockParam`
- ``error_type`` from `ToolResultBlockParam`
"""
if isinstance(block, ToolUseBlockParam):
# visual_representation is an internal field (perceptual hash for cache
Expand All @@ -63,6 +65,13 @@ def from_content_block(block: ContentBlockParam) -> BetaContentBlockParam:
"BetaContentBlockParam",
block.model_dump(exclude={"visual_representation"}),
)
if isinstance(block, ToolResultBlockParam):
# error_type is an internal field for unfixable error detection that
# does not exist in the Anthropic API schema.
return cast(
"BetaContentBlockParam",
block.model_dump(exclude={"error_type"}),
)
return cast("BetaContentBlockParam", block.model_dump())


Expand Down
1 change: 1 addition & 0 deletions src/askui/models/shared/agent_message_param.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@ class ToolResultBlockParam(BaseModel):
cache_control: CacheControlEphemeralParam | None = None
content: str | list[TextBlockParam | ImageBlockParam]
is_error: bool = False
error_type: str | None = None


class ToolUseBlockParam(BaseModel):
Expand Down
83 changes: 64 additions & 19 deletions src/askui/models/shared/conversation.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,10 @@
from askui.model_providers.detection_provider import DetectionProvider
from askui.model_providers.image_qa_provider import ImageQAProvider
from askui.model_providers.vlm_provider import VlmProvider
from askui.models.shared.agent_message_param import MessageParam
from askui.models.shared.agent_message_param import (
MessageParam,
ToolResultBlockParam,
)
from askui.models.shared.settings import ActSettings
from askui.models.shared.tools import ToolCollection
from askui.models.shared.truncation_strategies import (
Expand Down Expand Up @@ -299,19 +302,20 @@ def _execute_step(self) -> bool:
self._add_message(message)

# 3. Execute tool calls if applicable
continue_loop = False
tool_result_message = None
if result.messages_to_add:
last_message = result.messages_to_add[-1]
tool_result_message = self._execute_tools_if_present(last_message)
if tool_result_message:
self._add_message(tool_result_message)
continue_loop = True # we always continue after a tool was called

# 4. Check if conversation should continue and switch speaker if necessary
# Note:_handle_continue_conversation must always be called (not short-circuited)
# because it has side effects (e.g., triggering speaker switches).
status_continue = self._handle_continue_conversation(result)
continue_loop = continue_loop or status_continue
continue_loop = self._handle_continue_conversation(result, tool_result_message)
if result.status == "switch_speaker" and result.next_speaker:
self.switch_speaker(
result.next_speaker,
speaker_context=result.speaker_context,
)

self._on_step_end(self._step_index, result)
self._step_index += 1
Expand Down Expand Up @@ -376,30 +380,71 @@ def _add_message(self, message: MessageParam) -> None:
)

@tracer.start_as_current_span("_handle_continue_conversation")
def _handle_continue_conversation(self, result: SpeakerResult) -> bool:
def _handle_continue_conversation(
self, result: SpeakerResult, tool_result_message: MessageParam | None
) -> bool:
"""Handle speaker result status and determine if loop should continue.

Side effects (logging, speaker switches) always run regardless of
whether tools were called. After that, tool execution overrides the
status-based decision: if tools ran we always continue.

Args:
result: Result from speaker
tool_result_message: Tool result message if tools were executed

Returns:
True if loop should continue, False if done
"""
if result.status == "done":
logger.info("Conversation completed successfully")
return False
if result.status == "failed":
status_continue = False
elif result.status == "failed":
logger.error("Conversation failed")
return False
if result.status == "switch_speaker":
if result.next_speaker:
self.switch_speaker(
result.next_speaker,
speaker_context=result.speaker_context,
)
status_continue = False
elif result.status == "switch_speaker":
status_continue = True
else:
# status == "continue"
status_continue = True

if tool_result_message:
if self._has_unfixable_error(tool_result_message):
return False
# we always continue after a tool was called
return True
# status == "continue"
return True
return status_continue

def _has_unfixable_error(self, tool_result_message: MessageParam) -> bool:
    """Check if a tool result message contains an unfixable error.

    An error counts as unfixable when the block is flagged with
    ``is_error`` and its ``error_type`` is listed in
    ``self.settings.unfixable_errors``.

    Args:
        tool_result_message: The message containing tool results.

    Returns:
        ``True`` if an unfixable error was found, ``False`` otherwise.
    """
    unfixable = self.settings.unfixable_errors
    if not unfixable:
        # Nothing is configured as unfixable, so every error is retryable.
        return False

    content = tool_result_message.content
    if isinstance(content, str):
        # Plain-string content carries no structured tool result blocks.
        return False

    unfixable_names = set(unfixable)
    for item in content:
        if not isinstance(item, ToolResultBlockParam):
            continue
        if not item.is_error:
            continue
        if item.error_type is None or item.error_type not in unfixable_names:
            continue
        msg = f"Unfixable error detected: {item.error_type}"
        logger.error(msg)
        return True
    return False

def _switch_speaker_if_needed(self) -> None:
"""Switch to default speaker if current one cannot handle."""
Expand Down
8 changes: 8 additions & 0 deletions src/askui/models/shared/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,12 +84,20 @@ class ActSettings(BaseModel):
Args:
messages (MessageSettings): Settings for message creation including
max tokens, temperature, and system prompt configuration.
max_steps (int | None): Maximum number of conversation steps before
the agent stops. If ``None``, no step limit is imposed.
unfixable_errors (list[str]): Exception type names that should
immediately stop the conversation loop when encountered as tool
errors. Matched against ``ToolResultBlockParam.error_type``.
Example: ``["ConnectionRefusedError", "FileNotFoundError"]``.
Defaults to an empty list (all tool errors are retryable).
"""

model_config = ConfigDict(arbitrary_types_allowed=True)

messages: MessageSettings = Field(default_factory=MessageSettings)
max_steps: int | None = None
unfixable_errors: list[str] = Field(default_factory=list)


class GetSettings(BaseModel):
Expand Down
3 changes: 3 additions & 0 deletions src/askui/models/shared/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,7 @@ def _create_tool_result_block_param_for_playwright_error(
content="\n\n".join(lines),
is_error=True,
tool_use_id=param.id,
error_type=type(error).__name__,
)


Expand Down Expand Up @@ -552,6 +553,7 @@ def _run_regular_tool(
content=f"Tool raised an unexpected error: {error_message}",
is_error=True,
tool_use_id=tool_use_block_param.id,
error_type=type(e).__name__,
)

async def _call_mcp_tool(
Expand Down Expand Up @@ -598,6 +600,7 @@ def _run_mcp_tool(
content=str(e),
is_error=True,
tool_use_id=tool_use_block_param.id,
error_type=type(e).__name__,
)

def __add__(self, other: "ToolCollection") -> "ToolCollection":
Expand Down
Loading