import json
from typing import TYPE_CHECKING, Any

import openai
from openai import OpenAI
from typing_extensions import override

from askui.logger import logger
from askui.models.exceptions import QueryNoResponseError
from askui.models.models import GetModel
from askui.models.shared.prompts import SYSTEM_PROMPT_GET
from askui.models.types.response_schemas import ResponseSchema, to_response_schema
from askui.utils.image_utils import ImageSource

from .settings import OpenRouterSettings

if TYPE_CHECKING:
    from openai.types.chat.completion_create_params import ResponseFormat


def _clean_schema_refs(schema: dict[str, Any] | list[Any]) -> None:
    """Remove `title` fields that sit at the same level as a `$ref`, since OpenAI does not support sibling keys next to `$ref`."""  # noqa: E501
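    # Illustrative (hypothetical) example: a node such as
    #   {"$ref": "#/$defs/User", "title": "User"}
    # becomes
    #   {"$ref": "#/$defs/User"}
    # Nested dicts and lists are traversed recursively.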
    if isinstance(schema, dict):
        if "$ref" in schema and "title" in schema:
            del schema["title"]
        for value in schema.values():
            if isinstance(value, (dict, list)):
                _clean_schema_refs(value)
    elif isinstance(schema, list):
        for item in schema:
            if isinstance(item, (dict, list)):
                _clean_schema_refs(item)


class OpenRouterModel(GetModel):
    """
    Implementation of the `GetModel` interface for the OpenRouter API.

    Args:
        settings (OpenRouterSettings): The settings for the OpenRouter model.

    Example:
        ```python
        from askui import VisionAgent
        from askui.models import (
            OpenRouterModel,
            OpenRouterSettings,
            ModelRegistry,
        )

        # Register OpenRouter model in the registry
        custom_models: ModelRegistry = {
            "my-custom-model": OpenRouterModel(
                OpenRouterSettings(
                    model="anthropic/claude-opus-4",
                )
            ),
        }

        with VisionAgent(models=custom_models, model={"get": "my-custom-model"}) as agent:
            result = agent.get("What is the main heading on the screen?")
            print(result)
        ```
    """  # noqa: E501

    def __init__(
        self,
        settings: OpenRouterSettings | None = None,
        client: OpenAI | None = None,
    ) -> None:
        self._settings = settings or OpenRouterSettings()

        self._client = (
            client
            if client is not None
            else OpenAI(
                api_key=self._settings.open_router_api_key.get_secret_value(),
                base_url=str(self._settings.base_url),
            )
        )

    def _predict(
        self,
        image_url: str,
        instruction: str,
        prompt: str,
        response_schema: type[ResponseSchema] | None,
    ) -> str | None | ResponseSchema:
        extra_body: dict[str, object] = {}

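        # OpenRouter supports a fallback list of models via its "models"
        # request field; the OpenAI SDK does not model this field, so it is
        # passed through extra_body.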
        if len(self._settings.models) > 0:
            extra_body["models"] = self._settings.models

        _response_schema = (
            to_response_schema(response_schema) if response_schema else None
        )

        response_format: openai.NotGiven | ResponseFormat = openai.NOT_GIVEN
        if _response_schema is not None:
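            # OpenRouter-specific routing preference: only route to providers
            # that support all request parameters (here, structured output).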
            extra_body["provider"] = {"require_parameters": True}
            schema = _response_schema.model_json_schema()
            _clean_schema_refs(schema)

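            # Wrap the user schema in an envelope object under a "response"
            # key. Hoisting "$defs" to the top level keeps internal
            # "#/$defs/..." references resolvable. Illustrative shape:
            #   {"type": "object",
            #    "properties": {"response": <user schema>},
            #    "additionalProperties": False,
            #    "required": ["response"],
            #    "$defs": {...}}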
            defs = schema.pop("$defs", None)
            schema_response_wrapper: dict[str, Any] = {
                "type": "object",
                "properties": {"response": schema},
                "additionalProperties": False,
                "required": ["response"],
            }
            if defs:
                schema_response_wrapper["$defs"] = defs
            response_format = {
                "type": "json_schema",
                "json_schema": {
                    "name": "user_json_schema",
                    "schema": schema_response_wrapper,
                    "strict": True,
                },
            }

        chat_completion = self._client.chat.completions.create(
            model=self._settings.model,
            extra_body=extra_body,
            response_format=response_format,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": image_url,
                            },
                        },
                        {"type": "text", "text": prompt + instruction},
                    ],
                }
            ],
            stream=False,
            top_p=self._settings.chat_completions_create_settings.top_p,
            temperature=self._settings.chat_completions_create_settings.temperature,
            max_tokens=self._settings.chat_completions_create_settings.max_tokens,
            seed=self._settings.chat_completions_create_settings.seed,
            stop=self._settings.chat_completions_create_settings.stop,
            frequency_penalty=self._settings.chat_completions_create_settings.frequency_penalty,
            presence_penalty=self._settings.chat_completions_create_settings.presence_penalty,
        )

        model_response = chat_completion.choices[0].message.content

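        # When a schema was requested, the model's JSON is expected to match
        # the envelope built above, so the payload lives under "response".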
        if _response_schema is not None and model_response is not None:
            try:
                response_json = json.loads(model_response)
            except json.JSONDecodeError:
                error_msg = f"Expected JSON, but model {self._settings.model} returned: {model_response}"  # noqa: E501
                logger.error(error_msg)
                raise ValueError(error_msg) from None

            validated_response = _response_schema.model_validate(
                response_json["response"]
            )
            return validated_response.root

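        # No schema requested: return the raw text content (may be None).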
        return model_response

    @override
    def get(
        self,
        query: str,
        image: ImageSource,
        response_schema: type[ResponseSchema] | None,
        model_choice: str,
    ) -> ResponseSchema | str:
        response = self._predict(
            image_url=image.to_data_url(),
            instruction=query,
            prompt=SYSTEM_PROMPT_GET,
            response_schema=response_schema,
        )
        if response is None:
            error_msg = f'No response from model "{model_choice}" to query: "{query}"'
            raise QueryNoResponseError(error_msg, query)
        return response