From 87774397a190d9b339e5f0885c21a2391fa45d45 Mon Sep 17 00:00:00 2001 From: jason-lee08 Date: Fri, 21 Mar 2025 01:12:50 -0700 Subject: [PATCH 01/14] Adding progress... --- .../model/examples/diversity_testbench.ipynb | 1864 ++++++++++++ src/ember/core/utils/embedding_utils.py | 22 + src/ember/core/utils/eval/evaluators.py | 76 + src/ember/examples/diversity_testbench.ipynb | 2545 +++++++++++++++++ 4 files changed, 4507 insertions(+) create mode 100644 src/ember/core/registry/model/examples/diversity_testbench.ipynb create mode 100644 src/ember/examples/diversity_testbench.ipynb diff --git a/src/ember/core/registry/model/examples/diversity_testbench.ipynb b/src/ember/core/registry/model/examples/diversity_testbench.ipynb new file mode 100644 index 00000000..6e7a12da --- /dev/null +++ b/src/ember/core/registry/model/examples/diversity_testbench.ipynb @@ -0,0 +1,1864 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Diversity Testbench\n", + "\n", + "---\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Ember Package Testing (WIP)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setup Dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import logging, sys, os\n", + "from typing import Dict, Any, List\n", + "\n", + "logging.basicConfig(level=logging.ERROR)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "openai_key = os.getenv(\"OPENAI_API_KEY\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/root/ember/jared/ember/src/ember/core/registry/model/examples\n" + ] + } + ], + "source": [ + "# fixing dependencies if current path is /src/ember/examples/diversity_testbench.ipynb\n", + "target_dir = 
'src/ember/examples'\n", + "if os.getcwd()[-18:] == target_dir:\n", + " os.chdir('../../..')\n", + "print(os.getcwd())\n", + "\n", + "project_root = os.path.abspath(os.path.join(os.getcwd(), \"../../..\"))\n", + "if project_root not in sys.path:\n", + " sys.path.insert(0, project_root)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/root/ember/jared/ember/src/ember/core/registry/model/examples\n" + ] + } + ], + "source": [ + "!echo $PWD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "NOTE: things below this are to install required dependencies (only do this the venv)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "# %pip install -q -e .\n", + "# %pip install -q google-generativeai==0.7.2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Ember Repo Loads (WIP)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "# from ember.core.registry.model.model_module.lm import LMModule, LMModuleConfig\n", + "from ember.core.registry.model.config.settings import initialize_registry\n", + "from ember.core.registry.model.base.services.model_service import ModelService\n", + "from ember.core.registry.model.base.schemas.model_info import ModelInfo\n", + "from ember.core.registry.model.base.schemas.cost import ModelCost, RateLimit\n", + "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", + "\n", + "from ember.core.registry.model import load_model, ChatResponse\n", + "from ember.core.registry.model.base.services.model_service import ModelService\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:55:38,516 [DEBUG] 
ConfigManager: Loading configuration...\n", + "2025-03-21 00:55:38,523 [DEBUG] ConfigManager: Configuration loaded successfully\n", + "2025-03-21 00:55:38,524 [INFO] ember.core.registry.model.initialization: Execute model discovery (timeout: 30 seconds per provider, running in parallel)\n", + "2025-03-21 00:55:38,527 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 00:55:38,542 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", + "2025-03-21 00:55:38,572 [DEBUG] ember.core.registry.model.base.registry.discovery: OPENAI_API_KEY found, initialized OpenAIDiscovery successfully\n", + "2025-03-21 00:55:38,575 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 00:55:38,577 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", + "2025-03-21 00:55:38,588 [DEBUG] ember.core.registry.model.base.registry.discovery: ANTHROPIC_API_KEY found, initialized AnthropicDiscovery successfully\n", + "2025-03-21 00:55:38,590 [DEBUG] ember.core.registry.model.base.registry.discovery: GOOGLE_API_KEY found, initialized DeepmindDiscovery successfully\n", + "2025-03-21 00:55:38,593 [INFO] ember.core.registry.model.initialization: Initiating model discovery via ModelDiscoveryService\n", + "2025-03-21 00:55:38,608 [DEBUG] openai._base_client: Request options: {'method': 'get', 'url': '/models', 'post_parser': ._parser at 0x7fbfe4e29940>, 'json_data': None}\n", + "2025-03-21 00:55:38,615 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Starting Anthropic model fetch via REST API...\n", + "2025-03-21 00:55:38,621 [DEBUG] openai._base_client: Sending HTTP Request: GET https://api.openai.com/v1/models\n", + "2025-03-21 00:55:38,674 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Calling 
Anthropic REST API: https://api.anthropic.com/v1/models with timeout=(2,5)\n", + "2025-03-21 00:55:38,676 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=5.0 socket_options=None\n", + "2025-03-21 00:55:38,702 [DEBUG] urllib3.connectionpool: Starting new HTTPS connection (1): api.anthropic.com:443\n", + "2025-03-21 00:55:38,726 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 00:55:38,727 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=5.0\n", + "2025-03-21 00:55:38,779 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 00:55:38,782 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:55:38,784 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:55:38,786 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:55:38,789 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:55:38,792 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:55:38,866 [INFO] ember.core.registry.model.base.registry.discovery: Provider DeepmindDiscovery completed in 0.25s\n", + "2025-03-21 00:55:38,893 [DEBUG] urllib3.connectionpool: https://api.anthropic.com:443 \"GET /v1/models HTTP/1.1\" 401 86\n", + "2025-03-21 00:55:38,895 [ERROR] ember.core.registry.model.providers.anthropic.anthropic_discovery: Error fetching Anthropic models via REST API: 401 Client Error: Unauthorized for url: https://api.anthropic.com/v1/models\n", + "2025-03-21 00:55:38,897 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Using fallback models due to API request error\n", + "2025-03-21 00:55:38,899 [INFO] ember.core.registry.model.base.registry.discovery: Provider AnthropicDiscovery completed in 0.28s\n", + "2025-03-21 00:55:39,163 [DEBUG] httpcore.http11: 
receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:55:39 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'openai-version', b'2020-10-01'), (b'x-request-id', b'1e6dc2bf4213f80b492956c74129745f'), (b'openai-processing-ms', b'353'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=lGJr7UVxgaDN4HBlLITX3hjGQeXqf8MmO7ENb75MuR4-1742543739-1.0.1.1-eD8tCUx18oWut6fWHu02UdcjYqC46qA6wHnAqfb.G1O88xdXLLSnptXbqkEBF3NSfZxvpT4_vB1RRzNOL6jZ_VJ08qnkyVlZtfQePLLlvgg; path=/; expires=Fri, 21-Mar-25 08:25:39 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=jISO5ZxiQaGeAfqOUQOdrcmVaCqM3RDIsOf_LKzHB5A-1742543739496-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be3618b64eb35-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:55:39,168 [INFO] httpx: HTTP Request: GET https://api.openai.com/v1/models \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:55:39,170 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:55:39,176 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:55:39,179 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:55:39,181 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:55:39,184 [DEBUG] openai._base_client: HTTP Response: GET https://api.openai.com/v1/models \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 07:55:39 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('openai-version', '2020-10-01'), ('x-request-id', '1e6dc2bf4213f80b492956c74129745f'), ('openai-processing-ms', '353'), 
('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=lGJr7UVxgaDN4HBlLITX3hjGQeXqf8MmO7ENb75MuR4-1742543739-1.0.1.1-eD8tCUx18oWut6fWHu02UdcjYqC46qA6wHnAqfb.G1O88xdXLLSnptXbqkEBF3NSfZxvpT4_vB1RRzNOL6jZ_VJ08qnkyVlZtfQePLLlvgg; path=/; expires=Fri, 21-Mar-25 08:25:39 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=jISO5ZxiQaGeAfqOUQOdrcmVaCqM3RDIsOf_LKzHB5A-1742543739496-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923be3618b64eb35-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:55:39,190 [DEBUG] openai._base_client: request_id: 1e6dc2bf4213f80b492956c74129745f\n", + "2025-03-21 00:55:39,209 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Fetched 65 models from OpenAI API\n", + "2025-03-21 00:55:39,213 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Filtered to 43 relevant models\n", + "2025-03-21 00:55:39,218 [INFO] ember.core.registry.model.base.registry.discovery: Provider OpenAIDiscovery completed in 0.62s\n", + "2025-03-21 00:55:39,222 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 43 models from OpenAIDiscovery\n", + "2025-03-21 00:55:39,232 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 5 models from AnthropicDiscovery\n", + "2025-03-21 00:55:39,235 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 32 models from DeepmindDiscovery\n", + "2025-03-21 00:55:39,239 [INFO] ember.core.registry.model.base.registry.discovery: Discovered 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 
'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 
'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 00:55:39,245 [DEBUG] ember.core.registry.model.initialization: Raw discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 
'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 00:55:39,254 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-transcribe discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,262 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
openai:gpt-4o-mini-transcribe\n", + "2025-03-21 00:55:39,268 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,272 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-12-17\n", + "2025-03-21 00:55:39,275 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-3 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,279 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-3\n", + "2025-03-21 00:55:39,281 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-2 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,283 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-2\n", + "2025-03-21 00:55:39,287 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,288 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-10-01\n", + "2025-03-21 00:55:39,290 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,292 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-10-01\n", + "2025-03-21 00:55:39,293 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview 
discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,294 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview\n", + "2025-03-21 00:55:39,298 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-large discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,299 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-large\n", + "2025-03-21 00:55:39,301 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,302 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4\n", + "2025-03-21 00:55:39,305 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-05-13 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,308 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-05-13\n", + "2025-03-21 00:55:39,310 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,311 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview\n", + "2025-03-21 00:55:39,312 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,314 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model 
info for openai:gpt-4o-mini-audio-preview\n", + "2025-03-21 00:55:39,316 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct-0914 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,318 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct-0914\n", + "2025-03-21 00:55:39,324 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,327 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview\n", + "2025-03-21 00:55:39,328 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-1106 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,330 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-1106\n", + "2025-03-21 00:55:39,331 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,332 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview\n", + "2025-03-21 00:55:39,334 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,335 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo\n", + "2025-03-21 00:55:39,337 [WARNING] ember.core.registry.model.base.registry.discovery: Model 
openai:gpt-4o-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,340 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-12-17\n", + "2025-03-21 00:55:39,342 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,346 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct\n", + "2025-03-21 00:55:39,347 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,349 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo\n", + "2025-03-21 00:55:39,350 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,352 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-preview\n", + "2025-03-21 00:55:39,354 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,355 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview-2025-03-11\n", + "2025-03-21 00:55:39,357 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", + 
"2025-03-21 00:55:39,358 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview\n", + "2025-03-21 00:55:39,360 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-0125 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,362 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-0125\n", + "2025-03-21 00:55:39,363 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-08-06 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,365 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-08-06\n", + "2025-03-21 00:55:39,366 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-2024-04-09 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,369 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-2024-04-09\n", + "2025-03-21 00:55:39,370 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-16k discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,372 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-16k\n", + "2025-03-21 00:55:39,373 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,375 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o\n", + "2025-03-21 00:55:39,377 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,380 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview-2024-12-17\n", + "2025-03-21 00:55:39,381 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-1106-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,383 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-1106-preview\n", + "2025-03-21 00:55:39,385 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-ada-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,385 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-ada-002\n", + "2025-03-21 00:55:39,387 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0613 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,392 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0613\n", + "2025-03-21 00:55:39,393 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,395 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview\n", + "2025-03-21 00:55:39,396 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview-2025-02-27 discovered via API but not in local config; using defaults with environment API 
key.\n", + "2025-03-21 00:55:39,398 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview-2025-02-27\n", + "2025-03-21 00:55:39,400 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,402 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview-2025-03-11\n", + "2025-03-21 00:55:39,404 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-11-20 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,406 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-11-20\n", + "2025-03-21 00:55:39,407 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-tts discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,409 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-tts\n", + "2025-03-21 00:55:39,411 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0125-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,412 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0125-preview\n", + "2025-03-21 00:55:39,415 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-transcribe discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,418 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-transcribe\n", + "2025-03-21 
00:55:39,420 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-2024-07-18 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,421 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-2024-07-18\n", + "2025-03-21 00:55:39,423 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-small discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,425 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-small\n", + "2025-03-21 00:55:39,426 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,428 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini\n", + "2025-03-21 00:55:39,429 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,430 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview-2024-12-17\n", + "2025-03-21 00:55:39,432 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-sonnet discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,434 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-sonnet\n", + "2025-03-21 00:55:39,435 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-opus discovered via API but not in local config; using 
defaults with environment API key.\n", + "2025-03-21 00:55:39,437 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-opus\n", + "2025-03-21 00:55:39,438 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-haiku discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,440 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-haiku\n", + "2025-03-21 00:55:39,441 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.5-sonnet discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,444 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.5-sonnet\n", + "2025-03-21 00:55:39,445 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.7-sonnet discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,446 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.7-sonnet\n", + "2025-03-21 00:55:39,448 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.0-pro-vision-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,449 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.0-pro-vision-latest\n", + "2025-03-21 00:55:39,451 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-pro-vision discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,452 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info 
for google:models/gemini-pro-vision\n", + "2025-03-21 00:55:39,454 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,457 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-latest\n", + "2025-03-21 00:55:39,459 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,461 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-001\n", + "2025-03-21 00:55:39,462 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,465 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-002\n", + "2025-03-21 00:55:39,469 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,470 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro\n", + "2025-03-21 00:55:39,471 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,473 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-latest\n", + "2025-03-21 00:55:39,475 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,476 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001\n", + "2025-03-21 00:55:39,477 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001-tuning discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,479 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001-tuning\n", + "2025-03-21 00:55:39,480 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,482 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash\n", + "2025-03-21 00:55:39,485 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,486 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-002\n", + "2025-03-21 00:55:39,487 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,490 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b\n", + "2025-03-21 00:55:39,491 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-001 discovered 
via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,494 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-001\n", + "2025-03-21 00:55:39,496 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,498 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-latest\n", + "2025-03-21 00:55:39,502 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0827 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,505 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0827\n", + "2025-03-21 00:55:39,506 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0924 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,508 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0924\n", + "2025-03-21 00:55:39,509 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,512 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp\n", + "2025-03-21 00:55:39,514 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash discovered via API but not in local config; using defaults with environment 
API key.\n", + "2025-03-21 00:55:39,516 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash\n", + "2025-03-21 00:55:39,521 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,532 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-001\n", + "2025-03-21 00:55:39,536 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp-image-generation discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,539 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp-image-generation\n", + "2025-03-21 00:55:39,540 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,541 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-001\n", + "2025-03-21 00:55:39,543 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,547 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite\n", + "2025-03-21 00:55:39,549 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview-02-05 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,550 [DEBUG] 
ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview-02-05\n", + "2025-03-21 00:55:39,551 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,552 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview\n", + "2025-03-21 00:55:39,554 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,557 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp\n", + "2025-03-21 00:55:39,559 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp-02-05 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,560 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp-02-05\n", + "2025-03-21 00:55:39,563 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-exp-1206 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,565 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-exp-1206\n", + "2025-03-21 00:55:39,567 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-01-21 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,569 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully 
merged model info for google:models/gemini-2.0-flash-thinking-exp-01-21\n", + "2025-03-21 00:55:39,572 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,574 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp\n", + "2025-03-21 00:55:39,576 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-1219 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,578 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-1219\n", + "2025-03-21 00:55:39,581 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/learnlm-1.5-pro-experimental discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,583 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/learnlm-1.5-pro-experimental\n", + "2025-03-21 00:55:39,585 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemma-3-27b-it discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:55:39,587 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemma-3-27b-it\n", + "2025-03-21 00:55:39,589 [DEBUG] ember.core.registry.model.initialization: Merged discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 
'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 
'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 00:55:39,590 [INFO] ember.core.registry.model.initialization: Registering 80 models from discovery\n", + "2025-03-21 00:55:39,592 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-transcribe (provider: Openai)\n", + "2025-03-21 00:55:39,597 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", + "2025-03-21 00:55:39,599 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", + "2025-03-21 00:55:39,602 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 00:55:39,604 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:55:39,605 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:55:39,611 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-3 (provider: Openai)\n", + "2025-03-21 00:55:39,612 [INFO] ember.core.registry.model.initialization: Successfully 
registered model: openai:dall-e-3 with provider Openai\n", + "2025-03-21 00:55:39,613 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", + "2025-03-21 00:55:39,614 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-2 (provider: Openai)\n", + "2025-03-21 00:55:39,615 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", + "2025-03-21 00:55:39,616 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", + "2025-03-21 00:55:39,617 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-10-01 (provider: Openai)\n", + "2025-03-21 00:55:39,618 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", + "2025-03-21 00:55:39,618 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", + "2025-03-21 00:55:39,619 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-10-01 (provider: Openai)\n", + "2025-03-21 00:55:39,623 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", + "2025-03-21 00:55:39,624 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", + "2025-03-21 00:55:39,632 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview (provider: Openai)\n", + "2025-03-21 00:55:39,634 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
openai:gpt-4o-audio-preview with provider Openai\n", + "2025-03-21 00:55:39,635 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", + "2025-03-21 00:55:39,636 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-large (provider: Openai)\n", + "2025-03-21 00:55:39,637 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", + "2025-03-21 00:55:39,638 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", + "2025-03-21 00:55:39,639 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4 (provider: Openai)\n", + "2025-03-21 00:55:39,640 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", + "2025-03-21 00:55:39,640 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", + "2025-03-21 00:55:39,641 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-05-13 (provider: Openai)\n", + "2025-03-21 00:55:39,642 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", + "2025-03-21 00:55:39,643 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", + "2025-03-21 00:55:39,644 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview (provider: Openai)\n", + "2025-03-21 00:55:39,646 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", + "2025-03-21 00:55:39,647 
[INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", + "2025-03-21 00:55:39,650 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview (provider: Openai)\n", + "2025-03-21 00:55:39,653 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", + "2025-03-21 00:55:39,655 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", + "2025-03-21 00:55:39,656 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct-0914 (provider: Openai)\n", + "2025-03-21 00:55:39,657 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", + "2025-03-21 00:55:39,659 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", + "2025-03-21 00:55:39,660 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview (provider: Openai)\n", + "2025-03-21 00:55:39,661 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", + "2025-03-21 00:55:39,662 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", + "2025-03-21 00:55:39,663 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-1106 (provider: Openai)\n", + "2025-03-21 00:55:39,664 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", + "2025-03-21 
00:55:39,666 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", + "2025-03-21 00:55:39,668 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview (provider: Openai)\n", + "2025-03-21 00:55:39,669 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", + "2025-03-21 00:55:39,671 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", + "2025-03-21 00:55:39,673 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo (provider: Openai)\n", + "2025-03-21 00:55:39,674 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", + "2025-03-21 00:55:39,676 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", + "2025-03-21 00:55:39,682 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 00:55:39,683 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:55:39,685 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:55:39,686 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct (provider: Openai)\n", + "2025-03-21 00:55:39,687 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", + "2025-03-21 00:55:39,689 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", + "2025-03-21 00:55:39,695 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo (provider: Openai)\n", + "2025-03-21 00:55:39,697 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", + "2025-03-21 00:55:39,698 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", + "2025-03-21 00:55:39,701 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-preview (provider: Openai)\n", + "2025-03-21 00:55:39,704 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", + "2025-03-21 00:55:39,705 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", + "2025-03-21 00:55:39,707 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview-2025-03-11 (provider: Openai)\n", + "2025-03-21 00:55:39,708 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 00:55:39,709 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 00:55:39,711 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview (provider: Openai)\n", + "2025-03-21 00:55:39,712 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", + "2025-03-21 00:55:39,713 
[INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", + "2025-03-21 00:55:39,714 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-0125 (provider: Openai)\n", + "2025-03-21 00:55:39,723 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", + "2025-03-21 00:55:39,725 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", + "2025-03-21 00:55:39,726 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-08-06 (provider: Openai)\n", + "2025-03-21 00:55:39,728 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", + "2025-03-21 00:55:39,730 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", + "2025-03-21 00:55:39,731 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-2024-04-09 (provider: Openai)\n", + "2025-03-21 00:55:39,733 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", + "2025-03-21 00:55:39,734 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", + "2025-03-21 00:55:39,737 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-16k (provider: Openai)\n", + "2025-03-21 00:55:39,738 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", + "2025-03-21 00:55:39,744 [INFO] ember.core.registry.model.initialization: 
Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", + "2025-03-21 00:55:39,747 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o (provider: Openai)\n", + "2025-03-21 00:55:39,749 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", + "2025-03-21 00:55:39,750 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", + "2025-03-21 00:55:39,756 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 00:55:39,758 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:55:39,759 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:55:39,762 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-1106-preview (provider: Openai)\n", + "2025-03-21 00:55:39,764 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", + "2025-03-21 00:55:39,765 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", + "2025-03-21 00:55:39,773 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-ada-002 (provider: Openai)\n", + "2025-03-21 00:55:39,777 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-ada-002 with provider Openai\n", + "2025-03-21 00:55:39,782 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
openai:text-embedding-ada-002 with provider Openai\n", + "2025-03-21 00:55:39,783 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0613 (provider: Openai)\n", + "2025-03-21 00:55:39,788 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", + "2025-03-21 00:55:39,791 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", + "2025-03-21 00:55:39,792 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview (provider: Openai)\n", + "2025-03-21 00:55:39,793 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", + "2025-03-21 00:55:39,794 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", + "2025-03-21 00:55:39,796 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview-2025-02-27 (provider: Openai)\n", + "2025-03-21 00:55:39,797 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", + "2025-03-21 00:55:39,798 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", + "2025-03-21 00:55:39,799 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview-2025-03-11 (provider: Openai)\n", + "2025-03-21 00:55:39,801 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 00:55:39,803 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with 
provider Openai\n", + "2025-03-21 00:55:39,804 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-11-20 (provider: Openai)\n", + "2025-03-21 00:55:39,805 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", + "2025-03-21 00:55:39,806 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", + "2025-03-21 00:55:39,807 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-tts (provider: Openai)\n", + "2025-03-21 00:55:39,810 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", + "2025-03-21 00:55:39,813 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", + "2025-03-21 00:55:39,814 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0125-preview (provider: Openai)\n", + "2025-03-21 00:55:39,816 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", + "2025-03-21 00:55:39,818 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", + "2025-03-21 00:55:39,819 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-transcribe (provider: Openai)\n", + "2025-03-21 00:55:39,820 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", + "2025-03-21 00:55:39,821 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", + "2025-03-21 00:55:39,823 [DEBUG] 
ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-2024-07-18 (provider: Openai)\n", + "2025-03-21 00:55:39,824 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", + "2025-03-21 00:55:39,828 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", + "2025-03-21 00:55:39,829 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-small (provider: Openai)\n", + "2025-03-21 00:55:39,831 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", + "2025-03-21 00:55:39,833 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", + "2025-03-21 00:55:39,835 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini (provider: Openai)\n", + "2025-03-21 00:55:39,837 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", + "2025-03-21 00:55:39,838 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", + "2025-03-21 00:55:39,838 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 00:55:39,840 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:55:39,842 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:55:39,848 [DEBUG] 
ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-sonnet (provider: Anthropic)\n", + "2025-03-21 00:55:39,850 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", + "2025-03-21 00:55:39,851 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", + "2025-03-21 00:55:39,853 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-opus (provider: Anthropic)\n", + "2025-03-21 00:55:39,854 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", + "2025-03-21 00:55:39,856 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", + "2025-03-21 00:55:39,858 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-haiku (provider: Anthropic)\n", + "2025-03-21 00:55:39,860 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", + "2025-03-21 00:55:39,861 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", + "2025-03-21 00:55:39,862 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.5-sonnet (provider: Anthropic)\n", + "2025-03-21 00:55:39,862 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", + "2025-03-21 00:55:39,863 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", + "2025-03-21 00:55:39,864 [DEBUG] 
ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.7-sonnet (provider: Anthropic)\n", + "2025-03-21 00:55:39,865 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", + "2025-03-21 00:55:39,866 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", + "2025-03-21 00:55:39,867 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.0-pro-vision-latest (provider: Google)\n", + "2025-03-21 00:55:39,868 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", + "2025-03-21 00:55:39,869 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", + "2025-03-21 00:55:39,869 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-pro-vision (provider: Google)\n", + "2025-03-21 00:55:39,870 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", + "2025-03-21 00:55:39,871 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", + "2025-03-21 00:55:39,871 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-latest (provider: Google)\n", + "2025-03-21 00:55:39,873 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", + "2025-03-21 00:55:39,875 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider 
Google\n", + "2025-03-21 00:55:39,878 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-001 (provider: Google)\n", + "2025-03-21 00:55:39,878 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", + "2025-03-21 00:55:39,879 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", + "2025-03-21 00:55:39,882 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-002 (provider: Google)\n", + "2025-03-21 00:55:39,883 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", + "2025-03-21 00:55:39,884 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", + "2025-03-21 00:55:39,885 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro (provider: Google)\n", + "2025-03-21 00:55:39,886 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", + "2025-03-21 00:55:39,886 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", + "2025-03-21 00:55:39,887 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-latest (provider: Google)\n", + "2025-03-21 00:55:39,888 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", + "2025-03-21 00:55:39,889 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
google:models/gemini-1.5-flash-latest with provider Google\n", + "2025-03-21 00:55:39,889 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001 (provider: Google)\n", + "2025-03-21 00:55:39,890 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", + "2025-03-21 00:55:39,892 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", + "2025-03-21 00:55:39,893 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001-tuning (provider: Google)\n", + "2025-03-21 00:55:39,894 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", + "2025-03-21 00:55:39,895 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", + "2025-03-21 00:55:39,896 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash (provider: Google)\n", + "2025-03-21 00:55:39,897 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", + "2025-03-21 00:55:39,902 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", + "2025-03-21 00:55:39,903 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-002 (provider: Google)\n", + "2025-03-21 00:55:39,904 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", + "2025-03-21 00:55:39,905 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", + "2025-03-21 00:55:39,906 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b (provider: Google)\n", + "2025-03-21 00:55:39,907 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", + "2025-03-21 00:55:39,908 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", + "2025-03-21 00:55:39,909 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-001 (provider: Google)\n", + "2025-03-21 00:55:39,912 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", + "2025-03-21 00:55:39,913 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", + "2025-03-21 00:55:39,914 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-latest (provider: Google)\n", + "2025-03-21 00:55:39,915 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", + "2025-03-21 00:55:39,916 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", + "2025-03-21 00:55:39,917 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0827 (provider: Google)\n", + "2025-03-21 00:55:39,918 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", + "2025-03-21 00:55:39,919 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", + "2025-03-21 00:55:39,920 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0924 (provider: Google)\n", + "2025-03-21 00:55:39,921 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", + "2025-03-21 00:55:39,922 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", + "2025-03-21 00:55:39,923 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp (provider: Google)\n", + "2025-03-21 00:55:39,925 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", + "2025-03-21 00:55:39,929 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", + "2025-03-21 00:55:39,930 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash (provider: Google)\n", + "2025-03-21 00:55:39,934 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", + "2025-03-21 00:55:39,935 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", + "2025-03-21 00:55:39,936 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-001 (provider: Google)\n", + "2025-03-21 00:55:39,937 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", + "2025-03-21 00:55:39,938 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", + "2025-03-21 00:55:39,939 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp-image-generation (provider: Google)\n", + "2025-03-21 00:55:39,940 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", + "2025-03-21 00:55:39,940 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", + "2025-03-21 00:55:39,941 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-001 (provider: Google)\n", + "2025-03-21 00:55:39,942 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", + "2025-03-21 00:55:39,943 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", + "2025-03-21 00:55:39,944 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite (provider: Google)\n", + "2025-03-21 00:55:39,945 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", + "2025-03-21 00:55:39,946 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", + "2025-03-21 00:55:39,947 [DEBUG] ember.core.registry.model.initialization: Attempting to register 
discovered model: google:models/gemini-2.0-flash-lite-preview-02-05 (provider: Google)\n", + "2025-03-21 00:55:39,948 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", + "2025-03-21 00:55:39,952 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", + "2025-03-21 00:55:39,953 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview (provider: Google)\n", + "2025-03-21 00:55:39,954 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", + "2025-03-21 00:55:39,955 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", + "2025-03-21 00:55:39,956 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp (provider: Google)\n", + "2025-03-21 00:55:39,958 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", + "2025-03-21 00:55:39,959 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", + "2025-03-21 00:55:39,960 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp-02-05 (provider: Google)\n", + "2025-03-21 00:55:39,960 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", + "2025-03-21 00:55:39,961 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with 
provider Google\n", + "2025-03-21 00:55:39,963 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-exp-1206 (provider: Google)\n", + "2025-03-21 00:55:39,963 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", + "2025-03-21 00:55:39,965 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", + "2025-03-21 00:55:39,966 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-01-21 (provider: Google)\n", + "2025-03-21 00:55:39,968 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", + "2025-03-21 00:55:39,969 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", + "2025-03-21 00:55:39,970 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp (provider: Google)\n", + "2025-03-21 00:55:39,970 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", + "2025-03-21 00:55:39,971 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", + "2025-03-21 00:55:39,972 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-1219 (provider: Google)\n", + "2025-03-21 00:55:39,972 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", + "2025-03-21 
00:55:39,973 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", + "2025-03-21 00:55:39,973 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/learnlm-1.5-pro-experimental (provider: Google)\n", + "2025-03-21 00:55:39,974 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", + "2025-03-21 00:55:39,976 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", + "2025-03-21 00:55:39,977 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemma-3-27b-it (provider: Google)\n", + "2025-03-21 00:55:39,977 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", + "2025-03-21 00:55:39,980 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", + "2025-03-21 00:55:39,981 [INFO] ember.core.registry.model.initialization: Registration summary: 80 new, 0 skipped, 0 failed\n", + "2025-03-21 00:55:39,983 [INFO] ember.core.registry.model.initialization: Successfully discovered and registered 80 new models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 
'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 
'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 00:55:39,985 [INFO] ember.core.registry.model.initialization: Discovered 80 new models in 1.46s: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13'] and 70 more\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 
'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n" + ] + } + ], + "source": [ + "model_registry = initialize_registry()\n", + "print(model_registry.list_models())\n", + "llm = ModelService(registry=model_registry)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "scrolled": true + }, + "outputs": [ + 
{ + "data": { + "text/plain": [ + "['openai:gpt-4o-mini-transcribe',\n", + " 'openai:gpt-4o-audio-preview-2024-12-17',\n", + " 'openai:dall-e-3',\n", + " 'openai:dall-e-2',\n", + " 'openai:gpt-4o-audio-preview-2024-10-01',\n", + " 'openai:gpt-4o-realtime-preview-2024-10-01',\n", + " 'openai:gpt-4o-audio-preview',\n", + " 'openai:text-embedding-3-large',\n", + " 'openai:gpt-4',\n", + " 'openai:gpt-4o-2024-05-13',\n", + " 'openai:gpt-4o-realtime-preview',\n", + " 'openai:gpt-4o-mini-audio-preview',\n", + " 'openai:gpt-3.5-turbo-instruct-0914',\n", + " 'openai:gpt-4o-mini-search-preview',\n", + " 'openai:gpt-3.5-turbo-1106',\n", + " 'openai:gpt-4o-search-preview',\n", + " 'openai:gpt-4-turbo',\n", + " 'openai:gpt-4o-realtime-preview-2024-12-17',\n", + " 'openai:gpt-3.5-turbo-instruct',\n", + " 'openai:gpt-3.5-turbo',\n", + " 'openai:gpt-4-turbo-preview',\n", + " 'openai:gpt-4o-mini-search-preview-2025-03-11',\n", + " 'openai:gpt-4o-mini-realtime-preview',\n", + " 'openai:gpt-3.5-turbo-0125',\n", + " 'openai:gpt-4o-2024-08-06',\n", + " 'openai:gpt-4-turbo-2024-04-09',\n", + " 'openai:gpt-3.5-turbo-16k',\n", + " 'openai:gpt-4o',\n", + " 'openai:gpt-4o-mini-realtime-preview-2024-12-17',\n", + " 'openai:gpt-4-1106-preview',\n", + " 'openai:text-embedding-ada-002',\n", + " 'openai:gpt-4-0613',\n", + " 'openai:gpt-4.5-preview',\n", + " 'openai:gpt-4.5-preview-2025-02-27',\n", + " 'openai:gpt-4o-search-preview-2025-03-11',\n", + " 'openai:gpt-4o-2024-11-20',\n", + " 'openai:gpt-4o-mini-tts',\n", + " 'openai:gpt-4-0125-preview',\n", + " 'openai:gpt-4o-transcribe',\n", + " 'openai:gpt-4o-mini-2024-07-18',\n", + " 'openai:text-embedding-3-small',\n", + " 'openai:gpt-4o-mini',\n", + " 'openai:gpt-4o-mini-audio-preview-2024-12-17',\n", + " 'anthropic:claude-3-sonnet',\n", + " 'anthropic:claude-3-opus',\n", + " 'anthropic:claude-3-haiku',\n", + " 'anthropic:claude-3.5-sonnet',\n", + " 'anthropic:claude-3.7-sonnet',\n", + " 'google:models/gemini-1.0-pro-vision-latest',\n", + " 
'google:models/gemini-pro-vision',\n", + " 'google:models/gemini-1.5-pro-latest',\n", + " 'google:models/gemini-1.5-pro-001',\n", + " 'google:models/gemini-1.5-pro-002',\n", + " 'google:models/gemini-1.5-pro',\n", + " 'google:models/gemini-1.5-flash-latest',\n", + " 'google:models/gemini-1.5-flash-001',\n", + " 'google:models/gemini-1.5-flash-001-tuning',\n", + " 'google:models/gemini-1.5-flash',\n", + " 'google:models/gemini-1.5-flash-002',\n", + " 'google:models/gemini-1.5-flash-8b',\n", + " 'google:models/gemini-1.5-flash-8b-001',\n", + " 'google:models/gemini-1.5-flash-8b-latest',\n", + " 'google:models/gemini-1.5-flash-8b-exp-0827',\n", + " 'google:models/gemini-1.5-flash-8b-exp-0924',\n", + " 'google:models/gemini-2.0-flash-exp',\n", + " 'google:models/gemini-2.0-flash',\n", + " 'google:models/gemini-2.0-flash-001',\n", + " 'google:models/gemini-2.0-flash-exp-image-generation',\n", + " 'google:models/gemini-2.0-flash-lite-001',\n", + " 'google:models/gemini-2.0-flash-lite',\n", + " 'google:models/gemini-2.0-flash-lite-preview-02-05',\n", + " 'google:models/gemini-2.0-flash-lite-preview',\n", + " 'google:models/gemini-2.0-pro-exp',\n", + " 'google:models/gemini-2.0-pro-exp-02-05',\n", + " 'google:models/gemini-exp-1206',\n", + " 'google:models/gemini-2.0-flash-thinking-exp-01-21',\n", + " 'google:models/gemini-2.0-flash-thinking-exp',\n", + " 'google:models/gemini-2.0-flash-thinking-exp-1219',\n", + " 'google:models/learnlm-1.5-pro-experimental',\n", + " 'google:models/gemma-3-27b-it']" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model_registry.list_models()" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "model_ids: List[str] = [\n", + " \"openai:o1\",\n", + " \"openai:gpt-4o\",\n", + " \"openai:gpt-4o-mini\",\n", + " # \"anthropic:claude-3.5-sonnet\", # API key not working\n", + " # \"invalid:model\", # Expected to trigger an 
error.\n", + " # \"google:model/gemini-1.5-pro\", # need to fix model alignment\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:56:34,200 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. Using the registered provider.\n", + "2025-03-21 00:56:34,201 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o' using provider class 'OpenAIModel'.\n", + "2025-03-21 00:56:34,202 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o\n", + "2025-03-21 00:56:34,203 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:56:34,205 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 00:56:34,208 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", + "2025-03-21 00:56:34,228 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:56:34,230 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:56:34,232 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", + "2025-03-21 00:56:34,244 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 00:56:34,246 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n" + ] 
+ }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "➡️ Testing model: openai:o1\n", + "❌ Error with model openai:o1: Model 'openai:o1' not found. Available models:\n", + "- openai:gpt-4o-mini-transcribe\n", + "- openai:gpt-4o-audio-preview-2024-12-17\n", + "- openai:dall-e-3\n", + "- openai:dall-e-2\n", + "- openai:gpt-4o-audio-preview-2024-10-01\n", + "- openai:gpt-4o-realtime-preview-2024-10-01\n", + "- openai:gpt-4o-audio-preview\n", + "- openai:text-embedding-3-large\n", + "- openai:gpt-4\n", + "- openai:gpt-4o-2024-05-13\n", + "- openai:gpt-4o-realtime-preview\n", + "- openai:gpt-4o-mini-audio-preview\n", + "- openai:gpt-3.5-turbo-instruct-0914\n", + "- openai:gpt-4o-mini-search-preview\n", + "- openai:gpt-3.5-turbo-1106\n", + "- openai:gpt-4o-search-preview\n", + "- openai:gpt-4-turbo\n", + "- openai:gpt-4o-realtime-preview-2024-12-17\n", + "- openai:gpt-3.5-turbo-instruct\n", + "- openai:gpt-3.5-turbo\n", + "- openai:gpt-4-turbo-preview\n", + "- openai:gpt-4o-mini-search-preview-2025-03-11\n", + "- openai:gpt-4o-mini-realtime-preview\n", + "- openai:gpt-3.5-turbo-0125\n", + "- openai:gpt-4o-2024-08-06\n", + "- openai:gpt-4-turbo-2024-04-09\n", + "- openai:gpt-3.5-turbo-16k\n", + "- openai:gpt-4o\n", + "- openai:gpt-4o-mini-realtime-preview-2024-12-17\n", + "- openai:gpt-4-1106-preview\n", + "- openai:text-embedding-ada-002\n", + "- openai:gpt-4-0613\n", + "- openai:gpt-4.5-preview\n", + "- openai:gpt-4.5-preview-2025-02-27\n", + "- openai:gpt-4o-search-preview-2025-03-11\n", + "- openai:gpt-4o-2024-11-20\n", + "- openai:gpt-4o-mini-tts\n", + "- openai:gpt-4-0125-preview\n", + "- openai:gpt-4o-transcribe\n", + "- openai:gpt-4o-mini-2024-07-18\n", + "- openai:text-embedding-3-small\n", + "- openai:gpt-4o-mini\n", + "- openai:gpt-4o-mini-audio-preview-2024-12-17\n", + "- anthropic:claude-3-sonnet\n", + "- anthropic:claude-3-opus\n", + "- anthropic:claude-3-haiku\n", + "- anthropic:claude-3.5-sonnet\n", + "- anthropic:claude-3.7-sonnet\n", + 
"- google:models/gemini-1.0-pro-vision-latest\n", + "- google:models/gemini-pro-vision\n", + "- google:models/gemini-1.5-pro-latest\n", + "- google:models/gemini-1.5-pro-001\n", + "- google:models/gemini-1.5-pro-002\n", + "- google:models/gemini-1.5-pro\n", + "- google:models/gemini-1.5-flash-latest\n", + "- google:models/gemini-1.5-flash-001\n", + "- google:models/gemini-1.5-flash-001-tuning\n", + "- google:models/gemini-1.5-flash\n", + "- google:models/gemini-1.5-flash-002\n", + "- google:models/gemini-1.5-flash-8b\n", + "- google:models/gemini-1.5-flash-8b-001\n", + "- google:models/gemini-1.5-flash-8b-latest\n", + "- google:models/gemini-1.5-flash-8b-exp-0827\n", + "- google:models/gemini-1.5-flash-8b-exp-0924\n", + "- google:models/gemini-2.0-flash-exp\n", + "- google:models/gemini-2.0-flash\n", + "- google:models/gemini-2.0-flash-001\n", + "- google:models/gemini-2.0-flash-exp-image-generation\n", + "- google:models/gemini-2.0-flash-lite-001\n", + "- google:models/gemini-2.0-flash-lite\n", + "- google:models/gemini-2.0-flash-lite-preview-02-05\n", + "- google:models/gemini-2.0-flash-lite-preview\n", + "- google:models/gemini-2.0-pro-exp\n", + "- google:models/gemini-2.0-pro-exp-02-05\n", + "- google:models/gemini-exp-1206\n", + "- google:models/gemini-2.0-flash-thinking-exp-01-21\n", + "- google:models/gemini-2.0-flash-thinking-exp\n", + "- google:models/gemini-2.0-flash-thinking-exp-1219\n", + "- google:models/learnlm-1.5-pro-experimental\n", + "- google:models/gemma-3-27b-it\n", + "➡️ Testing model: openai:gpt-4o\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:56:34,480 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 00:56:34,482 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:56:34,483 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:56:34,484 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 
00:56:34,486 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:56:34,486 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:56:36,126 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:56:36 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1233'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_f8552ee7d3cda74089c40b09a667a11c'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=K4JnKFgR4hjVklhiYpYOBJWq_aNM03jL.3b6rr6i.mI-1742543796-1.0.1.1-M9iB0xiqdmYVjTNsePN_lcGUzDMaDs3HHtxqi3P9SRCyRCxwewtZvhwXe8FHpQtpTUYayHQJCuirL1j0CbPyHmViUXtbAht3Pk3Tq0dZjVQ; path=/; expires=Fri, 21-Mar-25 08:26:36 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=TpdB9iIHIQ7IEdkiMOoQVOJlXDWtypVFJQW_pjMp3no-1742543796441-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be4bdafb57e21-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:56:36,131 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:56:36,135 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:56:36,145 [DEBUG] httpcore.http11: 
receive_response_body.complete\n", + "2025-03-21 00:56:36,147 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:56:36,149 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:56:36,151 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 07:56:36 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('access-control-expose-headers', 'X-Request-ID'), ('openai-organization', 'user-iqhmndueuqg2ljzblqkr2tgh'), ('openai-processing-ms', '1233'), ('openai-version', '2020-10-01'), ('x-ratelimit-limit-requests', '50000'), ('x-ratelimit-limit-tokens', '150000000'), ('x-ratelimit-remaining-requests', '49999'), ('x-ratelimit-remaining-tokens', '149999987'), ('x-ratelimit-reset-requests', '1ms'), ('x-ratelimit-reset-tokens', '0s'), ('x-request-id', 'req_f8552ee7d3cda74089c40b09a667a11c'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=K4JnKFgR4hjVklhiYpYOBJWq_aNM03jL.3b6rr6i.mI-1742543796-1.0.1.1-M9iB0xiqdmYVjTNsePN_lcGUzDMaDs3HHtxqi3P9SRCyRCxwewtZvhwXe8FHpQtpTUYayHQJCuirL1j0CbPyHmViUXtbAht3Pk3Tq0dZjVQ; path=/; expires=Fri, 21-Mar-25 08:26:36 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=TpdB9iIHIQ7IEdkiMOoQVOJlXDWtypVFJQW_pjMp3no-1742543796441-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923be4bdafb57e21-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:56:36,153 [DEBUG] openai._base_client: request_id: req_f8552ee7d3cda74089c40b09a667a11c\n", + "2025-03-21 00:56:36,172 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:56:36,178 [DEBUG] 
openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:56:36,181 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:56:36,185 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:56:36,192 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:56:36,194 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:56:36,197 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:56:36,199 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🛎️ Service response from openai:gpt-4o:\n", + "Quantum computing leverages the principles of quantum mechanics to process information. Unlike classical bits, quantum bits (qubits) can exist in multiple states simultaneously (superposition). 
This enables quantum computers to perform complex calculations at unprecedented speeds, addressing problems in cryptography, optimization, and simulations beyond the capabilities of classical computers.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:56:36,509 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:56:36 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'248'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_6bcbc261c6340def00989716c3350a3c'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be4c85ba77e21-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:56:36,511 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:56:36,512 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:56:36,514 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:56:36,515 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:56:36,516 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:56:36,518 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 
2025 07:56:36 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '248', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_6bcbc261c6340def00989716c3350a3c', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923be4c85ba77e21-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:56:36,519 [DEBUG] openai._base_client: request_id: req_6bcbc261c6340def00989716c3350a3c\n", + "2025-03-21 00:56:36,520 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. 
Using the registered provider.\n", + "2025-03-21 00:56:36,521 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o-mini' using provider class 'OpenAIModel'.\n", + "2025-03-21 00:56:36,522 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o-mini\n", + "2025-03-21 00:56:36,524 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:56:36,531 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:56:36,533 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:56:36,535 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:56:36,536 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:56:36,537 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:56:36,541 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:56:36,542 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🎯 Direct response from openai:gpt-4o:\n", + "The capital of France is Paris.\n", + "\n", + "➡️ Testing model: openai:gpt-4o-mini\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:56:38,149 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:56:38 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', 
b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1525'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_86c2355bc4104f135311da7c5d4abdfa'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be4ca7e3e7e21-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:56:38,152 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:56:38,155 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:56:38,159 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:56:38,161 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:56:38,163 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:56:38,164 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:56:38 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '1525', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_86c2355bc4104f135311da7c5d4abdfa', 'strict-transport-security': 'max-age=31536000; 
includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923be4ca7e3e7e21-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:56:38,166 [DEBUG] openai._base_client: request_id: req_86c2355bc4104f135311da7c5d4abdfa\n", + "2025-03-21 00:56:38,168 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:56:38,177 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:56:38,180 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:56:38,182 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:56:38,184 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:56:38,186 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:56:38,188 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:56:38,190 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🛎️ Service response from openai:gpt-4o-mini:\n", + "Quantum computing leverages quantum bits (qubits) to perform calculations at unprecedented speeds. Unlike classical bits, qubits can exist in multiple states simultaneously due to superposition and can be entangled, allowing for complex problem-solving. 
This technology has the potential to revolutionize fields like cryptography, optimization, and drug discovery.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:56:38,657 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:56:38 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'412'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_58b051d94e4dc3bd6a99637af1dbae60'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be4d4cac57e21-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:56:38,659 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:56:38,660 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:56:38,672 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:56:38,673 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:56:38,674 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:56:38,676 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:56:38 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 
'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '412', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_58b051d94e4dc3bd6a99637af1dbae60', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923be4d4cac57e21-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:56:38,677 [DEBUG] openai._base_client: request_id: req_58b051d94e4dc3bd6a99637af1dbae60\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🎯 Direct response from openai:gpt-4o-mini:\n", + "The capital of France is Paris.\n", + "\n" + ] + } + ], + "source": [ + "for model_id in model_ids:\n", + " try:\n", + " print(f\"➡️ Testing model: {model_id}\")\n", + "\n", + " # Two usage styles are demonstrated below:\n", + " # 1. Service-based invocation: Recommended for automatic usage tracking.\n", + " service_response: ChatResponse = llm.invoke_model(\n", + " model_id=model_id,\n", + " prompt=\"Explain quantum computing in 50 words\",\n", + " )\n", + " print(f\"🛎️ Service response from {model_id}:\\n{service_response.data}\\n\")\n", + "\n", + " # 2. 
Direct model instance usage: Useful for more granular or PyTorch-like workflows.\n", + " model = load_model(model_id=model_id, registry=model_registry)\n", + " direct_response: ChatResponse = model(\n", + " prompt=\"What's the capital of France?\"\n", + " )\n", + " print(f\"🎯 Direct response from {model_id}:\\n{direct_response.data}\\n\")\n", + "\n", + " except Exception as error:\n", + " print(f\"❌ Error with model {model_id}: {str(error)}\")\n", + " continue\n" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:57:15,048 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:57:15,059 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello!'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:57:15,062 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:57:15,065 [DEBUG] httpcore.connection: close.started\n", + "2025-03-21 00:57:15,067 [DEBUG] httpcore.connection: close.complete\n", + "2025-03-21 00:57:15,069 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", + "2025-03-21 00:57:15,095 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 00:57:15,098 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", + "2025-03-21 00:57:15,117 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 00:57:15,120 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:57:15,124 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 
00:57:15,126 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:57:15,129 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:57:15,131 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:57:15,874 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:57:16 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'700'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999996'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_e5f04f92ab9b2a181e6951edb196397d'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be5bbacd9cfbc-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:57:15,875 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:57:15,876 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:57:15,878 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:57:15,879 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:57:15,881 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:57:15,882 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:57:16 GMT', 
'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '700', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999996', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_e5f04f92ab9b2a181e6951edb196397d', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923be5bbacd9cfbc-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:57:15,886 [DEBUG] openai._base_client: request_id: req_e5f04f92ab9b2a181e6951edb196397d\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello! How can I assist you today?\n" + ] + } + ], + "source": [ + "response = llm(prompt=\"Hello!\", model_id=\"openai:gpt-4o\")\n", + "print(response.data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "---\n", + "\n", + "## Neural Similarity Scoring - Cosine Similarity (WIP)\n", + "\n", + "- from `src/ember/core/utils/embedding_utils.py`\n", + "- from jason" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. 
Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q openai" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from abc import ABC, abstractmethod\n", + "from typing import List, Protocol\n", + "import math\n", + "\n", + "import openai\n", + "import os\n", + "\n", + "\n", + "################################################################\n", + "# 1) Embedding Model Interfaces & Implementations\n", + "################################################################\n", + "\n", + "\n", + "class EmbeddingModel(Protocol):\n", + " \"\"\"Interface for embedding models.\n", + "\n", + " This protocol defines the minimal interface required to compute a text\n", + " embedding. Implementations may use local models, external APIs, or custom\n", + " neural networks.\n", + "\n", + " Methods:\n", + " embed_text: Compute the embedding for a given text.\n", + " \"\"\"\n", + "\n", + " def embed_text(self, text: str) -> List[float]:\n", + " \"\"\"Computes the embedding vector for the provided text.\n", + "\n", + " Args:\n", + " text (str): The text to be embedded.\n", + "\n", + " Returns:\n", + " List[float]: A list of floats representing the embedding vector.\n", + " \"\"\"\n", + " ...\n", + "\n", + "class Text_Embedding_3_EmbeddingModel(EmbeddingModel):\n", + " \"\"\"Interface for embedding models.\n", + "\n", + " This protocol defines the minimal interface required to compute a text\n", + " embedding. 
Implementations may use local models, external APIs, or custom\n", + " neural networks.\n", + "\n", + " Methods:\n", + " embed_text: Compute the embedding for a given text.\n", + " \"\"\"\n", + "\n", + " def embed_text(self, text: str) -> List[float]:\n", + " \"\"\"Computes the embedding vector for the provided text.\n", + "\n", + " Args:\n", + " text (str): The text to be embedded.\n", + "\n", + " Returns:\n", + " List[float]: A list of floats representing the embedding vector.\n", + " \"\"\"\n", + " response = llm(model_id=\"openai:text-embedding-3-large\", prompt=text)\n", + " return response.data\n", + "\n", + "\n", + "class MockEmbeddingModel:\n", + " \"\"\"Mock implementation of an embedding model using naive ASCII encoding.\n", + "\n", + " This simple model converts each character in the text to a normalized ASCII\n", + " value. It is intended solely for demonstration and testing purposes.\n", + "\n", + " Methods:\n", + " embed_text: Converts text to a sequence of normalized ASCII values.\n", + " \"\"\"\n", + "\n", + " def embed_text(self, text: str) -> List[float]:\n", + " \"\"\"Embeds text by converting each character to its normalized ASCII code.\n", + "\n", + " Args:\n", + " text (str): The input text to be embedded.\n", + "\n", + " Returns:\n", + " List[float]: A list of floats representing the embedding. 
Returns an\n", + " empty list if the text is empty.\n", + " \"\"\"\n", + " if not text:\n", + " return []\n", + " return [ord(ch) / 256.0 for ch in text]\n", + "\n", + "\n", + "################################################################\n", + "# 2) Similarity Metric Interface & Implementations\n", + "################################################################\n", + "\n", + "\n", + "class SimilarityMetric(ABC):\n", + " \"\"\"Abstract base class for computing similarity between embedding vectors.\n", + "\n", + " Subclasses must implement the similarity method to calculate a similarity\n", + " score between two vectors.\n", + " \"\"\"\n", + "\n", + " @abstractmethod\n", + " def similarity(self, vec_a: List[float], vec_b: List[float]) -> float:\n", + " \"\"\"Calculates the similarity between two embedding vectors.\n", + "\n", + " Args:\n", + " vec_a (List[float]): The first embedding vector.\n", + " vec_b (List[float]): The second embedding vector.\n", + "\n", + " Returns:\n", + " float: The similarity score, typically in the range [0, 1] or [-1, 1].\n", + " \"\"\"\n", + " ...\n", + "\n", + "\n", + "class CosineSimilarity(SimilarityMetric):\n", + " \"\"\"Implementation of cosine similarity for embedding vectors.\n", + "\n", + " The cosine similarity is defined as:\n", + " similarity(a, b) = (a · b) / (||a|| * ||b||)\n", + "\n", + " Returns 0.0 if either vector is empty or if any vector's norm is zero.\n", + " \"\"\"\n", + "\n", + " def similarity(self, vec_a: List[float], vec_b: List[float]) -> float:\n", + " \"\"\"Computes cosine similarity between two embedding vectors.\n", + "\n", + " Args:\n", + " vec_a (List[float]): The first embedding vector.\n", + " vec_b (List[float]): The second embedding vector.\n", + "\n", + " Returns:\n", + " float: The cosine similarity score.\n", + " \"\"\"\n", + " if not vec_a or not vec_b:\n", + " return 0.0\n", + "\n", + " dot_product: float = sum(a * b for a, b in zip(vec_a, vec_b))\n", + " norm_a: float = math.sqrt(sum(a * 
a for a in vec_a))\n", + " norm_b: float = math.sqrt(sum(b * b for b in vec_b))\n", + " if norm_a == 0 or norm_b == 0:\n", + " return 0.0\n", + "\n", + " return dot_product / (norm_a * norm_b)\n", + "\n", + "\n", + "################################################################\n", + "# 3) High-Level Utility Function\n", + "################################################################\n", + "\n", + "\n", + "def calculate_text_similarity(\n", + " text1: str, text2: str, model: EmbeddingModel, metric: SimilarityMetric\n", + ") -> float:\n", + " \"\"\"Calculates text similarity using an embedding model and a similarity metric.\n", + "\n", + " This function generates embeddings for the provided texts and then computes a\n", + " similarity score using the given similarity metric.\n", + "\n", + " Args:\n", + " text1 (str): The first text string.\n", + " text2 (str): The second text string.\n", + " model (EmbeddingModel): An instance conforming to the embedding model interface.\n", + " metric (SimilarityMetric): An instance implementing a similarity metric.\n", + "\n", + " Returns:\n", + " float: The computed similarity score.\n", + " \"\"\"\n", + " embedding1: List[float] = model.embed_text(text=text1)\n", + " embedding2: List[float] = model.embed_text(text=text2)\n", + " return metric.similarity(vec_a=embedding1, vec_b=embedding2)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'mock_model' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[8], line 8\u001b[0m\n\u001b[1;32m 4\u001b[0m text_a: \u001b[38;5;28mstr\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHello 
world!\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 5\u001b[0m text_b: \u001b[38;5;28mstr\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHello, world??\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 7\u001b[0m score: \u001b[38;5;28mfloat\u001b[39m \u001b[38;5;241m=\u001b[39m calculate_text_similarity(\n\u001b[0;32m----> 8\u001b[0m text1\u001b[38;5;241m=\u001b[39mtext_a, text2\u001b[38;5;241m=\u001b[39mtext_b, model\u001b[38;5;241m=\u001b[39mmock_model, metric\u001b[38;5;241m=\u001b[39mcosine\n\u001b[1;32m 9\u001b[0m )\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSimilarity between \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_a\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m and \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_b\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mscore\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n", + "\u001b[0;31mNameError\u001b[0m: name 'mock_model' is not defined" + ] + } + ], + "source": [ + "mock_model: Text_Embedding_3_EmbeddingModel = Text_Embedding_3_EmbeddingModel()\n", + "cosine: CosineSimilarity = CosineSimilarity()\n", + "\n", + "text_a: str = \"Hello world!\"\n", + "text_b: str = \"Hello, world??\"\n", + "\n", + "score: float = calculate_text_similarity(\n", + " text1=text_a, text2=text_b, model=mock_model, metric=cosine\n", + ")\n", + "print(f\"Similarity between '{text_a}' and '{text_b}': {score}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "---\n", + "\n", + "## Compression Ratio (WIP)\n", + "\n", + "- from `src/ember/core/utils/eval/evaluators.py`\n", + "- from connor" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "metadata": {}, + "outputs": [ + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q diversity==0.2.0\n", + "%pip install -q spacy==3.8.4" + ] + }, + { + "cell_type": "code", + "execution_count": 110, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "import re\n", + "import subprocess\n", + "from typing import Any, Dict, TypeVar, Optional, List, Generic, Callable, Union\n", + "\n", + "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", + "from ember.core.utils.eval.extractors import RegexExtractor\n", + "\n", + "from diversity import compression_ratio\n", + "\n", + "T_out = TypeVar(\"T_out\")\n", + "T_truth = TypeVar(\"T_truth\")\n", + "\n", + "\n", + "class ComposedEvaluator(IEvaluator[T_out, T_truth], Generic[T_out, T_truth]):\n", + " \"\"\"Combines an output extractor with an evaluator for the extracted data.\n", + "\n", + " This evaluator first transforms the system output using the provided extractor,\n", + " then evaluates the extracted value using the specified base evaluator.\n", + "\n", + " Args:\n", + " extractor: An object with an `extract` method to process the system output.\n", + " base_evaluator (IEvaluator): An evaluator that processes the extracted output.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result of the evaluation.\n", + " \"\"\"\n", + "\n", + " def __init__(\n", + " self,\n", + " extractor: Any, # Expecting an extractor with an `extract` method.\n", + " base_evaluator: IEvaluator[Any, Any],\n", + " ) -> None:\n", + " self.extractor = extractor\n", + " self.base_evaluator = base_evaluator\n", + "\n", + " def evaluate(\n", + " self, system_output: T_out, correct_answer: Any, **kwargs: Any\n", + " ) -> EvaluationResult:\n", + " \"\"\"Evaluates the provided system output against the correct 
answer.\n", + "\n", + " Args:\n", + " system_output (T_out): The raw output generated by the system.\n", + " correct_answer (Any): The expected correct answer.\n", + " **kwargs: Additional keyword arguments for extraction or evaluation.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result of evaluating the extracted value.\n", + " \"\"\"\n", + " extracted_value = self.extractor.extract(system_output, **kwargs)\n", + " return self.base_evaluator.evaluate(extracted_value, correct_answer, **kwargs)\n", + "\n", + "\n", + "# Basic Evaluators\n", + "\n", + "\n", + "class ExactMatchEvaluator(IEvaluator[str, str]):\n", + " \"\"\"Evaluator to check for an exact match between two strings,\n", + " ignoring differences in whitespace and case.\n", + "\n", + " Example:\n", + " evaluator = ExactMatchEvaluator()\n", + " result = evaluator.evaluate(\"Hello World\", \"hello world\")\n", + "\n", + " Args:\n", + " compare_fn (Optional[Callable[[str, str], bool]]): Optional custom comparison function.\n", + " If not provided, strings are normalized (whitespace removed, lowercase) before comparison.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result containing a correctness flag and a score.\n", + " \"\"\"\n", + "\n", + " def __init__(self, compare_fn: Optional[Callable[[str, str], bool]] = None) -> None:\n", + " self.compare_fn = compare_fn or self._default_compare\n", + "\n", + " def _default_compare(self, str1: str, str2: str) -> bool:\n", + " \"\"\"Default string comparison function that ignores case and whitespace.\n", + "\n", + " Args:\n", + " str1 (str): First string to compare\n", + " str2 (str): Second string to compare\n", + "\n", + " Returns:\n", + " bool: True if strings match after normalization\n", + " \"\"\"\n", + " return str1.strip().lower() == str2.strip().lower()\n", + "\n", + " def evaluate(\n", + " self, system_output: str, correct_answer: str, **kwargs: Any\n", + " ) -> EvaluationResult:\n", + " \"\"\"Evaluates whether a system output exactly 
matches the correct answer.\n", + "\n", + " Args:\n", + " system_output (str): The system-generated string.\n", + " correct_answer (str): The expected answer string.\n", + " **kwargs: Additional keyword arguments (unused).\n", + "\n", + " Returns:\n", + " EvaluationResult: An object with `is_correct` set to True if the normalized strings match,\n", + " along with a corresponding score.\n", + " \"\"\"\n", + " is_correct = self.compare_fn(system_output, correct_answer)\n", + " score = 1.0 if is_correct else 0.0\n", + " return EvaluationResult(is_correct=is_correct, score=score)\n", + "\n", + "class DiversityScoringEvaluator(IEvaluator[List[str], None]):\n", + " \"\"\"\n", + " Evaluator to test ensemble outputs -> score them (float)\n", + " \"\"\"\n", + " def evaluate(\n", + " self, \n", + " system_output: List[str], \n", + " **kwargs) -> EvaluationResult:\n", + " if system_output is None or len(system_output) == 0:\n", + " return EvaluationResult(is_correct=False, score=-1)\n", + "\n", + " # current compression ratio formula\n", + " # TODO: update scoring function to make it better\n", + " # -> like use token count\n", + "\n", + " # example I was thinking about:\n", + " letter_sum = sum(len(response) for response in system_output)\n", + " # ratio = compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", + " ratio = compression_ratio(system_output, algorithm='gzip',verbose=True)\n", + " return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output})\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Edit Distance (WIP)\n", + "- from kathleen" + ] + }, + { + "cell_type": "code", + "execution_count": 74, + "metadata": { + "collapsed": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q python-Levenshtein" + ] + }, + { + 
"cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import Levenshtein\n", + "from typing import List\n", + "from dataclasses import dataclass\n", + "\n", + "@dataclass\n", + "class EvaluationResult:\n", + " is_correct: bool\n", + " score: float\n", + " metadata: dict\n", + "\n", + "class EditDistanceScoringEvaluator:\n", + "\n", + " def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult:\n", + " if system_output is None or len(system_output) == 0:\n", + " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", + "\n", + " diversity_score = self.compute_distance(system_output)\n", + "\n", + " return EvaluationResult(\n", + " is_correct=True, \n", + " score=diversity_score,\n", + " metadata={'responses': system_output}\n", + " )\n", + "\n", + " def compute_distance(self, outputs: List[str]) -> float:\n", + " n = len(outputs)\n", + " if n < 2:\n", + " return 0.0\n", + "\n", + " total_distance = 0\n", + " pairs = 0\n", + "\n", + " for i in range(n):\n", + " for j in range(i + 1, n):\n", + " dist = Levenshtein.distance(outputs[i], outputs[j])\n", + " max_len = max(len(outputs[i]), len(outputs[j]))\n", + " normalized_dist = dist / max_len if max_len > 0 else 0 \n", + " total_distance += normalized_dist\n", + " pairs += 1\n", + " \n", + " return total_distance / pairs if pairs > 0 else 0.0\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Diversity Score: 0.8301\n", + "Is Correct: True\n", + "Metadata: {'responses': ['hi there', 'hi', 'hello', 'yo whatup']}\n" + ] + } + ], + "source": [ + "distance_evaluator = EditDistanceScoringEvaluator()\n", + "\n", + "# input_strs = [\n", + "# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. 
Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", + "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", + "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", + "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. 
When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", + "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", + "# ]\n", + "\n", + "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", + "\n", + "# input_strs = [\"This is a sample text with lots of repetition.\", \n", + "# \"This is a sample text with lots of repetition.\",\n", + "# \"This is a sample text with lots of repetition.\"]\n", + "\n", + "edit_distance = distance_evaluator.evaluate(input_strs)\n", + "\n", + "print(f\"Diversity Score: {edit_distance.score:.4f}\")\n", + "print(f\"Is Correct: {edit_distance.is_correct}\")\n", + "print(f\"Metadata: {edit_distance.metadata}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Novelty Score\n", + "- need to merge" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List\n", + "from dataclasses import dataclass\n", + "import numpy as np\n", + "\n", + "@dataclass\n", + "class EvaluationResult:\n", + " is_correct: bool\n", + " 
score: float\n", + "    metadata: dict\n", + "\n", + "class NoveltyScoringEvaluator:\n", + "    \n", + "    def evaluate(self, model: EmbeddingModel, system_output: List[str], **kwargs) -> EvaluationResult:\n", + "        if not system_output or len(system_output) == 0:\n", + "            return EvaluationResult(is_correct=False, score=-1, metadata={})\n", + "\n", + "        self.model = model\n", + "        novelty_scores = [self.compute_novelty(r, system_output[:i]) for i, r in enumerate(system_output)]\n", + "\n", + "        avg_novelty = sum(novelty_scores) / len(novelty_scores) if novelty_scores else 0.0\n", + "\n", + "        return EvaluationResult(\n", + "            is_correct=True,\n", + "            score=avg_novelty,\n", + "            metadata={'responses': system_output, 'novelty_scores': novelty_scores}\n", + "        )\n", + "\n", + "    def compute_novelty(self, response: str, prior_responses: List[str]) -> float:\n", + "        if not prior_responses:\n", + "            return 1.0\n", + "\n", + "        new_embedding = self.model.embed_text(response)\n", + "        prior_embeddings = [self.model.embed_text(r) for r in prior_responses]\n", + "\n", + "        similarities = [\n", + "            np.dot(new_embedding, prior_embedding) /\n", + "            (np.linalg.norm(new_embedding) * np.linalg.norm(prior_embedding))\n", + "            for prior_embedding in prior_embeddings\n", + "        ]\n", + "\n", + "        return 1 - max(similarities)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EvaluationResult(is_correct=True, score=0.08368770360509659, metadata={'responses': ['Hello world!', 'Hi there!', 'Goodbye!']})\n" + ] + } + ], + "source": [ + "novelty_evaluator = NoveltyScoringEvaluator()\n", + "\n", + "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", + "\n", + "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", + "novelty = novelty_evaluator.evaluate(mock_model, input_strs)\n", + "\n", + "print(f\"Diversity Score: {novelty.score:.4f}\")\n", + "print(f\"Is Correct: {novelty.is_correct}\")\n", + "print(f\"Metadata: 
{novelty.metadata}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "---\n", + "\n", + "## Putting it all together" + ] + }, + { + "cell_type": "code", + "execution_count": 171, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", + "cosine: CosineSimilarity = CosineSimilarity()\n", + "exact_evaluator = ExactMatchEvaluator()\n", + "diversity_evaluator = DiversityScoringEvaluator()\n", + "edit_dist_evaluator = EditDistanceScoringEvaluator()\n", + "\n", + "def ensemble_diversity(strings):\n", + " compression = diversity_evaluator.evaluate(strings)\n", + " print(\"DiversityScoringEvaluator result:\", compression)\n", + " print(\"1/compression: \", 1/compression.score)\n", + " scores = list()\n", + " for ind1 in range(len(strings)):\n", + " ind2 = ind1+1 if ind1+1 != len(strings) else 0\n", + " curr_score = calculate_text_similarity(text1=strings[ind1], text2=strings[ind2], model=mock_model, metric=cosine)\n", + " # print(f\"SimilarityScore between ind1={ind1} and ind2={ind2}: {curr_score}\")\n", + " scores.append(curr_score)\n", + " avg_score = np.average(scores)\n", + " print(f\"Avg cosine similarity: {avg_score}\")\n", + " print(f\"diversity cosine-sim inverse: {1-avg_score}\")\n", + " edit_distance = edit_dist_evaluator.evaluate(strings)\n", + " print(f\"edit-dist score: {edit_distance.score:.4f}\")\n", + " print(\"-------------------------------\")\n", + " print(f\"possible diversity score (higher is better): {((1 - avg_score) + (min(1/compression.score, 1)) + edit_distance.score)/3}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original Size: 140\n", + "Compressed Size: 103\n", + "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.359, metadata={'responses': ['This is a sample text with lots of 
repetition.', 'This is a sample text with lots of repetition.', 'This is a sample text with lots of repetition.']})\n", + "1/compression: 0.7358351729212657\n", + "Avg cosine similarity: 1.0\n", + "diversity cosine-sim inverse: 0.0\n", + "edit-dist score: 0.0000\n", + "-------------------------------\n", + "possible diversity score (higher is better): 0.24527839097375523\n" + ] + } + ], + "source": [ + "input_strs = []\n", + "scores = []\n", + "input_strs.append([\n", + " \"Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", + " \"In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", + " \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. 
Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", + " \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", + " \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", + "])\n", + "# input_strs.append([\"hi there\", \"hi\", \"hello\", \"yo whatup\"])\n", + "\n", + "input_strs.append([\"This is a sample text with lots of repetition.\", \n", + " \"This is a sample text with lots of repetition.\",\n", + " \"This is a sample text with lots of repetition.\"])\n", + "\n", + "responses = []\n", + "for i in range(10):\n", + " res = llm(prompt=\"Tell me a funny joke. 
Keep it concise.\", model_id=\"openai:gpt-4o\").data.replace(\"\\n\", \"\")\n", + " responses.append(res)\n", + " print(f\"Joke {i}: [{res}]\")\n", + "\n", + "responses = []\n", + "res = llm(prompt=\"Tell me 10 different jokes. make them split with \\'||\\'. Don't say anything else besides the joke. \", model_id=\"openai:gpt-4o\").data.replace(\"\\n\", \"\").split('||')\n", + "responses += res\n", + "\n", + "\n", + "ensemble_diversity(input_strs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 0: [Why don’t skeletons fight each other? They don’t have the guts. ]\n", + "Joke 1: [ Why did the scarecrow win an award? Because he was outstanding in his field. ]\n", + "Joke 2: [ Parallel lines have so much in common. It’s a shame they’ll never meet. ]\n", + "Joke 3: [ I told my wife she was drawing her eyebrows too high. She looked surprised. ]\n", + "Joke 4: [ I threw a boomerang a few years ago. I now live in constant fear. ]\n", + "Joke 5: [ Why don’t scientists trust atoms? Because they make up everything. ]\n", + "Joke 6: [ I told my computer I needed a break, and it gave me a Kit-Kat. ]\n", + "Joke 7: [ Why did the tomato turn red? Because it saw the salad dressing! ]\n", + "Joke 8: [ Did you hear about the cheese factory that exploded in France? There was nothing left but de-brie. ]\n", + "Joke 9: [ What’s orange and sounds like a parrot? A carrot.]\n", + "-----\n", + "Original Size: 727\n", + "Compressed Size: 470\n", + "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.547, metadata={'responses': ['Why don’t skeletons fight each other? They don’t have the guts. ', ' Why did the scarecrow win an award? Because he was outstanding in his field. ', ' Parallel lines have so much in common. It’s a shame they’ll never meet. ', ' I told my wife she was drawing her eyebrows too high. She looked surprised. 
', ' I threw a boomerang a few years ago. I now live in constant fear. ', ' Why don’t scientists trust atoms? Because they make up everything. ', ' I told my computer I needed a break, and it gave me a Kit-Kat. ', ' Why did the tomato turn red? Because it saw the salad dressing! ', ' Did you hear about the cheese factory that exploded in France? There was nothing left but de-brie. ', ' What’s orange and sounds like a parrot? A carrot.']})\n", + "1/compression: 0.6464124111182935\n", + "Avg cosine similarity: 0.3749999878695394\n", + "diversity cosine-sim inverse: 0.6250000121304606\n", + "edit-dist score: 0.7371\n", + "-------------------------------\n", + "possible diversity score (higher is better): 0.6695073326061808\n" + ] + } + ], + "source": [ + "responses = []\n", + "res = llm(prompt=\"Tell me 10 different jokes. make them split with \\'||\\'. Don't say anything else besides the joke. \", model_id=\"openai:gpt-4o\").data.replace(\"\\n\", \"\").split('||')\n", + "responses += res\n", + "\n", + "if prompts == 1 and len(responses) > 1:\n", + " for i in range(len(responses)):\n", + " print(f\"Joke {i}: [{responses[i]}]\")\n", + "\n", + "print(\"-----\")\n", + "ensemble_diversity(responses)" + ] + }, + { + "cell_type": "code", + "execution_count": 179, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Story 0: [In a realm where atoms dance on quantum stages, quantum computers emerge. Unlike classical bits that stand as ones or zeroes, their quantum siblings, qubits, perform an intricate ballet of superposition. Entanglement binds qubits in ghostly embrace, whispering answers across realms. A programmer conjures an algorithm, setting the qubits into mesmerizing motion. As quantum gates usher them through a tapestry of parallel universes, qubits explore myriad solutions simultaneously. Amidst the quantum haze, decoherence threatens their delicate existence. 
Yet, as the dance concludes, a singular truth crystallizes, offering solutions with speed and power beyond classical comprehension.]\n", + "Story 1: [Under the golden sun, a family of bunnies frolicked in the meadow, their soft fur kissed by the gentle breeze. Snowball, with her long ears twitching, led the way through a maze of daisies. Hopscotch, her brother, leaped high, disappearing momentarily among the swaying grass. Little Thumper giggled, his tiny feet thumping in delight. The meadow was alive with laughter as the bunnies chased butterflies, their joy infectious. A curious robin joined, fluttering above, adding a chorus to their playful romp. As the sun dipped below the horizon, a serene hush settled, with the bunnies cuddled close, dreaming of tomorrow's adventures.]\n", + "Story 2: [Once upon a time, Pikachu embarked on a journey through the Enchanted Forest, known for its mystical glow. Eager to explore, Pikachu bounded over sparkling streams and under vibrant canopies. Along the path, it encountered a lost Charmander, its flame dim. Pikachu, determined to help, sparked tiny bursts of electricity to guide Charmander. Together, they made it to the Healing Spring, rejuvenating Charmander and enabling its flame to blaze bright again. Grateful, Charmander joined Pikachu, and the duo continued their adventure, forging a bond of friendship that lit up even the darkest trails ahead. Thus, their adventure began anew.]\n", + "Story 3: [In the heart of Tokyo, nestled between bustling streets, stood Haruto's ramen shop. The little shop was famed for its rich miso ramen, a recipe passed down through generations. Each morning, Haruto would rise before dawn, crafting the broth with meticulous care. Locals and tourists alike lined up eagerly, savoring the aroma wafting through the air. Young Akira, a regular since childhood, adored the warmth of each bowl. One chilly winter, Haruto, now gray-haired, smiled at Akira, handing him the ladle. 
With a nod, Haruto retired, knowing the flavors and memories had found a new guardian in Akira.]\n", + "-----\n", + "Original Size: 2565\n", + "Compressed Size: 1374\n", + "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.867, metadata={'responses': ['In a realm where atoms dance on quantum stages, quantum computers emerge. Unlike classical bits that stand as ones or zeroes, their quantum siblings, qubits, perform an intricate ballet of superposition. Entanglement binds qubits in ghostly embrace, whispering answers across realms. A programmer conjures an algorithm, setting the qubits into mesmerizing motion. As quantum gates usher them through a tapestry of parallel universes, qubits explore myriad solutions simultaneously. Amidst the quantum haze, decoherence threatens their delicate existence. Yet, as the dance concludes, a singular truth crystallizes, offering solutions with speed and power beyond classical comprehension.', \"Under the golden sun, a family of bunnies frolicked in the meadow, their soft fur kissed by the gentle breeze. Snowball, with her long ears twitching, led the way through a maze of daisies. Hopscotch, her brother, leaped high, disappearing momentarily among the swaying grass. Little Thumper giggled, his tiny feet thumping in delight. The meadow was alive with laughter as the bunnies chased butterflies, their joy infectious. A curious robin joined, fluttering above, adding a chorus to their playful romp. As the sun dipped below the horizon, a serene hush settled, with the bunnies cuddled close, dreaming of tomorrow's adventures.\", 'Once upon a time, Pikachu embarked on a journey through the Enchanted Forest, known for its mystical glow. Eager to explore, Pikachu bounded over sparkling streams and under vibrant canopies. Along the path, it encountered a lost Charmander, its flame dim. Pikachu, determined to help, sparked tiny bursts of electricity to guide Charmander. 
Together, they made it to the Healing Spring, rejuvenating Charmander and enabling its flame to blaze bright again. Grateful, Charmander joined Pikachu, and the duo continued their adventure, forging a bond of friendship that lit up even the darkest trails ahead. Thus, their adventure began anew.', \"In the heart of Tokyo, nestled between bustling streets, stood Haruto's ramen shop. The little shop was famed for its rich miso ramen, a recipe passed down through generations. Each morning, Haruto would rise before dawn, crafting the broth with meticulous care. Locals and tourists alike lined up eagerly, savoring the aroma wafting through the air. Young Akira, a regular since childhood, adored the warmth of each bowl. One chilly winter, Haruto, now gray-haired, smiled at Akira, handing him the ladle. With a nod, Haruto retired, knowing the flavors and memories had found a new guardian in Akira.\"]})\n", + "1/compression: 0.5356186395286556\n", + "Avg cosine similarity: 0.8809454989311085\n", + "diversity cosine-sim inverse: 0.11905450106889148\n", + "edit-dist score: 0.7669\n", + "-------------------------------\n", + "possible diversity score (higher is better): 0.47384919069532816\n" + ] + } + ], + "source": [ + "responses = []\n", + "res1 = llm(prompt=\"Tell me a story about how quantum computers work. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", + "responses.append(res1)\n", + "res2 = llm(prompt=\"Tell me a story about bunnies frolicking in the grass. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", + "responses.append(res2)\n", + "res3 = llm(prompt=\"Tell me a story about the pokemon pikachu and it's adventures. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", + "responses.append(res3)\n", + "res4 = llm(prompt=\"Tell me a story about a ramen shop. Make it 100 words. 
Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", + "responses.append(res4)\n", + "\n", + "if prompts == 1 and len(responses) > 1:\n", + " for i in range(len(responses)):\n", + " print(f\"Story {i}: [{responses[i]}]\")\n", + "\n", + "print(\"-----\")\n", + "ensemble_diversity(responses)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "---\n", + "## Improvements TODO\n", + "- Merge all functions\n", + "- fix ensembling\n", + "## Potential other cases to explore\n", + "- work ensembling all \"diversity\" related metrics \n", + " - add more metrics\n", + " - tune added metrics\n", + "- combination of validation/hallucination metric + ensembled diversity metric -> score" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ember_upgrade", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/src/ember/core/utils/embedding_utils.py b/src/ember/core/utils/embedding_utils.py index a02592f1..365035b7 100644 --- a/src/ember/core/utils/embedding_utils.py +++ b/src/ember/core/utils/embedding_utils.py @@ -57,6 +57,28 @@ def embed_text(self, text: str) -> List[float]: return [ord(ch) / 256.0 for ch in text] +class OpenAITextEmbedding3(Protocol): + """Interface for embedding models. + + This protocol defines the minimal interface required to compute a text + embedding. Implementations may use local models, external APIs, or custom + neural networks. + + Methods: + embed_text: Compute the embedding for a given text. + """ + + def embed_text(self, text: str) -> List[float]: + """Computes the embedding vector for the provided text. + + Args: + text (str): The text to be embedded. 
class CosineSimilarityScoringEvaluator(IEvaluator[List[str], None]):
    """Scores the diversity of an ensemble of responses via cosine similarity.

    NOTE(review): this class was previously a copy-paste duplicate of
    CompressionRatioDiversityEvaluator -- it computed a compression ratio and
    never touched cosine similarity. It now does what its name says: it
    computes the mean pairwise cosine similarity between token-count vectors
    of the responses and inverts it, so HIGHER scores mean MORE diverse
    ensembles.

    The bag-of-words vectors are a lightweight, dependency-free stand-in for
    real embeddings; for semantic similarity, compute embedding vectors
    upstream (see embedding_utils) and compare those instead.
    """

    def evaluate(self, system_output: List[str], **kwargs) -> "EvaluationResult":
        """Evaluates ensemble diversity for a list of response strings.

        Args:
            system_output: The ensemble responses to compare.

        Returns:
            EvaluationResult whose ``score`` lies in [0, 1] (higher = more
            diverse). Empty or ``None`` input yields ``is_correct=False`` and
            ``score=-1``, matching the sibling evaluators in this module.
        """
        if not system_output:
            return EvaluationResult(is_correct=False, score=-1)

        diversity = 1.0 - self._mean_pairwise_cosine(system_output)
        return EvaluationResult(
            is_correct=True,
            score=diversity,
            metadata={"responses": system_output},
        )

    @staticmethod
    def _mean_pairwise_cosine(texts: List[str]) -> float:
        """Mean cosine similarity over all unordered pairs of texts.

        Each text is embedded as a lowercase whitespace-token count vector.
        Returns 0.0 when fewer than two texts are supplied.
        """
        # Local imports keep the module's top-of-file dependency block
        # untouched by this patch hunk.
        import math
        from collections import Counter
        from itertools import combinations

        def cosine(a: "Counter", b: "Counter") -> float:
            # Dot product over the (sparse) shared vocabulary only;
            # Counter.__getitem__ returns 0 for missing tokens.
            dot = sum(count * b[token] for token, count in a.items())
            norm_a = math.sqrt(sum(c * c for c in a.values()))
            norm_b = math.sqrt(sum(c * c for c in b.values()))
            return dot / (norm_a * norm_b) if norm_a and norm_b else 0.0

        vectors = [Counter(text.lower().split()) for text in texts]
        similarities = [cosine(a, b) for a, b in combinations(vectors, 2)]
        return sum(similarities) / len(similarities) if similarities else 0.0
class EditDistanceScoringEvaluator:
    """Scores ensemble diversity as the mean pairwise normalized edit distance.

    For every unordered pair of responses the edit (Levenshtein) distance is
    computed and normalized by the longer string's length, giving a value in
    [0, 1]; the final score is the average over all pairs (higher = more
    diverse). Unlike its siblings in this module it does not (yet) subclass
    IEvaluator, but it exposes the same ``evaluate`` contract.
    """

    def evaluate(self, system_output: List[str], **kwargs) -> "EvaluationResult":
        """Evaluates a list of ensemble responses.

        Args:
            system_output: The ensemble responses to compare.

        Returns:
            EvaluationResult with ``score`` in [0, 1] (higher = more
            diverse). Empty or ``None`` input yields ``is_correct=False`` and
            ``score=-1``, matching the other diversity evaluators.
        """
        if not system_output:
            return EvaluationResult(is_correct=False, score=-1, metadata={})

        return EvaluationResult(
            is_correct=True,
            score=self.compute_distance(system_output),
            metadata={"responses": system_output},
        )

    def compute_distance(self, outputs: List[str], distance_fn=None) -> float:
        """Mean normalized pairwise edit distance over ``outputs``.

        Args:
            outputs: Texts to compare pairwise.
            distance_fn: Optional ``(str, str) -> int`` edit-distance
                function; defaults to ``Levenshtein.distance``. Injectable so
                tests or callers can substitute a metric without requiring
                the C extension.

        Returns:
            Average of ``distance / max(len(a), len(b))`` over all unordered
            pairs, or 0.0 when fewer than two outputs are given.
        """
        from itertools import combinations

        if len(outputs) < 2:
            return 0.0
        if distance_fn is None:
            # Third-party dependency resolved lazily, only when actually used.
            distance_fn = Levenshtein.distance

        total = 0.0
        pairs = 0
        for left, right in combinations(outputs, 2):
            longest = max(len(left), len(right))
            # Guard the degenerate case of two empty strings.
            total += distance_fn(left, right) / longest if longest else 0.0
            pairs += 1
        return total / pairs
diff --git a/src/ember/examples/diversity_testbench.ipynb b/src/ember/examples/diversity_testbench.ipynb
new file mode 100644
index 00000000..63b5bdff
--- /dev/null
+++ b/src/ember/examples/diversity_testbench.ipynb
@@ -0,0 +1,2545 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Diversity Testbench\n",
+    "\n",
+    "---\n",
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Ember Package Testing (WIP)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Setup Dependencies"
+   ]
+  },
+  {
+   "cell_type": "code",
+<<<<<<< HEAD
+   "execution_count": 10,
+=======
+   "execution_count": 33,
+>>>>>>> feb7b31 (added embedding model)
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import logging, sys, os\n",
+    "from typing import Dict, Any, List\n",
+    "\n",
+    "logging.basicConfig(level=logging.ERROR)"
+   ]
+  },
+  {
+   "cell_type": "code",
+<<<<<<< HEAD
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "os.environ[\"OPENAI_API_KEY\"] = ''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "openai_key = os.getenv(\"OPENAI_API_KEY\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+=======
+   "execution_count": 34,
+>>>>>>> feb7b31 (added embedding model)
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+<<<<<<< HEAD
+      "/Users/kathleenge/Desktop/NON/ember-v2\n"
+=======
+      "[REDACTED: a live OpenAI API key (sk-proj-...) was committed in this cell output -- revoke the key and strip outputs before committing]\n"
+     ]
+    }
+   ],
+   "source": [
+    "openai_key = os.getenv(\"OPENAI_API_KEY\")\n",
+    "print(openai_key)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 35,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "/root/ember/connor/ember-v2\n"
+>>>>>>> feb7b31 
(added embedding model) + ] + } + ], + "source": [ + "# fixing dependencies if current path is /src/ember/examples/diversity_testbench.ipynb\n", + "target_dir = 'src/ember/examples'\n", + "if os.getcwd()[-18:] == target_dir:\n", + " os.chdir('../../..')\n", + "print(os.getcwd())\n", + "\n", + "project_root = os.path.abspath(os.path.join(os.getcwd(), \"../../..\"))\n", + "if project_root not in sys.path:\n", + " sys.path.insert(0, project_root)" + ] + }, + { + "cell_type": "code", +<<<<<<< HEAD + "execution_count": 14, +======= + "execution_count": 36, +>>>>>>> feb7b31 (added embedding model) + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ +<<<<<<< HEAD + "/Users/kathleenge/desktop/non/ember-v2\r\n" +======= + "/root/ember/connor/ember-v2\n" +>>>>>>> feb7b31 (added embedding model) + ] + } + ], + "source": [ + "!echo $PWD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "NOTE: things below this are to install required dependencies (only do this the venv)" + ] + }, + { + "cell_type": "code", +<<<<<<< HEAD + "execution_count": 15, +======= + "execution_count": 37, +>>>>>>> feb7b31 (added embedding model) + "metadata": {}, + "outputs": [], + "source": [ + "# %pip install -q -e .\n", + "# %pip install -q google-generativeai==0.7.2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Ember Repo Loads (WIP)" + ] + }, + { + "cell_type": "code", +<<<<<<< HEAD + "execution_count": 16, +======= + "execution_count": 38, +>>>>>>> feb7b31 (added embedding model) + "metadata": {}, + "outputs": [], + "source": [ + "# from ember.core.registry.model.model_module.lm import LMModule, LMModuleConfig\n", + "from ember.core.registry.model.config.settings import initialize_ember\n", + "from ember.core.registry.model.base.services.model_service import ModelService\n", + "from ember.core.registry.model.base.schemas.model_info import ModelInfo\n", + "from 
ember.core.registry.model.base.schemas.cost import ModelCost, RateLimit\n", + "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", + "\n", + "from ember.core.registry.model import load_model, ChatResponse\n", + "from ember.core.registry.model.base.services.model_service import ModelService" + ] + }, + { + "cell_type": "code", +<<<<<<< HEAD + "execution_count": 17, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "ename": "ValidationError", + "evalue": "4 validation errors for EmberSettings\nregistry.models.3.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.4.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.5.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.6.api_key\n Value error, No API key provided or defaulted. 
[type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[17], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m model_registry \u001b[38;5;241m=\u001b[39m initialize_ember()\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(model_registry\u001b[38;5;241m.\u001b[39mlist_models())\n\u001b[1;32m 3\u001b[0m llm \u001b[38;5;241m=\u001b[39m ModelService(registry\u001b[38;5;241m=\u001b[39mmodel_registry)\n", + "File \u001b[0;32m~/Desktop/NON/ember-v2/src/ember/core/registry/model/config/settings.py:252\u001b[0m, in \u001b[0;36minitialize_ember\u001b[0;34m(config_path, auto_register, auto_discover)\u001b[0m\n\u001b[1;32m 249\u001b[0m settings_obj\u001b[38;5;241m.\u001b[39mregistry\u001b[38;5;241m.\u001b[39mauto_register \u001b[38;5;241m=\u001b[39m auto_register\n\u001b[1;32m 250\u001b[0m settings_obj\u001b[38;5;241m.\u001b[39mregistry\u001b[38;5;241m.\u001b[39mauto_discover \u001b[38;5;241m=\u001b[39m auto_discover\n\u001b[0;32m--> 252\u001b[0m registry_instance: ModelRegistry \u001b[38;5;241m=\u001b[39m _initialize_model_registry(settings\u001b[38;5;241m=\u001b[39msettings_obj)\n\u001b[1;32m 253\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m registry_instance\n", + "File \u001b[0;32m~/Desktop/NON/ember-v2/src/ember/core/registry/model/config/settings.py:184\u001b[0m, in \u001b[0;36m_initialize_model_registry\u001b[0;34m(settings)\u001b[0m\n\u001b[1;32m 181\u001b[0m merged_config \u001b[38;5;241m=\u001b[39m resolve_env_vars(data\u001b[38;5;241m=\u001b[39mmerged_config)\n\u001b[1;32m 182\u001b[0m logger\u001b[38;5;241m.\u001b[39mdebug(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFinal merged config keys: 
\u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28mlist\u001b[39m(merged_config\u001b[38;5;241m.\u001b[39mkeys()))\n\u001b[0;32m--> 184\u001b[0m final_settings: EmberSettings \u001b[38;5;241m=\u001b[39m EmberSettings(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmerged_config)\n\u001b[1;32m 185\u001b[0m registry: ModelRegistry \u001b[38;5;241m=\u001b[39m ModelRegistry(logger\u001b[38;5;241m=\u001b[39mlogger)\n\u001b[1;32m 187\u001b[0m discovered_models: Dict[\u001b[38;5;28mstr\u001b[39m, ModelInfo] \u001b[38;5;241m=\u001b[39m {}\n", + "File \u001b[0;32m~/anaconda3/lib/python3.11/site-packages/pydantic_settings/main.py:176\u001b[0m, in \u001b[0;36mBaseSettings.__init__\u001b[0;34m(__pydantic_self__, _case_sensitive, _nested_model_default_partial_update, _env_prefix, _env_file, _env_file_encoding, _env_ignore_empty, _env_nested_delimiter, _env_nested_max_split, _env_parse_none_str, _env_parse_enums, _cli_prog_name, _cli_parse_args, _cli_settings_source, _cli_parse_none_str, _cli_hide_none_type, _cli_avoid_json, _cli_enforce_required, _cli_use_class_docs_for_groups, _cli_exit_on_error, _cli_prefix, _cli_flag_prefix_char, _cli_implicit_flags, _cli_ignore_unknown_args, _cli_kebab_case, _secrets_dir, **values)\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\n\u001b[1;32m 148\u001b[0m __pydantic_self__,\n\u001b[1;32m 149\u001b[0m _case_sensitive: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m|\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 174\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mvalues: Any,\n\u001b[1;32m 175\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 176\u001b[0m 
\u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__init__\u001b[39m(\n\u001b[1;32m 177\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m__pydantic_self__\u001b[38;5;241m.\u001b[39m_settings_build_values(\n\u001b[1;32m 178\u001b[0m values,\n\u001b[1;32m 179\u001b[0m _case_sensitive\u001b[38;5;241m=\u001b[39m_case_sensitive,\n\u001b[1;32m 180\u001b[0m _nested_model_default_partial_update\u001b[38;5;241m=\u001b[39m_nested_model_default_partial_update,\n\u001b[1;32m 181\u001b[0m _env_prefix\u001b[38;5;241m=\u001b[39m_env_prefix,\n\u001b[1;32m 182\u001b[0m _env_file\u001b[38;5;241m=\u001b[39m_env_file,\n\u001b[1;32m 183\u001b[0m _env_file_encoding\u001b[38;5;241m=\u001b[39m_env_file_encoding,\n\u001b[1;32m 184\u001b[0m _env_ignore_empty\u001b[38;5;241m=\u001b[39m_env_ignore_empty,\n\u001b[1;32m 185\u001b[0m _env_nested_delimiter\u001b[38;5;241m=\u001b[39m_env_nested_delimiter,\n\u001b[1;32m 186\u001b[0m _env_nested_max_split\u001b[38;5;241m=\u001b[39m_env_nested_max_split,\n\u001b[1;32m 187\u001b[0m _env_parse_none_str\u001b[38;5;241m=\u001b[39m_env_parse_none_str,\n\u001b[1;32m 188\u001b[0m _env_parse_enums\u001b[38;5;241m=\u001b[39m_env_parse_enums,\n\u001b[1;32m 189\u001b[0m _cli_prog_name\u001b[38;5;241m=\u001b[39m_cli_prog_name,\n\u001b[1;32m 190\u001b[0m _cli_parse_args\u001b[38;5;241m=\u001b[39m_cli_parse_args,\n\u001b[1;32m 191\u001b[0m _cli_settings_source\u001b[38;5;241m=\u001b[39m_cli_settings_source,\n\u001b[1;32m 192\u001b[0m _cli_parse_none_str\u001b[38;5;241m=\u001b[39m_cli_parse_none_str,\n\u001b[1;32m 193\u001b[0m _cli_hide_none_type\u001b[38;5;241m=\u001b[39m_cli_hide_none_type,\n\u001b[1;32m 194\u001b[0m _cli_avoid_json\u001b[38;5;241m=\u001b[39m_cli_avoid_json,\n\u001b[1;32m 195\u001b[0m _cli_enforce_required\u001b[38;5;241m=\u001b[39m_cli_enforce_required,\n\u001b[1;32m 196\u001b[0m _cli_use_class_docs_for_groups\u001b[38;5;241m=\u001b[39m_cli_use_class_docs_for_groups,\n\u001b[1;32m 197\u001b[0m 
_cli_exit_on_error\u001b[38;5;241m=\u001b[39m_cli_exit_on_error,\n\u001b[1;32m 198\u001b[0m _cli_prefix\u001b[38;5;241m=\u001b[39m_cli_prefix,\n\u001b[1;32m 199\u001b[0m _cli_flag_prefix_char\u001b[38;5;241m=\u001b[39m_cli_flag_prefix_char,\n\u001b[1;32m 200\u001b[0m _cli_implicit_flags\u001b[38;5;241m=\u001b[39m_cli_implicit_flags,\n\u001b[1;32m 201\u001b[0m _cli_ignore_unknown_args\u001b[38;5;241m=\u001b[39m_cli_ignore_unknown_args,\n\u001b[1;32m 202\u001b[0m _cli_kebab_case\u001b[38;5;241m=\u001b[39m_cli_kebab_case,\n\u001b[1;32m 203\u001b[0m _secrets_dir\u001b[38;5;241m=\u001b[39m_secrets_dir,\n\u001b[1;32m 204\u001b[0m )\n\u001b[1;32m 205\u001b[0m )\n", + "File \u001b[0;32m~/anaconda3/lib/python3.11/site-packages/pydantic/main.py:214\u001b[0m, in \u001b[0;36mBaseModel.__init__\u001b[0;34m(self, **data)\u001b[0m\n\u001b[1;32m 212\u001b[0m \u001b[38;5;66;03m# `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks\u001b[39;00m\n\u001b[1;32m 213\u001b[0m __tracebackhide__ \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[0;32m--> 214\u001b[0m validated_self \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m__pydantic_validator__\u001b[38;5;241m.\u001b[39mvalidate_python(data, self_instance\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n\u001b[1;32m 215\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m validated_self:\n\u001b[1;32m 216\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(\n\u001b[1;32m 217\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mA custom validator is returning a value other than `self`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 218\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mReturning anything other than `self` from a top level model validator 
isn\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt supported when validating via `__init__`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 219\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mSee the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 220\u001b[0m stacklevel\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m,\n\u001b[1;32m 221\u001b[0m )\n", + "\u001b[0;31mValidationError\u001b[0m: 4 validation errors for EmberSettings\nregistry.models.3.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.4.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.5.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.6.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error" +======= + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/tmp/ipykernel_374013/1570386974.py:1: DeprecationWarning: initialize_ember() is deprecated. 
Use initialize_registry() from ember.core.registry.model.initialization instead.\n", + " model_registry = initialize_ember()\n", + "2025-03-21 00:23:33,902 [DEBUG] ConfigManager: Loading configuration...\n", + "2025-03-21 00:23:33,915 [DEBUG] ConfigManager: Configuration loaded successfully\n", + "2025-03-21 00:23:33,923 [INFO] ember.core.registry.model.initialization: Execute model discovery (timeout: 30 seconds per provider, running in parallel)\n", + "2025-03-21 00:23:33,939 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 00:23:33,964 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", + "2025-03-21 00:23:34,021 [DEBUG] ember.core.registry.model.base.registry.discovery: OPENAI_API_KEY found, initialized OpenAIDiscovery successfully\n", + "2025-03-21 00:23:34,026 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 00:23:34,031 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", + "2025-03-21 00:23:34,073 [DEBUG] ember.core.registry.model.base.registry.discovery: ANTHROPIC_API_KEY found, initialized AnthropicDiscovery successfully\n", + "2025-03-21 00:23:34,076 [DEBUG] ember.core.registry.model.base.registry.discovery: GOOGLE_API_KEY found, initialized DeepmindDiscovery successfully\n", + "2025-03-21 00:23:34,077 [INFO] ember.core.registry.model.initialization: Initiating model discovery via ModelDiscoveryService\n", + "2025-03-21 00:23:34,094 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Starting Anthropic model fetch via REST API...\n", + "2025-03-21 00:23:34,107 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Calling Anthropic REST API: https://api.anthropic.com/v1/models with timeout=(2,5)\n", + "2025-03-21 00:23:34,218 [DEBUG] openai._base_client: 
Request options: {'method': 'get', 'url': '/models', 'post_parser': ._parser at 0x7f16ab00f4c0>, 'json_data': None}\n", + "2025-03-21 00:23:34,253 [DEBUG] urllib3.connectionpool: Starting new HTTPS connection (1): api.anthropic.com:443\n", + "2025-03-21 00:23:34,256 [DEBUG] openai._base_client: Sending HTTP Request: GET https://api.openai.com/v1/models\n", + "2025-03-21 00:23:34,272 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=5.0 socket_options=None\n", + "2025-03-21 00:23:34,333 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 00:23:34,335 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=5.0\n", + "2025-03-21 00:23:34,359 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 00:23:34,362 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:23:34,374 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:23:34,377 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:23:34,380 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:23:34,383 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:23:34,499 [DEBUG] urllib3.connectionpool: https://api.anthropic.com:443 \"GET /v1/models HTTP/1.1\" 401 86\n", + "2025-03-21 00:23:34,506 [ERROR] ember.core.registry.model.providers.anthropic.anthropic_discovery: Error fetching Anthropic models via REST API: 401 Client Error: Unauthorized for url: https://api.anthropic.com/v1/models\n", + "2025-03-21 00:23:34,512 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Using fallback models due to API request error\n", + "2025-03-21 00:23:34,544 [INFO] ember.core.registry.model.base.registry.discovery: Provider DeepmindDiscovery completed in 0.43s\n", + "2025-03-21 00:23:34,546 [INFO] 
ember.core.registry.model.base.registry.discovery: Provider AnthropicDiscovery completed in 0.45s\n", + "2025-03-21 00:23:34,664 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:23:35 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'openai-version', b'2020-10-01'), (b'x-request-id', b'047be343f630a078753ab850368c8104'), (b'openai-processing-ms', b'213'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=lQoQY2KyrWGyGIl5tZm.yBfn5JGOSCA.AqZNb5sDQZ4-1742541815-1.0.1.1-jkeEwlGMhqCzRiPce_S94AqxyEmbQh2B4RQosPoE7.eFMwL5UwmspCv.OEN88cyk98iKiq0wLvcEGKQdTKIjJrKLMq4kGA32abjIo.do_WM; path=/; expires=Fri, 21-Mar-25 07:53:35 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=E2ZuBrVws6lg65OWU8SjS7lE_GkfGfdTWJ17B8epbyU-1742541815014-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb4661860d03d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:23:34,674 [INFO] httpx: HTTP Request: GET https://api.openai.com/v1/models \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:23:34,677 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:23:34,684 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:23:34,689 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:23:34,694 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:23:34,698 [DEBUG] openai._base_client: HTTP Response: GET https://api.openai.com/v1/models \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 07:23:35 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 
'keep-alive'), ('openai-version', '2020-10-01'), ('x-request-id', '047be343f630a078753ab850368c8104'), ('openai-processing-ms', '213'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=lQoQY2KyrWGyGIl5tZm.yBfn5JGOSCA.AqZNb5sDQZ4-1742541815-1.0.1.1-jkeEwlGMhqCzRiPce_S94AqxyEmbQh2B4RQosPoE7.eFMwL5UwmspCv.OEN88cyk98iKiq0wLvcEGKQdTKIjJrKLMq4kGA32abjIo.do_WM; path=/; expires=Fri, 21-Mar-25 07:53:35 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=E2ZuBrVws6lg65OWU8SjS7lE_GkfGfdTWJ17B8epbyU-1742541815014-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923bb4661860d03d-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:23:34,702 [DEBUG] openai._base_client: request_id: 047be343f630a078753ab850368c8104\n", + "2025-03-21 00:23:34,722 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Fetched 65 models from OpenAI API\n", + "2025-03-21 00:23:34,724 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Filtered to 43 relevant models\n", + "2025-03-21 00:23:34,729 [INFO] ember.core.registry.model.base.registry.discovery: Provider OpenAIDiscovery completed in 0.65s\n", + "2025-03-21 00:23:34,732 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 43 models from OpenAIDiscovery\n", + "2025-03-21 00:23:34,735 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 5 models from AnthropicDiscovery\n", + "2025-03-21 00:23:34,737 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 32 models from DeepmindDiscovery\n", + "2025-03-21 00:23:34,741 [INFO] ember.core.registry.model.base.registry.discovery: Discovered 80 models: ['openai:gpt-4o-mini-transcribe', 
'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 
'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 00:23:34,745 [DEBUG] ember.core.registry.model.initialization: Raw discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 
'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 00:23:34,752 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-transcribe discovered via API but not in local config; using defaults with environment 
API key.\n", + "2025-03-21 00:23:34,757 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-transcribe\n", + "2025-03-21 00:23:34,760 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,763 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-12-17\n", + "2025-03-21 00:23:34,764 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-3 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,765 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-3\n", + "2025-03-21 00:23:34,767 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-2 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,770 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-2\n", + "2025-03-21 00:23:34,772 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,775 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-10-01\n", + "2025-03-21 00:23:34,776 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,778 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
openai:gpt-4o-realtime-preview-2024-10-01\n", + "2025-03-21 00:23:34,780 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,782 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview\n", + "2025-03-21 00:23:34,783 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-large discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,784 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-large\n", + "2025-03-21 00:23:34,785 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,786 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4\n", + "2025-03-21 00:23:34,789 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-05-13 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,791 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-05-13\n", + "2025-03-21 00:23:34,792 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,794 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview\n", + "2025-03-21 00:23:34,797 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview discovered via API but not in 
local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,800 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview\n", + "2025-03-21 00:23:34,803 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct-0914 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,807 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct-0914\n", + "2025-03-21 00:23:34,809 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,811 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview\n", + "2025-03-21 00:23:34,812 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-1106 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,814 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-1106\n", + "2025-03-21 00:23:34,815 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,815 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview\n", + "2025-03-21 00:23:34,817 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,818 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully 
merged model info for openai:gpt-4-turbo\n", + "2025-03-21 00:23:34,820 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,823 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-12-17\n", + "2025-03-21 00:23:34,825 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,828 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct\n", + "2025-03-21 00:23:34,832 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,836 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo\n", + "2025-03-21 00:23:34,842 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,846 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-preview\n", + "2025-03-21 00:23:34,848 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,852 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview-2025-03-11\n", + "2025-03-21 00:23:34,856 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,859 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview\n", + "2025-03-21 00:23:34,862 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-0125 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,863 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-0125\n", + "2025-03-21 00:23:34,867 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-08-06 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,868 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-08-06\n", + "2025-03-21 00:23:34,870 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-2024-04-09 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,872 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-2024-04-09\n", + "2025-03-21 00:23:34,873 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-16k discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,874 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-16k\n", + "2025-03-21 00:23:34,875 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 
00:23:34,876 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o\n", + "2025-03-21 00:23:34,877 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,878 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview-2024-12-17\n", + "2025-03-21 00:23:34,881 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-1106-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,883 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-1106-preview\n", + "2025-03-21 00:23:34,884 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-ada-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,885 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-ada-002\n", + "2025-03-21 00:23:34,889 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0613 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,891 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0613\n", + "2025-03-21 00:23:34,894 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,897 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview\n", + "2025-03-21 00:23:34,899 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview-2025-02-27 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,900 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview-2025-02-27\n", + "2025-03-21 00:23:34,901 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,902 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview-2025-03-11\n", + "2025-03-21 00:23:34,903 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-11-20 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,904 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-11-20\n", + "2025-03-21 00:23:34,905 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-2024-07-18 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,905 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-2024-07-18\n", + "2025-03-21 00:23:34,906 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-tts discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,907 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-tts\n", + "2025-03-21 00:23:34,907 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini discovered via API but not in local config; using defaults with environment API 
key.\n", + "2025-03-21 00:23:34,908 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini\n", + "2025-03-21 00:23:34,909 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0125-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,912 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0125-preview\n", + "2025-03-21 00:23:34,913 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-transcribe discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,915 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-transcribe\n", + "2025-03-21 00:23:34,916 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-small discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,917 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-small\n", + "2025-03-21 00:23:34,917 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,918 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview-2024-12-17\n", + "2025-03-21 00:23:34,919 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-sonnet discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,920 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-sonnet\n", + 
"2025-03-21 00:23:34,921 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-opus discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,922 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-opus\n", + "2025-03-21 00:23:34,925 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-haiku discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,927 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-haiku\n", + "2025-03-21 00:23:34,928 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.5-sonnet discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,929 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.5-sonnet\n", + "2025-03-21 00:23:34,931 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.7-sonnet discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,932 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.7-sonnet\n", + "2025-03-21 00:23:34,934 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.0-pro-vision-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,935 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.0-pro-vision-latest\n", + "2025-03-21 00:23:34,936 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-pro-vision discovered via API but not in local 
config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,937 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-pro-vision\n", + "2025-03-21 00:23:34,938 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,940 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-latest\n", + "2025-03-21 00:23:34,940 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,941 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-001\n", + "2025-03-21 00:23:34,942 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,943 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-002\n", + "2025-03-21 00:23:34,944 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,946 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro\n", + "2025-03-21 00:23:34,947 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,947 [DEBUG] 
ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-latest\n", + "2025-03-21 00:23:34,949 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,951 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001\n", + "2025-03-21 00:23:34,952 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001-tuning discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,953 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001-tuning\n", + "2025-03-21 00:23:34,955 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,956 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash\n", + "2025-03-21 00:23:34,957 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,958 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-002\n", + "2025-03-21 00:23:34,959 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,960 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
google:models/gemini-1.5-flash-8b\n", + "2025-03-21 00:23:34,961 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,965 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-001\n", + "2025-03-21 00:23:34,966 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,967 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-latest\n", + "2025-03-21 00:23:34,969 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0827 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,970 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0827\n", + "2025-03-21 00:23:34,970 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0924 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,971 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0924\n", + "2025-03-21 00:23:34,973 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,975 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp\n", + "2025-03-21 
00:23:34,976 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,977 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash\n", + "2025-03-21 00:23:34,978 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,980 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-001\n", + "2025-03-21 00:23:34,982 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp-image-generation discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,983 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp-image-generation\n", + "2025-03-21 00:23:34,985 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,986 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-001\n", + "2025-03-21 00:23:34,987 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,987 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite\n", + "2025-03-21 00:23:34,989 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview-02-05 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,989 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview-02-05\n", + "2025-03-21 00:23:34,990 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,991 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview\n", + "2025-03-21 00:23:34,993 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,994 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp\n", + "2025-03-21 00:23:34,997 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp-02-05 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:34,998 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp-02-05\n", + "2025-03-21 00:23:35,000 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-exp-1206 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:35,001 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-exp-1206\n", + "2025-03-21 00:23:35,003 [WARNING] ember.core.registry.model.base.registry.discovery: Model 
google:models/gemini-2.0-flash-thinking-exp-01-21 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:35,004 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-01-21\n", + "2025-03-21 00:23:35,004 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:35,005 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp\n", + "2025-03-21 00:23:35,006 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-1219 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:35,008 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-1219\n", + "2025-03-21 00:23:35,011 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/learnlm-1.5-pro-experimental discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:35,013 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/learnlm-1.5-pro-experimental\n", + "2025-03-21 00:23:35,014 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemma-3-27b-it discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 00:23:35,015 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemma-3-27b-it\n", + "2025-03-21 00:23:35,017 [DEBUG] ember.core.registry.model.initialization: Merged discovery found 80 models: 
['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 
'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 00:23:35,018 [INFO] ember.core.registry.model.initialization: Registering 80 models from discovery\n", + "2025-03-21 00:23:35,022 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-transcribe (provider: Openai)\n", + "2025-03-21 00:23:35,025 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", + "2025-03-21 00:23:35,027 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", + "2025-03-21 00:23:35,031 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 00:23:35,032 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:23:35,035 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider 
Openai\n", + "2025-03-21 00:23:35,037 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-3 (provider: Openai)\n", + "2025-03-21 00:23:35,038 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", + "2025-03-21 00:23:35,039 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", + "2025-03-21 00:23:35,040 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-2 (provider: Openai)\n", + "2025-03-21 00:23:35,041 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", + "2025-03-21 00:23:35,042 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", + "2025-03-21 00:23:35,043 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-10-01 (provider: Openai)\n", + "2025-03-21 00:23:35,044 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", + "2025-03-21 00:23:35,045 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", + "2025-03-21 00:23:35,047 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-10-01 (provider: Openai)\n", + "2025-03-21 00:23:35,048 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", + "2025-03-21 00:23:35,050 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", + "2025-03-21 00:23:35,051 
[DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview (provider: Openai)\n", + "2025-03-21 00:23:35,056 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", + "2025-03-21 00:23:35,057 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", + "2025-03-21 00:23:35,059 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-large (provider: Openai)\n", + "2025-03-21 00:23:35,063 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", + "2025-03-21 00:23:35,064 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", + "2025-03-21 00:23:35,066 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4 (provider: Openai)\n", + "2025-03-21 00:23:35,067 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", + "2025-03-21 00:23:35,068 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", + "2025-03-21 00:23:35,069 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-05-13 (provider: Openai)\n", + "2025-03-21 00:23:35,073 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", + "2025-03-21 00:23:35,075 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", + "2025-03-21 00:23:35,076 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
openai:gpt-4o-realtime-preview (provider: Openai)\n", + "2025-03-21 00:23:35,077 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", + "2025-03-21 00:23:35,078 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", + "2025-03-21 00:23:35,079 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview (provider: Openai)\n", + "2025-03-21 00:23:35,080 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", + "2025-03-21 00:23:35,081 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", + "2025-03-21 00:23:35,083 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct-0914 (provider: Openai)\n", + "2025-03-21 00:23:35,083 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", + "2025-03-21 00:23:35,088 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", + "2025-03-21 00:23:35,090 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview (provider: Openai)\n", + "2025-03-21 00:23:35,091 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", + "2025-03-21 00:23:35,092 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", + "2025-03-21 00:23:35,093 [DEBUG] ember.core.registry.model.initialization: Attempting to 
register discovered model: openai:gpt-3.5-turbo-1106 (provider: Openai)\n", + "2025-03-21 00:23:35,096 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", + "2025-03-21 00:23:35,097 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", + "2025-03-21 00:23:35,098 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview (provider: Openai)\n", + "2025-03-21 00:23:35,098 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", + "2025-03-21 00:23:35,099 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", + "2025-03-21 00:23:35,101 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo (provider: Openai)\n", + "2025-03-21 00:23:35,102 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", + "2025-03-21 00:23:35,104 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", + "2025-03-21 00:23:35,108 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 00:23:35,109 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:23:35,111 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:23:35,114 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
openai:gpt-3.5-turbo-instruct (provider: Openai)\n", + "2025-03-21 00:23:35,115 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", + "2025-03-21 00:23:35,116 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", + "2025-03-21 00:23:35,118 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo (provider: Openai)\n", + "2025-03-21 00:23:35,119 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", + "2025-03-21 00:23:35,120 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", + "2025-03-21 00:23:35,121 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-preview (provider: Openai)\n", + "2025-03-21 00:23:35,122 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", + "2025-03-21 00:23:35,123 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", + "2025-03-21 00:23:35,124 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview-2025-03-11 (provider: Openai)\n", + "2025-03-21 00:23:35,126 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 00:23:35,127 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 00:23:35,128 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
openai:gpt-4o-mini-realtime-preview (provider: Openai)\n", + "2025-03-21 00:23:35,130 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", + "2025-03-21 00:23:35,131 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", + "2025-03-21 00:23:35,132 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-0125 (provider: Openai)\n", + "2025-03-21 00:23:35,132 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", + "2025-03-21 00:23:35,133 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", + "2025-03-21 00:23:35,135 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-08-06 (provider: Openai)\n", + "2025-03-21 00:23:35,136 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", + "2025-03-21 00:23:35,136 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", + "2025-03-21 00:23:35,137 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-2024-04-09 (provider: Openai)\n", + "2025-03-21 00:23:35,143 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", + "2025-03-21 00:23:35,144 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", + "2025-03-21 00:23:35,145 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-16k 
(provider: Openai)\n", + "2025-03-21 00:23:35,147 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", + "2025-03-21 00:23:35,148 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", + "2025-03-21 00:23:35,148 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o (provider: Openai)\n", + "2025-03-21 00:23:35,149 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", + "2025-03-21 00:23:35,150 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", + "2025-03-21 00:23:35,151 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 00:23:35,152 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:23:35,155 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:23:35,157 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-1106-preview (provider: Openai)\n", + "2025-03-21 00:23:35,159 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", + "2025-03-21 00:23:35,160 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", + "2025-03-21 00:23:35,163 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-ada-002 (provider: Openai)\n", + "2025-03-21 
00:23:35,168 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-ada-002 with provider Openai\n", + "2025-03-21 00:23:35,170 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-ada-002 with provider Openai\n", + "2025-03-21 00:23:35,172 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0613 (provider: Openai)\n", + "2025-03-21 00:23:35,173 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", + "2025-03-21 00:23:35,174 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", + "2025-03-21 00:23:35,174 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview (provider: Openai)\n", + "2025-03-21 00:23:35,176 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", + "2025-03-21 00:23:35,177 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", + "2025-03-21 00:23:35,178 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview-2025-02-27 (provider: Openai)\n", + "2025-03-21 00:23:35,179 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", + "2025-03-21 00:23:35,183 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", + "2025-03-21 00:23:35,184 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview-2025-03-11 (provider: Openai)\n", + "2025-03-21 00:23:35,185 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 00:23:35,187 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 00:23:35,189 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-11-20 (provider: Openai)\n", + "2025-03-21 00:23:35,190 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", + "2025-03-21 00:23:35,192 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", + "2025-03-21 00:23:35,193 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-2024-07-18 (provider: Openai)\n", + "2025-03-21 00:23:35,194 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", + "2025-03-21 00:23:35,196 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", + "2025-03-21 00:23:35,197 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-tts (provider: Openai)\n", + "2025-03-21 00:23:35,200 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", + "2025-03-21 00:23:35,202 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", + "2025-03-21 00:23:35,203 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini (provider: Openai)\n", + "2025-03-21 00:23:35,204 [INFO] ember.core.registry.model.initialization: 
Successfully registered model: openai:gpt-4o-mini with provider Openai\n", + "2025-03-21 00:23:35,205 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", + "2025-03-21 00:23:35,208 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0125-preview (provider: Openai)\n", + "2025-03-21 00:23:35,209 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", + "2025-03-21 00:23:35,210 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", + "2025-03-21 00:23:35,212 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-transcribe (provider: Openai)\n", + "2025-03-21 00:23:35,213 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", + "2025-03-21 00:23:35,215 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", + "2025-03-21 00:23:35,217 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-small (provider: Openai)\n", + "2025-03-21 00:23:35,220 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", + "2025-03-21 00:23:35,222 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", + "2025-03-21 00:23:35,224 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 00:23:35,226 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:23:35,228 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 00:23:35,230 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-sonnet (provider: Anthropic)\n", + "2025-03-21 00:23:35,232 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", + "2025-03-21 00:23:35,233 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", + "2025-03-21 00:23:35,235 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-opus (provider: Anthropic)\n", + "2025-03-21 00:23:35,237 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", + "2025-03-21 00:23:35,239 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", + "2025-03-21 00:23:35,241 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-haiku (provider: Anthropic)\n", + "2025-03-21 00:23:35,242 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", + "2025-03-21 00:23:35,252 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", + "2025-03-21 00:23:35,260 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.5-sonnet (provider: Anthropic)\n", + "2025-03-21 00:23:35,261 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
anthropic:claude-3.5-sonnet with provider Anthropic\n", + "2025-03-21 00:23:35,262 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", + "2025-03-21 00:23:35,263 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.7-sonnet (provider: Anthropic)\n", + "2025-03-21 00:23:35,265 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", + "2025-03-21 00:23:35,266 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", + "2025-03-21 00:23:35,266 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.0-pro-vision-latest (provider: Google)\n", + "2025-03-21 00:23:35,267 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", + "2025-03-21 00:23:35,268 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", + "2025-03-21 00:23:35,269 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-pro-vision (provider: Google)\n", + "2025-03-21 00:23:35,270 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", + "2025-03-21 00:23:35,270 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", + "2025-03-21 00:23:35,271 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-latest (provider: Google)\n", + "2025-03-21 00:23:35,271 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", + "2025-03-21 00:23:35,273 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", + "2025-03-21 00:23:35,275 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-001 (provider: Google)\n", + "2025-03-21 00:23:35,276 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", + "2025-03-21 00:23:35,277 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", + "2025-03-21 00:23:35,278 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-002 (provider: Google)\n", + "2025-03-21 00:23:35,280 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", + "2025-03-21 00:23:35,281 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", + "2025-03-21 00:23:35,283 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro (provider: Google)\n", + "2025-03-21 00:23:35,285 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", + "2025-03-21 00:23:35,285 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", + "2025-03-21 00:23:35,286 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-latest (provider: Google)\n", + "2025-03-21 
00:23:35,287 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", + "2025-03-21 00:23:35,288 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", + "2025-03-21 00:23:35,290 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001 (provider: Google)\n", + "2025-03-21 00:23:35,290 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", + "2025-03-21 00:23:35,291 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", + "2025-03-21 00:23:35,292 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001-tuning (provider: Google)\n", + "2025-03-21 00:23:35,292 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", + "2025-03-21 00:23:35,293 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", + "2025-03-21 00:23:35,296 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash (provider: Google)\n", + "2025-03-21 00:23:35,305 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", + "2025-03-21 00:23:35,307 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", + "2025-03-21 00:23:35,308 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
google:models/gemini-1.5-flash-002 (provider: Google)\n", + "2025-03-21 00:23:35,309 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", + "2025-03-21 00:23:35,310 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", + "2025-03-21 00:23:35,312 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b (provider: Google)\n", + "2025-03-21 00:23:35,314 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", + "2025-03-21 00:23:35,315 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", + "2025-03-21 00:23:35,318 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-001 (provider: Google)\n", + "2025-03-21 00:23:35,319 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", + "2025-03-21 00:23:35,321 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", + "2025-03-21 00:23:35,325 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-latest (provider: Google)\n", + "2025-03-21 00:23:35,326 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", + "2025-03-21 00:23:35,328 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", + "2025-03-21 00:23:35,330 [DEBUG] 
ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0827 (provider: Google)\n", + "2025-03-21 00:23:35,332 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", + "2025-03-21 00:23:35,335 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", + "2025-03-21 00:23:35,338 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0924 (provider: Google)\n", + "2025-03-21 00:23:35,340 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", + "2025-03-21 00:23:35,341 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", + "2025-03-21 00:23:35,344 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp (provider: Google)\n", + "2025-03-21 00:23:35,352 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", + "2025-03-21 00:23:35,354 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", + "2025-03-21 00:23:35,357 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash (provider: Google)\n", + "2025-03-21 00:23:35,359 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", + "2025-03-21 00:23:35,362 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
google:models/gemini-2.0-flash with provider Google\n", + "2025-03-21 00:23:35,365 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-001 (provider: Google)\n", + "2025-03-21 00:23:35,369 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", + "2025-03-21 00:23:35,372 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", + "2025-03-21 00:23:35,374 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp-image-generation (provider: Google)\n", + "2025-03-21 00:23:35,376 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", + "2025-03-21 00:23:35,378 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", + "2025-03-21 00:23:35,380 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-001 (provider: Google)\n", + "2025-03-21 00:23:35,382 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", + "2025-03-21 00:23:35,384 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", + "2025-03-21 00:23:35,386 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite (provider: Google)\n", + "2025-03-21 00:23:35,388 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", + 
"2025-03-21 00:23:35,390 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", + "2025-03-21 00:23:35,391 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview-02-05 (provider: Google)\n", + "2025-03-21 00:23:35,393 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", + "2025-03-21 00:23:35,399 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", + "2025-03-21 00:23:35,405 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview (provider: Google)\n", + "2025-03-21 00:23:35,409 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", + "2025-03-21 00:23:35,411 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", + "2025-03-21 00:23:35,413 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp (provider: Google)\n", + "2025-03-21 00:23:35,418 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", + "2025-03-21 00:23:35,422 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", + "2025-03-21 00:23:35,424 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp-02-05 (provider: Google)\n", + "2025-03-21 00:23:35,427 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", + "2025-03-21 00:23:35,431 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", + "2025-03-21 00:23:35,434 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-exp-1206 (provider: Google)\n", + "2025-03-21 00:23:35,435 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", + "2025-03-21 00:23:35,437 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", + "2025-03-21 00:23:35,439 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-01-21 (provider: Google)\n", + "2025-03-21 00:23:35,440 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", + "2025-03-21 00:23:35,440 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", + "2025-03-21 00:23:35,442 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp (provider: Google)\n", + "2025-03-21 00:23:35,443 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", + "2025-03-21 00:23:35,444 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", + "2025-03-21 00:23:35,445 [DEBUG] ember.core.registry.model.initialization: Attempting to register 
discovered model: google:models/gemini-2.0-flash-thinking-exp-1219 (provider: Google)\n", + "2025-03-21 00:23:35,446 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", + "2025-03-21 00:23:35,447 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", + "2025-03-21 00:23:35,448 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/learnlm-1.5-pro-experimental (provider: Google)\n", + "2025-03-21 00:23:35,449 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", + "2025-03-21 00:23:35,450 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", + "2025-03-21 00:23:35,451 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemma-3-27b-it (provider: Google)\n", + "2025-03-21 00:23:35,454 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", + "2025-03-21 00:23:35,455 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", + "2025-03-21 00:23:35,456 [INFO] ember.core.registry.model.initialization: Registration summary: 80 new, 0 skipped, 0 failed\n", + "2025-03-21 00:23:35,457 [INFO] ember.core.registry.model.initialization: Successfully discovered and registered 80 new models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 
'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 
'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 00:23:35,458 [INFO] ember.core.registry.model.initialization: Discovered 80 new models in 1.53s: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13'] and 70 more\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 
'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 
'google:models/gemma-3-27b-it']\n" + ] + } + ], + "source": [ + "model_registry = initialize_ember()\n", + "print(model_registry.list_models())\n", + "llm = ModelService(registry=model_registry)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['openai:gpt-4o-mini-transcribe',\n", + " 'openai:gpt-4o-audio-preview-2024-12-17',\n", + " 'openai:dall-e-3',\n", + " 'openai:dall-e-2',\n", + " 'openai:gpt-4o-audio-preview-2024-10-01',\n", + " 'openai:gpt-4o-realtime-preview-2024-10-01',\n", + " 'openai:gpt-4o-audio-preview',\n", + " 'openai:text-embedding-3-large',\n", + " 'openai:gpt-4',\n", + " 'openai:gpt-4o-2024-05-13',\n", + " 'openai:gpt-4o-realtime-preview',\n", + " 'openai:gpt-4o-mini-audio-preview',\n", + " 'openai:gpt-3.5-turbo-instruct-0914',\n", + " 'openai:gpt-4o-mini-search-preview',\n", + " 'openai:gpt-3.5-turbo-1106',\n", + " 'openai:gpt-4o-search-preview',\n", + " 'openai:gpt-4-turbo',\n", + " 'openai:gpt-4o-realtime-preview-2024-12-17',\n", + " 'openai:gpt-3.5-turbo-instruct',\n", + " 'openai:gpt-3.5-turbo',\n", + " 'openai:gpt-4-turbo-preview',\n", + " 'openai:gpt-4o-mini-search-preview-2025-03-11',\n", + " 'openai:gpt-4o-mini-realtime-preview',\n", + " 'openai:gpt-3.5-turbo-0125',\n", + " 'openai:gpt-4o-2024-08-06',\n", + " 'openai:gpt-4-turbo-2024-04-09',\n", + " 'openai:gpt-3.5-turbo-16k',\n", + " 'openai:gpt-4o',\n", + " 'openai:gpt-4o-mini-realtime-preview-2024-12-17',\n", + " 'openai:gpt-4-1106-preview',\n", + " 'openai:text-embedding-ada-002',\n", + " 'openai:gpt-4-0613',\n", + " 'openai:gpt-4.5-preview',\n", + " 'openai:gpt-4.5-preview-2025-02-27',\n", + " 'openai:gpt-4o-search-preview-2025-03-11',\n", + " 'openai:gpt-4o-2024-11-20',\n", + " 'openai:gpt-4o-mini-2024-07-18',\n", + 
'openai:gpt-4o-mini-tts',\n", + " 'openai:gpt-4o-mini',\n", + " 'openai:gpt-4-0125-preview',\n", + " 'openai:gpt-4o-transcribe',\n", + " 'openai:text-embedding-3-small',\n", + " 'openai:gpt-4o-mini-audio-preview-2024-12-17',\n", + " 'anthropic:claude-3-sonnet',\n", + " 'anthropic:claude-3-opus',\n", + " 'anthropic:claude-3-haiku',\n", + " 'anthropic:claude-3.5-sonnet',\n", + " 'anthropic:claude-3.7-sonnet',\n", + " 'google:models/gemini-1.0-pro-vision-latest',\n", + " 'google:models/gemini-pro-vision',\n", + " 'google:models/gemini-1.5-pro-latest',\n", + " 'google:models/gemini-1.5-pro-001',\n", + " 'google:models/gemini-1.5-pro-002',\n", + " 'google:models/gemini-1.5-pro',\n", + " 'google:models/gemini-1.5-flash-latest',\n", + " 'google:models/gemini-1.5-flash-001',\n", + " 'google:models/gemini-1.5-flash-001-tuning',\n", + " 'google:models/gemini-1.5-flash',\n", + " 'google:models/gemini-1.5-flash-002',\n", + " 'google:models/gemini-1.5-flash-8b',\n", + " 'google:models/gemini-1.5-flash-8b-001',\n", + " 'google:models/gemini-1.5-flash-8b-latest',\n", + " 'google:models/gemini-1.5-flash-8b-exp-0827',\n", + " 'google:models/gemini-1.5-flash-8b-exp-0924',\n", + " 'google:models/gemini-2.0-flash-exp',\n", + " 'google:models/gemini-2.0-flash',\n", + " 'google:models/gemini-2.0-flash-001',\n", + " 'google:models/gemini-2.0-flash-exp-image-generation',\n", + " 'google:models/gemini-2.0-flash-lite-001',\n", + " 'google:models/gemini-2.0-flash-lite',\n", + " 'google:models/gemini-2.0-flash-lite-preview-02-05',\n", + " 'google:models/gemini-2.0-flash-lite-preview',\n", + " 'google:models/gemini-2.0-pro-exp',\n", + " 'google:models/gemini-2.0-pro-exp-02-05',\n", + " 'google:models/gemini-exp-1206',\n", + " 'google:models/gemini-2.0-flash-thinking-exp-01-21',\n", + " 'google:models/gemini-2.0-flash-thinking-exp',\n", + " 'google:models/gemini-2.0-flash-thinking-exp-1219',\n", + " 'google:models/learnlm-1.5-pro-experimental',\n", + " 'google:models/gemma-3-27b-it']" + ] + }, 
+ "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model_registry.list_models()" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "model_ids: List[str] = [\n", + " \"openai:o1\",\n", + " \"openai:gpt-4o\",\n", + " \"openai:gpt-4o-mini\",\n", + " # \"anthropic:claude-3.5-sonnet\", # API key not working\n", + " # \"invalid:model\", # Expected to trigger an error.\n", + " # \"google:model/gemini-1.5-pro\", # need to fix model alignment\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:18:22,993 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. Using the registered provider.\n", + "2025-03-21 00:18:22,994 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o' using provider class 'OpenAIModel'.\n", + "2025-03-21 00:18:22,995 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o\n", + "2025-03-21 00:18:22,997 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:18:23,001 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 00:18:23,005 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:18:23,032 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:18:23,033 [DEBUG] 
openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:18:23,035 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "➡️ Testing model: openai:o1\n", + "❌ Error with model openai:o1: Model 'openai:o1' not found. Available models:\n", + "- openai:gpt-4o-mini-transcribe\n", + "- openai:gpt-4o-audio-preview-2024-12-17\n", + "- openai:dall-e-3\n", + "- openai:dall-e-2\n", + "- openai:gpt-4o-audio-preview-2024-10-01\n", + "- openai:gpt-4o-realtime-preview-2024-10-01\n", + "- openai:gpt-4o-audio-preview\n", + "- openai:text-embedding-3-large\n", + "- openai:gpt-4\n", + "- openai:gpt-4o-2024-05-13\n", + "- openai:gpt-4o-realtime-preview\n", + "- openai:gpt-4o-mini-audio-preview\n", + "- openai:gpt-3.5-turbo-instruct-0914\n", + "- openai:gpt-4o-mini-search-preview\n", + "- openai:gpt-3.5-turbo-1106\n", + "- openai:gpt-4o-search-preview\n", + "- openai:gpt-4-turbo\n", + "- openai:gpt-4o-realtime-preview-2024-12-17\n", + "- openai:gpt-3.5-turbo-instruct\n", + "- openai:gpt-3.5-turbo\n", + "- openai:gpt-4-turbo-preview\n", + "- openai:gpt-4o-mini-search-preview-2025-03-11\n", + "- openai:gpt-4o-mini-realtime-preview\n", + "- openai:gpt-3.5-turbo-0125\n", + "- openai:gpt-4o-2024-08-06\n", + "- openai:gpt-4-turbo-2024-04-09\n", + "- openai:gpt-3.5-turbo-16k\n", + "- openai:gpt-4o\n", + "- openai:gpt-4o-mini-realtime-preview-2024-12-17\n", + "- openai:gpt-4-1106-preview\n", + "- openai:text-embedding-ada-002\n", + "- openai:gpt-4-0613\n", + "- openai:gpt-4.5-preview\n", + "- openai:gpt-4.5-preview-2025-02-27\n", + "- openai:gpt-4o-search-preview-2025-03-11\n", + "- openai:gpt-4o-2024-11-20\n", + "- openai:gpt-4o-mini-2024-07-18\n", + "- openai:gpt-4o-mini-tts\n", + "- openai:gpt-4o-mini\n", + "- openai:gpt-4-0125-preview\n", + "- openai:gpt-4o-transcribe\n", + "- 
openai:text-embedding-3-small\n", + "- openai:gpt-4o-mini-audio-preview-2024-12-17\n", + "- anthropic:claude-3-sonnet\n", + "- anthropic:claude-3-opus\n", + "- anthropic:claude-3-haiku\n", + "- anthropic:claude-3.5-sonnet\n", + "- anthropic:claude-3.7-sonnet\n", + "- google:models/gemini-1.0-pro-vision-latest\n", + "- google:models/gemini-pro-vision\n", + "- google:models/gemini-1.5-pro-latest\n", + "- google:models/gemini-1.5-pro-001\n", + "- google:models/gemini-1.5-pro-002\n", + "- google:models/gemini-1.5-pro\n", + "- google:models/gemini-1.5-flash-latest\n", + "- google:models/gemini-1.5-flash-001\n", + "- google:models/gemini-1.5-flash-001-tuning\n", + "- google:models/gemini-1.5-flash\n", + "- google:models/gemini-1.5-flash-002\n", + "- google:models/gemini-1.5-flash-8b\n", + "- google:models/gemini-1.5-flash-8b-001\n", + "- google:models/gemini-1.5-flash-8b-latest\n", + "- google:models/gemini-1.5-flash-8b-exp-0827\n", + "- google:models/gemini-1.5-flash-8b-exp-0924\n", + "- google:models/gemini-2.0-flash-exp\n", + "- google:models/gemini-2.0-flash\n", + "- google:models/gemini-2.0-flash-001\n", + "- google:models/gemini-2.0-flash-exp-image-generation\n", + "- google:models/gemini-2.0-flash-lite-001\n", + "- google:models/gemini-2.0-flash-lite\n", + "- google:models/gemini-2.0-flash-lite-preview-02-05\n", + "- google:models/gemini-2.0-flash-lite-preview\n", + "- google:models/gemini-2.0-pro-exp\n", + "- google:models/gemini-2.0-pro-exp-02-05\n", + "- google:models/gemini-exp-1206\n", + "- google:models/gemini-2.0-flash-thinking-exp-01-21\n", + "- google:models/gemini-2.0-flash-thinking-exp\n", + "- google:models/gemini-2.0-flash-thinking-exp-1219\n", + "- google:models/learnlm-1.5-pro-experimental\n", + "- google:models/gemma-3-27b-it\n", + "➡️ Testing model: openai:gpt-4o\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:18:23,052 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 
00:18:23,053 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", + "2025-03-21 00:18:23,068 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 00:18:23,069 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:18:23,071 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:18:23,072 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:18:23,075 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:18:23,078 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:18:24,532 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:18:24 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1350'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_4484c1b7b2d43adb83ccc149b107da95'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=6lh2Yp5h0FaYvXxpQQAxO9Jt6HDCdmUeuY4.kM2i6ps-1742541504-1.0.1.1-_IHIr..1OAWdoybv_Qs3tz4oLMWDWudghLRy7.RfguO5RHXiKnjZ_j3p3t6MOuUfyuRPgEE7hksYaVr_aZjLjYeXWFZh8PG6vgZ3yCPlnOk; path=/; expires=Fri, 21-Mar-25 07:48:24 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', 
b'_cfuvid=DanRUEufXKZ7K8DxP_7kPrhHNdJOhS0UH.jyo2WiVto-1742541504892-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923baccc7a771566-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:18:24,535 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:18:24,536 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:18:24,594 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:18:24,596 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:18:24,598 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:18:24,601 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 07:18:24 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('access-control-expose-headers', 'X-Request-ID'), ('openai-organization', 'user-iqhmndueuqg2ljzblqkr2tgh'), ('openai-processing-ms', '1350'), ('openai-version', '2020-10-01'), ('x-ratelimit-limit-requests', '50000'), ('x-ratelimit-limit-tokens', '150000000'), ('x-ratelimit-remaining-requests', '49999'), ('x-ratelimit-remaining-tokens', '149999987'), ('x-ratelimit-reset-requests', '1ms'), ('x-ratelimit-reset-tokens', '0s'), ('x-request-id', 'req_4484c1b7b2d43adb83ccc149b107da95'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=6lh2Yp5h0FaYvXxpQQAxO9Jt6HDCdmUeuY4.kM2i6ps-1742541504-1.0.1.1-_IHIr..1OAWdoybv_Qs3tz4oLMWDWudghLRy7.RfguO5RHXiKnjZ_j3p3t6MOuUfyuRPgEE7hksYaVr_aZjLjYeXWFZh8PG6vgZ3yCPlnOk; path=/; expires=Fri, 21-Mar-25 07:48:24 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', 
'_cfuvid=DanRUEufXKZ7K8DxP_7kPrhHNdJOhS0UH.jyo2WiVto-1742541504892-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923baccc7a771566-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:18:24,604 [DEBUG] openai._base_client: request_id: req_4484c1b7b2d43adb83ccc149b107da95\n", + "2025-03-21 00:18:24,615 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:18:24,624 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:18:24,631 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:18:24,637 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:18:24,641 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:18:24,646 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:18:24,654 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:18:24,660 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🛎️ Service response from openai:gpt-4o:\n", + "Quantum computing leverages quantum mechanics principles, using qubits that exist in multiple states simultaneously. This allows for immense parallel processing power, enabling computations beyond classical computers' capabilities. Entanglement and superposition enhance efficiency, promising breakthroughs in cryptography, optimization, and complex problem-solving. 
It's transformative, yet still largely experimental.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:18:24,922 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:18:25 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'227'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999989'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_a4d75e258ae593c95536f95bfb4b46d8'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bacd65ade1566-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:18:24,924 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:18:24,925 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:18:24,927 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:18:24,928 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:18:24,929 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:18:24,930 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:18:25 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 
'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '227', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999989', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_a4d75e258ae593c95536f95bfb4b46d8', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bacd65ade1566-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:18:24,931 [DEBUG] openai._base_client: request_id: req_a4d75e258ae593c95536f95bfb4b46d8\n", + "2025-03-21 00:18:24,932 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. Using the registered provider.\n", + "2025-03-21 00:18:24,933 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o-mini' using provider class 'OpenAIModel'.\n", + "2025-03-21 00:18:24,933 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o-mini\n", + "2025-03-21 00:18:24,934 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:18:24,938 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:18:24,939 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:18:24,940 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:18:24,941 [DEBUG] httpcore.http11: 
send_request_headers.complete\n", + "2025-03-21 00:18:24,943 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:18:24,944 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:18:24,945 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🎯 Direct response from openai:gpt-4o:\n", + "The capital of France is Paris.\n", + "\n", + "➡️ Testing model: openai:gpt-4o-mini\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:18:26,197 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:18:26 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1201'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999988'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_5e2501f946b9db85f3fe2255e49f7894'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bacd83c6f1566-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:18:26,201 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:18:26,204 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:18:26,210 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 
00:18:26,213 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:18:26,215 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:18:26,218 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:18:26 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '1201', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999988', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_5e2501f946b9db85f3fe2255e49f7894', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bacd83c6f1566-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:18:26,221 [DEBUG] openai._base_client: request_id: req_5e2501f946b9db85f3fe2255e49f7894\n", + "2025-03-21 00:18:26,225 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:18:26,235 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:18:26,238 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:18:26,240 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:18:26,245 [DEBUG] httpcore.http11: 
send_request_headers.complete\n", + "2025-03-21 00:18:26,247 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:18:26,251 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:18:26,253 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🛎️ Service response from openai:gpt-4o-mini:\n", + "Quantum computing harnesses the principles of quantum mechanics to process information. Unlike classical bits, quantum bits (qubits) can exist in multiple states simultaneously, enabling parallel computations. This potential for massive parallelism allows quantum computers to solve complex problems, such as optimization and cryptography, much faster than traditional computers can.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:18:26,743 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:18:27 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'450'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_7c748282fa721b7ab5121245acddd69d'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'CF-Cache-Status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bace05afe1566-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 
00:18:26,745 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:18:26,746 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:18:26,752 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:18:26,753 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:18:26,755 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:18:26,756 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:18:27 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '450', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_7c748282fa721b7ab5121245acddd69d', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bace05afe1566-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:18:26,757 [DEBUG] openai._base_client: request_id: req_7c748282fa721b7ab5121245acddd69d\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🎯 Direct response from openai:gpt-4o-mini:\n", + "The capital of France is Paris.\n", + "\n" + ] + } + ], + "source": [ + "for model_id in model_ids:\n", + " try:\n", + " print(f\"➡️ Testing model: {model_id}\")\n", + "\n", + " # Two usage styles are demonstrated below:\n", + " # 1. 
Service-based invocation: Recommended for automatic usage tracking.\n", + " service_response: ChatResponse = llm.invoke_model(\n", + " model_id=model_id,\n", + " prompt=\"Explain quantum computing in 50 words\",\n", + " )\n", + " print(f\"🛎️ Service response from {model_id}:\\n{service_response.data}\\n\")\n", + "\n", + " # 2. Direct model instance usage: Useful for more granular or PyTorch-like workflows.\n", + " model = load_model(model_id=model_id, registry=model_registry)\n", + " direct_response: ChatResponse = model(\n", + " prompt=\"What's the capital of France?\"\n", + " )\n", + " print(f\"🎯 Direct response from {model_id}:\\n{direct_response.data}\\n\")\n", + "\n", + " except Exception as error:\n", + " print(f\"❌ Error with model {model_id}: {str(error)}\")\n", + " continue" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "# Register an OpenAI GPT-4o model\n", + "# openai_info = ModelInfo(\n", + "# model_id=\"openai:gpt-4o\",\n", + "# model_name=\"gpt-4o\",\n", + "# cost=ModelCost(input_cost_per_thousand=0.03, output_cost_per_thousand=0.06),\n", + "# rate_limit=RateLimit(tokens_per_minute=80000, requests_per_minute=5000),\n", + "# provider=ProviderInfo(name=\"OpenAI\", default_api_key=openai_key),\n", + "# api_key=openai_key,\n", + "# )\n", + "# model_registry.register_model(openai_info)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:19:41,794 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:19:41,802 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello!'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:19:41,807 [DEBUG] 
openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:19:41,811 [DEBUG] httpcore.connection: close.started\n", + "2025-03-21 00:19:41,813 [DEBUG] httpcore.connection: close.complete\n", + "2025-03-21 00:19:41,815 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", + "2025-03-21 00:19:41,885 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 00:19:41,886 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", + "2025-03-21 00:19:41,902 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 00:19:41,904 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:19:41,906 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:19:41,907 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:19:41,909 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:19:41,910 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:19:42,443 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:19:42 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'452'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999996'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_43be483befeb53bf9ac56f8f100d5aae'), (b'strict-transport-security', 
b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923baeb93b086459-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:19:42,444 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:19:42,445 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:19:42,448 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:19:42,448 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:19:42,449 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:19:42,451 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:19:42 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '452', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999996', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_43be483befeb53bf9ac56f8f100d5aae', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923baeb93b086459-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:19:42,451 [DEBUG] openai._base_client: request_id: req_43be483befeb53bf9ac56f8f100d5aae\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello! 
How can I assist you today?\n" + ] + } + ], + "source": [ + "response = llm(prompt=\"Hello!\", model_id=\"openai:gpt-4o\")\n", + "print(response.data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "---\n", + "\n", + "## Neural Similarity Scoring - Cosine Similarity (WIP)\n", + "\n", + "- from `src/ember/core/utils/embedding_utils.py`\n", + "- from jason\n", + "- need to merge" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q openai" + ] + }, + { + "cell_type": "code", +<<<<<<< HEAD + "execution_count": 7, +======= + "execution_count": 50, +>>>>>>> feb7b31 (added embedding model) + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from abc import ABC, abstractmethod\n", + "from typing import List, Protocol\n", + "import math\n", + "\n", + "import openai\n", + "import os\n", + "\n", + "\n", + "################################################################\n", + "# 1) Embedding Model Interfaces & Implementations\n", + "################################################################\n", + "\n", + "\n", + "class EmbeddingModel(Protocol):\n", + " \"\"\"Interface for embedding models.\n", + "\n", + " This protocol defines the minimal interface required to compute a text\n", + " embedding. 
Implementations may use local models, external APIs, or custom\n", + " neural networks.\n", + "\n", + " Methods:\n", + " embed_text: Compute the embedding for a given text.\n", + " \"\"\"\n", + "\n", + " def embed_text(self, text: str) -> List[float]:\n", + " \"\"\"Computes the embedding vector for the provided text.\n", + "\n", + " Args:\n", + " text (str): The text to be embedded.\n", + "\n", + " Returns:\n", + " List[float]: A list of floats representing the embedding vector.\n", + " \"\"\"\n", + " ...\n", + "\n", + "class Text_Embedding_3_EmbeddingModel(EmbeddingModel):\n", + " \"\"\"Interface for embedding models.\n", + "\n", + " This protocol defines the minimal interface required to compute a text\n", + " embedding. Implementations may use local models, external APIs, or custom\n", + " neural networks.\n", + "\n", + " Methods:\n", + " embed_text: Compute the embedding for a given text.\n", + " \"\"\"\n", + "\n", + " def embed_text(self, text: str) -> List[float]:\n", + " \"\"\"Computes the embedding vector for the provided text.\n", + "\n", + " Args:\n", + " text (str): The text to be embedded.\n", + "\n", + " Returns:\n", + " List[float]: A list of floats representing the embedding vector.\n", + " \"\"\"\n", + " response = llm(model_id=\"openai:text-embedding-3-small\", prompt=text)\n", + "\n", + " # response = openai.Embedding.create(\n", + " # model=\"text-embedding-3\",\n", + " # input=text\n", + " # )\n", + " return response.data\n", + "\n", + "\n", + "class MockEmbeddingModel:\n", + " \"\"\"Mock implementation of an embedding model using naive ASCII encoding.\n", + "\n", + " This simple model converts each character in the text to a normalized ASCII\n", + " value. 
It is intended solely for demonstration and testing purposes.\n", + "\n", + " Methods:\n", + " embed_text: Converts text to a sequence of normalized ASCII values.\n", + " \"\"\"\n", + "\n", + " def embed_text(self, text: str) -> List[float]:\n", + " \"\"\"Embeds text by converting each character to its normalized ASCII code.\n", + "\n", + " Args:\n", + " text (str): The input text to be embedded.\n", + "\n", + " Returns:\n", + " List[float]: A list of floats representing the embedding. Returns an\n", + " empty list if the text is empty.\n", + " \"\"\"\n", + " if not text:\n", + " return []\n", + " return [ord(ch) / 256.0 for ch in text]\n", + "\n", + "\n", + "################################################################\n", + "# 2) Similarity Metric Interface & Implementations\n", + "################################################################\n", + "\n", + "\n", + "class SimilarityMetric(ABC):\n", + " \"\"\"Abstract base class for computing similarity between embedding vectors.\n", + "\n", + " Subclasses must implement the similarity method to calculate a similarity\n", + " score between two vectors.\n", + " \"\"\"\n", + "\n", + " @abstractmethod\n", + " def similarity(self, vec_a: List[float], vec_b: List[float]) -> float:\n", + " \"\"\"Calculates the similarity between two embedding vectors.\n", + "\n", + " Args:\n", + " vec_a (List[float]): The first embedding vector.\n", + " vec_b (List[float]): The second embedding vector.\n", + "\n", + " Returns:\n", + " float: The similarity score, typically in the range [0, 1] or [-1, 1].\n", + " \"\"\"\n", + " ...\n", + "\n", + "\n", + "class CosineSimilarity(SimilarityMetric):\n", + " \"\"\"Implementation of cosine similarity for embedding vectors.\n", + "\n", + " The cosine similarity is defined as:\n", + " similarity(a, b) = (a · b) / (||a|| * ||b||)\n", + "\n", + " Returns 0.0 if either vector is empty or if any vector's norm is zero.\n", + " \"\"\"\n", + "\n", + " def similarity(self, vec_a: List[float], 
vec_b: List[float]) -> float:\n", + " \"\"\"Computes cosine similarity between two embedding vectors.\n", + "\n", + " Args:\n", + " vec_a (List[float]): The first embedding vector.\n", + " vec_b (List[float]): The second embedding vector.\n", + "\n", + " Returns:\n", + " float: The cosine similarity score.\n", + " \"\"\"\n", + " if not vec_a or not vec_b:\n", + " return 0.0\n", + "\n", + " dot_product: float = sum(a * b for a, b in zip(vec_a, vec_b))\n", + " norm_a: float = math.sqrt(sum(a * a for a in vec_a))\n", + " norm_b: float = math.sqrt(sum(b * b for b in vec_b))\n", + " if norm_a == 0 or norm_b == 0:\n", + " return 0.0\n", + "\n", + " return dot_product / (norm_a * norm_b)\n", + "\n", + "\n", + "################################################################\n", + "# 3) High-Level Utility Function\n", + "################################################################\n", + "\n", + "\n", + "def calculate_text_similarity(\n", + " text1: str, text2: str, model: EmbeddingModel, metric: SimilarityMetric\n", + ") -> float:\n", + " \"\"\"Calculates text similarity using an embedding model and a similarity metric.\n", + "\n", + " This function generates embeddings for the provided texts and then computes a\n", + " similarity score using the given similarity metric.\n", + "\n", + " Args:\n", + " text1 (str): The first text string.\n", + " text2 (str): The second text string.\n", + " model (EmbeddingModel): An instance conforming to the embedding model interface.\n", + " metric (SimilarityMetric): An instance implementing a similarity metric.\n", + "\n", + " Returns:\n", + " float: The computed similarity score.\n", + " \"\"\"\n", + " embedding1: List[float] = model.embed_text(text=text1)\n", + " embedding2: List[float] = model.embed_text(text=text2)\n", + " return metric.similarity(vec_a=embedding1, vec_b=embedding2)" + ] + }, + { + "cell_type": "code", +<<<<<<< HEAD + "execution_count": 8, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "ename": 
"NameError", + "evalue": "name 'mock_model' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[8], line 8\u001b[0m\n\u001b[1;32m 4\u001b[0m text_a: \u001b[38;5;28mstr\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHello world!\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 5\u001b[0m text_b: \u001b[38;5;28mstr\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHello, world??\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 7\u001b[0m score: \u001b[38;5;28mfloat\u001b[39m \u001b[38;5;241m=\u001b[39m calculate_text_similarity(\n\u001b[0;32m----> 8\u001b[0m text1\u001b[38;5;241m=\u001b[39mtext_a, text2\u001b[38;5;241m=\u001b[39mtext_b, model\u001b[38;5;241m=\u001b[39mmock_model, metric\u001b[38;5;241m=\u001b[39mcosine\n\u001b[1;32m 9\u001b[0m )\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSimilarity between \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_a\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m and \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_b\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mscore\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n", + "\u001b[0;31mNameError\u001b[0m: name 'mock_model' is not defined" +======= + "execution_count": 51, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:37:15,471 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. 
Using the registered provider.\n", + "2025-03-21 00:37:15,475 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:text-embedding-3-large' using provider class 'OpenAIModel'.\n", + "2025-03-21 00:37:15,482 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:text-embedding-3-large\n", + "2025-03-21 00:37:15,528 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:37:15,661 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello world!'}], 'model': 'text-embedding-3-large', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:37:15,695 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:37:15,708 [DEBUG] httpcore.connection: close.started\n", + "2025-03-21 00:37:15,744 [DEBUG] httpcore.connection: close.complete\n", + "2025-03-21 00:37:15,752 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", + "2025-03-21 00:37:15,775 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 00:37:15,779 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", + "2025-03-21 00:37:15,866 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 00:37:15,870 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:37:15,876 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:37:15,877 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:37:15,878 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:37:15,879 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 
00:37:15,920 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 403, b'Forbidden', [(b'Date', b'Fri, 21 Mar 2025 07:37:16 GMT'), (b'Content-Type', b'application/json; charset=utf-8'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'vary', b'Origin'), (b'x-request-id', b'req_536398e43921f797ac509114aae14cae'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=rQJfdoJ54KiOHVXBfvUG99o9mGLvvP81mJe39yApwnM-1742542636-1.0.1.1-t6_oW0vtdbrhY.lVxM0S223ktjIO_SQ4ohXzxKRtCabBNZWZq9TEun6DIfyIJAlK77DPrCUMENp6Wkwrxd67RJJmb35J0Piu0S8e7F2TFoE; path=/; expires=Fri, 21-Mar-25 08:07:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bc87478aaeb35-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:37:15,936 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 403 Forbidden\"\n", + "2025-03-21 00:37:15,943 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:37:15,948 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:37:15,952 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:37:15,955 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:37:15,958 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"403 Forbidden\" Headers({'date': 'Fri, 21 Mar 2025 07:37:16 GMT', 'content-type': 'application/json; charset=utf-8', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'vary': 'Origin', 'x-request-id': 'req_536398e43921f797ac509114aae14cae', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'set-cookie': 
'__cf_bm=rQJfdoJ54KiOHVXBfvUG99o9mGLvvP81mJe39yApwnM-1742542636-1.0.1.1-t6_oW0vtdbrhY.lVxM0S223ktjIO_SQ4ohXzxKRtCabBNZWZq9TEun6DIfyIJAlK77DPrCUMENp6Wkwrxd67RJJmb35J0Piu0S8e7F2TFoE; path=/; expires=Fri, 21-Mar-25 08:07:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bc87478aaeb35-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:37:15,962 [DEBUG] openai._base_client: request_id: req_536398e43921f797ac509114aae14cae\n", + "2025-03-21 00:37:15,972 [DEBUG] openai._base_client: Encountered httpx.HTTPStatusError\n", + "Traceback (most recent call last):\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1040, in _request\n", + " response.raise_for_status()\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/httpx/_models.py\", line 763, in raise_for_status\n", + " raise HTTPStatusError(message, request=request, response=self)\n", + "httpx.HTTPStatusError: Client error '403 Forbidden' for url 'https://api.openai.com/v1/chat/completions'\n", + "For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/403\n", + "2025-03-21 00:37:16,136 [DEBUG] openai._base_client: Not retrying\n", + "2025-03-21 00:37:16,137 [DEBUG] openai._base_client: Re-raising status error\n", + "2025-03-21 00:37:16,151 [ERROR] ember.core.registry.model.providers.openai.openai_provider: Unexpected error in OpenAIModel.forward()\n", + "Traceback (most recent call last):\n", + " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 389, in forward\n", + " response: Any = self.client.chat.completions.create(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 275, in wrapper\n", + " return func(*args, 
**kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 829, in create\n", + " return self._post(\n", + " ^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1280, in post\n", + " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 957, in request\n", + " return self._request(\n", + " ^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1061, in _request\n", + " raise self._make_status_error_from_response(err.response) from None\n", + "openai.PermissionDeniedError: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", + "2025-03-21 00:37:17,168 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:37:17,172 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello world!'}], 'model': 'text-embedding-3-large', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:37:17,174 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:37:17,175 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:37:17,176 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:37:17,178 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:37:17,182 [DEBUG] httpcore.http11: 
send_request_body.complete\n", + "2025-03-21 00:37:17,184 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:37:17,215 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 403, b'Forbidden', [(b'Date', b'Fri, 21 Mar 2025 07:37:17 GMT'), (b'Content-Type', b'application/json; charset=utf-8'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'vary', b'Origin'), (b'x-request-id', b'req_b1c4e773cbe460c0c8d22a74e4869095'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bc87c9c9beb35-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:37:17,219 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 403 Forbidden\"\n", + "2025-03-21 00:37:17,222 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:37:17,227 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:37:17,231 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:37:17,235 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:37:17,242 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"403 Forbidden\" Headers({'date': 'Fri, 21 Mar 2025 07:37:17 GMT', 'content-type': 'application/json; charset=utf-8', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'vary': 'Origin', 'x-request-id': 'req_b1c4e773cbe460c0c8d22a74e4869095', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bc87c9c9beb35-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:37:17,245 [DEBUG] openai._base_client: request_id: 
req_b1c4e773cbe460c0c8d22a74e4869095\n", + "2025-03-21 00:37:17,248 [DEBUG] openai._base_client: Encountered httpx.HTTPStatusError\n", + "Traceback (most recent call last):\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1040, in _request\n", + " response.raise_for_status()\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/httpx/_models.py\", line 763, in raise_for_status\n", + " raise HTTPStatusError(message, request=request, response=self)\n", + "httpx.HTTPStatusError: Client error '403 Forbidden' for url 'https://api.openai.com/v1/chat/completions'\n", + "For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/403\n", + "2025-03-21 00:37:17,253 [DEBUG] openai._base_client: Not retrying\n", + "2025-03-21 00:37:17,258 [DEBUG] openai._base_client: Re-raising status error\n", + "2025-03-21 00:37:17,261 [ERROR] ember.core.registry.model.providers.openai.openai_provider: Unexpected error in OpenAIModel.forward()\n", + "Traceback (most recent call last):\n", + " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 389, in forward\n", + " response: Any = self.client.chat.completions.create(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 275, in wrapper\n", + " return func(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 829, in create\n", + " return self._post(\n", + " ^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1280, in post\n", + " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", 
+ " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 957, in request\n", + " return self._request(\n", + " ^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1061, in _request\n", + " raise self._make_status_error_from_response(err.response) from None\n", + "openai.PermissionDeniedError: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", + "2025-03-21 00:37:19,267 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:37:19,277 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello world!'}], 'model': 'text-embedding-3-large', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:37:19,281 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:37:19,284 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:37:19,288 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:37:19,290 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:37:19,295 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:37:19,298 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:37:19,332 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 403, b'Forbidden', [(b'Date', b'Fri, 21 Mar 2025 07:37:19 GMT'), (b'Content-Type', b'application/json; charset=utf-8'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'vary', b'Origin'), (b'x-request-id', b'req_e3547707ff5d4f5809990164a72b6a49'), 
(b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bc889cd03eb35-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:37:19,334 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 403 Forbidden\"\n", + "2025-03-21 00:37:19,339 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:37:19,346 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:37:19,349 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:37:19,351 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:37:19,354 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"403 Forbidden\" Headers({'date': 'Fri, 21 Mar 2025 07:37:19 GMT', 'content-type': 'application/json; charset=utf-8', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'vary': 'Origin', 'x-request-id': 'req_e3547707ff5d4f5809990164a72b6a49', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bc889cd03eb35-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:37:19,355 [DEBUG] openai._base_client: request_id: req_e3547707ff5d4f5809990164a72b6a49\n", + "2025-03-21 00:37:19,362 [DEBUG] openai._base_client: Encountered httpx.HTTPStatusError\n", + "Traceback (most recent call last):\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1040, in _request\n", + " response.raise_for_status()\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/httpx/_models.py\", line 763, in raise_for_status\n", + " raise HTTPStatusError(message, 
request=request, response=self)\n", + "httpx.HTTPStatusError: Client error '403 Forbidden' for url 'https://api.openai.com/v1/chat/completions'\n", + "For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/403\n", + "2025-03-21 00:37:19,368 [DEBUG] openai._base_client: Not retrying\n", + "2025-03-21 00:37:19,374 [DEBUG] openai._base_client: Re-raising status error\n", + "2025-03-21 00:37:19,377 [ERROR] ember.core.registry.model.providers.openai.openai_provider: Unexpected error in OpenAIModel.forward()\n", + "Traceback (most recent call last):\n", + " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 389, in forward\n", + " response: Any = self.client.chat.completions.create(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 275, in wrapper\n", + " return func(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 829, in create\n", + " return self._post(\n", + " ^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1280, in post\n", + " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 957, in request\n", + " return self._request(\n", + " ^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1061, in _request\n", + " raise self._make_status_error_from_response(err.response) from None\n", + "openai.PermissionDeniedError: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 
'type': 'invalid_request_error', 'param': None, 'code': None}}\n", + "2025-03-21 00:37:19,380 [ERROR] ModelService: Error invoking model 'openai:text-embedding-3-large'.\n", + "Traceback (most recent call last):\n", + " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 389, in forward\n", + " response: Any = self.client.chat.completions.create(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 275, in wrapper\n", + " return func(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 829, in create\n", + " return self._post(\n", + " ^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1280, in post\n", + " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 957, in request\n", + " return self._request(\n", + " ^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1061, in _request\n", + " raise self._make_status_error_from_response(err.response) from None\n", + "openai.PermissionDeniedError: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", + "\n", + "The above exception was the direct cause of the following exception:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/root/ember/jared/ember/src/ember/core/registry/model/base/services/model_service.py\", line 106, in _invoke\n", + " response = model(prompt=prompt, 
**kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/base_provider.py\", line 182, in __call__\n", + " return self.forward(request=chat_request)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 336, in wrapped_f\n", + " return copy(f, *args, **kw)\n", + " ^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 475, in __call__\n", + " do = self.iter(retry_state=retry_state)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 376, in iter\n", + " result = action(retry_state)\n", + " ^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 418, in exc_check\n", + " raise retry_exc.reraise()\n", + " ^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 185, in reraise\n", + " raise self.last_attempt.result()\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/concurrent/futures/_base.py\", line 449, in result\n", + " return self.__get_result()\n", + " ^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n", + " raise self._exception\n", + " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 478, in __call__\n", + " result = fn(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^\n", + " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 406, in forward\n", + " raise ProviderAPIError(str(exc)) from exc\n", + "ember.core.exceptions.ProviderAPIError: Error 
code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n" + ] + }, + { + "ename": "ProviderAPIError", + "evalue": "Error invoking model openai:text-embedding-3-large", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mPermissionDeniedError\u001b[39m Traceback (most recent call last)", + "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py:389\u001b[39m, in \u001b[36mOpenAIModel.forward\u001b[39m\u001b[34m(self, request)\u001b[39m\n\u001b[32m 388\u001b[39m timeout = openai_kwargs.pop(\u001b[33m\"\u001b[39m\u001b[33mtimeout\u001b[39m\u001b[33m\"\u001b[39m, \u001b[32m30\u001b[39m)\n\u001b[32m--> \u001b[39m\u001b[32m389\u001b[39m response: Any = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mclient\u001b[49m\u001b[43m.\u001b[49m\u001b[43mchat\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcompletions\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcreate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 390\u001b[39m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mmodel_info\u001b[49m\u001b[43m.\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 391\u001b[39m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 392\u001b[39m \u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mopenai_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 393\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 394\u001b[39m content: \u001b[38;5;28mstr\u001b[39m = response.choices[\u001b[32m0\u001b[39m].message.content.strip()\n", + "\u001b[36mFile 
\u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py:275\u001b[39m, in \u001b[36mrequired_args..inner..wrapper\u001b[39m\u001b[34m(*args, **kwargs)\u001b[39m\n\u001b[32m 274\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(msg)\n\u001b[32m--> \u001b[39m\u001b[32m275\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py:829\u001b[39m, in \u001b[36mCompletions.create\u001b[39m\u001b[34m(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)\u001b[39m\n\u001b[32m 828\u001b[39m validate_response_format(response_format)\n\u001b[32m--> \u001b[39m\u001b[32m829\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_post\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 830\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43m/chat/completions\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[32m 831\u001b[39m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmaybe_transform\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 832\u001b[39m \u001b[43m \u001b[49m\u001b[43m{\u001b[49m\n\u001b[32m 833\u001b[39m \u001b[43m 
\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmessages\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 834\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmodel\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 835\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43maudio\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43maudio\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 836\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfrequency_penalty\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfrequency_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 837\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfunction_call\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunction_call\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 838\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfunctions\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunctions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 839\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mlogit_bias\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mlogit_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 840\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mlogprobs\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mlogprobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 841\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmax_completion_tokens\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mmax_completion_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 842\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmax_tokens\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 843\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmetadata\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 844\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmodalities\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodalities\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 845\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mn\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 846\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mparallel_tool_calls\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mparallel_tool_calls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 847\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mprediction\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mprediction\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 848\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mpresence_penalty\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mpresence_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 849\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mresponse_format\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mresponse_format\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 850\u001b[39m \u001b[43m 
\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mseed\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mseed\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 851\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mservice_tier\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mservice_tier\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 852\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstop\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 853\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstore\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstore\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 854\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstream\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 855\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstream_options\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 856\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtemperature\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtemperature\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 857\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtool_choice\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtool_choice\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 858\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtools\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mtools\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 859\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtop_logprobs\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtop_logprobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 860\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtop_p\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtop_p\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 861\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43muser\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43muser\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 862\u001b[39m \u001b[43m \u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 863\u001b[39m \u001b[43m \u001b[49m\u001b[43mcompletion_create_params\u001b[49m\u001b[43m.\u001b[49m\u001b[43mCompletionCreateParams\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 864\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 865\u001b[39m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmake_request_options\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 866\u001b[39m \u001b[43m \u001b[49m\u001b[43mextra_headers\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_headers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextra_query\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_query\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextra_body\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_body\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout\u001b[49m\n\u001b[32m 867\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 868\u001b[39m \u001b[43m 
\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m=\u001b[49m\u001b[43mChatCompletion\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 869\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 870\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mStream\u001b[49m\u001b[43m[\u001b[49m\u001b[43mChatCompletionChunk\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 871\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py:1280\u001b[39m, in \u001b[36mSyncAPIClient.post\u001b[39m\u001b[34m(self, path, cast_to, body, options, files, stream, stream_cls)\u001b[39m\n\u001b[32m 1277\u001b[39m opts = FinalRequestOptions.construct(\n\u001b[32m 1278\u001b[39m method=\u001b[33m\"\u001b[39m\u001b[33mpost\u001b[39m\u001b[33m\"\u001b[39m, url=path, json_data=body, files=to_httpx_files(files), **options\n\u001b[32m 1279\u001b[39m )\n\u001b[32m-> \u001b[39m\u001b[32m1280\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m cast(ResponseT, \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mopts\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m)\u001b[49m)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py:957\u001b[39m, in \u001b[36mSyncAPIClient.request\u001b[39m\u001b[34m(self, cast_to, options, 
remaining_retries, stream, stream_cls)\u001b[39m\n\u001b[32m 955\u001b[39m retries_taken = \u001b[32m0\u001b[39m\n\u001b[32m--> \u001b[39m\u001b[32m957\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 958\u001b[39m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 959\u001b[39m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[43m=\u001b[49m\u001b[43moptions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 960\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 961\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 962\u001b[39m \u001b[43m \u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[43m=\u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 963\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py:1061\u001b[39m, in \u001b[36mSyncAPIClient._request\u001b[39m\u001b[34m(self, cast_to, options, retries_taken, stream, stream_cls)\u001b[39m\n\u001b[32m 1060\u001b[39m log.debug(\u001b[33m\"\u001b[39m\u001b[33mRe-raising status error\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m-> \u001b[39m\u001b[32m1061\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m._make_status_error_from_response(err.response) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 1063\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._process_response(\n\u001b[32m 1064\u001b[39m cast_to=cast_to,\n\u001b[32m 1065\u001b[39m options=options,\n\u001b[32m 
(...)\u001b[39m\u001b[32m 1069\u001b[39m retries_taken=retries_taken,\n\u001b[32m 1070\u001b[39m )\n", + "\u001b[31mPermissionDeniedError\u001b[39m: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}", + "\nThe above exception was the direct cause of the following exception:\n", + "\u001b[31mProviderAPIError\u001b[39m Traceback (most recent call last)", + "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/base/services/model_service.py:106\u001b[39m, in \u001b[36mModelService._invoke\u001b[39m\u001b[34m(self, model_id, prompt, **kwargs)\u001b[39m\n\u001b[32m 105\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m106\u001b[39m response = \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m=\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 107\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m exc:\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/providers/base_provider.py:182\u001b[39m, in \u001b[36mBaseProviderModel.__call__\u001b[39m\u001b[34m(self, prompt, **kwargs)\u001b[39m\n\u001b[32m 181\u001b[39m chat_request: ChatRequest = ChatRequest(prompt=prompt, **kwargs)\n\u001b[32m--> \u001b[39m\u001b[32m182\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m=\u001b[49m\u001b[43mchat_request\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:336\u001b[39m, in 
\u001b[36mBaseRetrying.wraps..wrapped_f\u001b[39m\u001b[34m(*args, **kw)\u001b[39m\n\u001b[32m 335\u001b[39m wrapped_f.statistics = copy.statistics \u001b[38;5;66;03m# type: ignore[attr-defined]\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m336\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcopy\u001b[49m\u001b[43m(\u001b[49m\u001b[43mf\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkw\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:475\u001b[39m, in \u001b[36mRetrying.__call__\u001b[39m\u001b[34m(self, fn, *args, **kwargs)\u001b[39m\n\u001b[32m 474\u001b[39m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m475\u001b[39m do = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43miter\u001b[49m\u001b[43m(\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m=\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 476\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(do, DoAttempt):\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:376\u001b[39m, in \u001b[36mBaseRetrying.iter\u001b[39m\u001b[34m(self, retry_state)\u001b[39m\n\u001b[32m 375\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m action \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m.iter_state.actions:\n\u001b[32m--> \u001b[39m\u001b[32m376\u001b[39m result = \u001b[43maction\u001b[49m\u001b[43m(\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 377\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m result\n", + "\u001b[36mFile 
\u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:418\u001b[39m, in \u001b[36mBaseRetrying._post_stop_check_actions..exc_check\u001b[39m\u001b[34m(rs)\u001b[39m\n\u001b[32m 417\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.reraise:\n\u001b[32m--> \u001b[39m\u001b[32m418\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[43mretry_exc\u001b[49m\u001b[43m.\u001b[49m\u001b[43mreraise\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 419\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m retry_exc \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mfut\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mexception\u001b[39;00m()\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:185\u001b[39m, in \u001b[36mRetryError.reraise\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 184\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.last_attempt.failed:\n\u001b[32m--> \u001b[39m\u001b[32m185\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mlast_attempt\u001b[49m\u001b[43m.\u001b[49m\u001b[43mresult\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 186\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/concurrent/futures/_base.py:449\u001b[39m, in \u001b[36mFuture.result\u001b[39m\u001b[34m(self, timeout)\u001b[39m\n\u001b[32m 448\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m._state == FINISHED:\n\u001b[32m--> \u001b[39m\u001b[32m449\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m__get_result\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 451\u001b[39m 
\u001b[38;5;28mself\u001b[39m._condition.wait(timeout)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/concurrent/futures/_base.py:401\u001b[39m, in \u001b[36mFuture.__get_result\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 400\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m401\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m._exception\n\u001b[32m 402\u001b[39m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[32m 403\u001b[39m \u001b[38;5;66;03m# Break a reference cycle with the exception in self._exception\u001b[39;00m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:478\u001b[39m, in \u001b[36mRetrying.__call__\u001b[39m\u001b[34m(self, fn, *args, **kwargs)\u001b[39m\n\u001b[32m 477\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m478\u001b[39m result = \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 479\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m: \u001b[38;5;66;03m# noqa: B902\u001b[39;00m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py:406\u001b[39m, in \u001b[36mOpenAIModel.forward\u001b[39m\u001b[34m(self, request)\u001b[39m\n\u001b[32m 405\u001b[39m logger.exception(\u001b[33m\"\u001b[39m\u001b[33mUnexpected error in OpenAIModel.forward()\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m--> \u001b[39m\u001b[32m406\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m ProviderAPIError(\u001b[38;5;28mstr\u001b[39m(exc)) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mexc\u001b[39;00m\n", + "\u001b[31mProviderAPIError\u001b[39m: 
Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}", + "\nThe above exception was the direct cause of the following exception:\n", + "\u001b[31mProviderAPIError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[51]\u001b[39m\u001b[32m, line 7\u001b[39m\n\u001b[32m 4\u001b[39m text_a: \u001b[38;5;28mstr\u001b[39m = \u001b[33m\"\u001b[39m\u001b[33mHello world!\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 5\u001b[39m text_b: \u001b[38;5;28mstr\u001b[39m = \u001b[33m\"\u001b[39m\u001b[33mHello, world??\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m7\u001b[39m score: \u001b[38;5;28mfloat\u001b[39m = \u001b[43mcalculate_text_similarity\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 8\u001b[39m \u001b[43m \u001b[49m\u001b[43mtext1\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtext_a\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtext2\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtext_b\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m=\u001b[49m\u001b[43mopenai_embedding_model\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetric\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcosine\u001b[49m\n\u001b[32m 9\u001b[39m \u001b[43m)\u001b[49m\n\u001b[32m 10\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mSimilarity between \u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_a\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m and \u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_b\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mscore\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[50]\u001b[39m\u001b[32m, line 171\u001b[39m, in 
\u001b[36mcalculate_text_similarity\u001b[39m\u001b[34m(text1, text2, model, metric)\u001b[39m\n\u001b[32m 154\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mcalculate_text_similarity\u001b[39m(\n\u001b[32m 155\u001b[39m text1: \u001b[38;5;28mstr\u001b[39m, text2: \u001b[38;5;28mstr\u001b[39m, model: EmbeddingModel, metric: SimilarityMetric\n\u001b[32m 156\u001b[39m ) -> \u001b[38;5;28mfloat\u001b[39m:\n\u001b[32m 157\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"Calculates text similarity using an embedding model and a similarity metric.\u001b[39;00m\n\u001b[32m 158\u001b[39m \n\u001b[32m 159\u001b[39m \u001b[33;03m This function generates embeddings for the provided texts and then computes a\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 169\u001b[39m \u001b[33;03m float: The computed similarity score.\u001b[39;00m\n\u001b[32m 170\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m171\u001b[39m embedding1: List[\u001b[38;5;28mfloat\u001b[39m] = \u001b[43mmodel\u001b[49m\u001b[43m.\u001b[49m\u001b[43membed_text\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtext\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtext1\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 172\u001b[39m embedding2: List[\u001b[38;5;28mfloat\u001b[39m] = model.embed_text(text=text2)\n\u001b[32m 173\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m metric.similarity(vec_a=embedding1, vec_b=embedding2)\n", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[50]\u001b[39m\u001b[32m, line 58\u001b[39m, in \u001b[36mText_Embedding_3_EmbeddingModel.embed_text\u001b[39m\u001b[34m(self, text)\u001b[39m\n\u001b[32m 49\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34membed_text\u001b[39m(\u001b[38;5;28mself\u001b[39m, text: \u001b[38;5;28mstr\u001b[39m) -> List[\u001b[38;5;28mfloat\u001b[39m]:\n\u001b[32m 50\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"Computes the embedding vector for the provided 
text.\u001b[39;00m\n\u001b[32m 51\u001b[39m \n\u001b[32m 52\u001b[39m \u001b[33;03m Args:\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 56\u001b[39m \u001b[33;03m List[float]: A list of floats representing the embedding vector.\u001b[39;00m\n\u001b[32m 57\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m58\u001b[39m response = \u001b[43mllm\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_id\u001b[49m\u001b[43m=\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mopenai:text-embedding-3-large\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtext\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 60\u001b[39m \u001b[38;5;66;03m# response = openai.Embedding.create(\u001b[39;00m\n\u001b[32m 61\u001b[39m \u001b[38;5;66;03m# model=\"text-embedding-3\",\u001b[39;00m\n\u001b[32m 62\u001b[39m \u001b[38;5;66;03m# input=text\u001b[39;00m\n\u001b[32m 63\u001b[39m \u001b[38;5;66;03m# )\u001b[39;00m\n\u001b[32m 64\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m response.data\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/base/services/model_service.py:100\u001b[39m, in \u001b[36mModelService.invoke_model\u001b[39m\u001b[34m(self, model_id, prompt, **kwargs)\u001b[39m\n\u001b[32m 98\u001b[39m response = \u001b[38;5;28mself\u001b[39m._invoke(model_id, prompt, **kwargs)\n\u001b[32m 99\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m100\u001b[39m response = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_invoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 101\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m response\n", + "\u001b[36mFile 
\u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/base/services/model_service.py:109\u001b[39m, in \u001b[36mModelService._invoke\u001b[39m\u001b[34m(self, model_id, prompt, **kwargs)\u001b[39m\n\u001b[32m 107\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m exc:\n\u001b[32m 108\u001b[39m \u001b[38;5;28mself\u001b[39m._logger.exception(\u001b[33m\"\u001b[39m\u001b[33mError invoking model \u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m.\u001b[39m\u001b[33m\"\u001b[39m, model_id)\n\u001b[32m--> \u001b[39m\u001b[32m109\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m ProviderAPIError(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mError invoking model \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mexc\u001b[39;00m\n\u001b[32m 111\u001b[39m metric_counter = \u001b[38;5;28mself\u001b[39m._metrics.get(\u001b[33m\"\u001b[39m\u001b[33mmodel_invocations\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 112\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m metric_counter \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", + "\u001b[31mProviderAPIError\u001b[39m: Error invoking model openai:text-embedding-3-large" +>>>>>>> feb7b31 (added embedding model) + ] + } + ], + "source": [ + "openai_embedding_model = Text_Embedding_3_EmbeddingModel()\n", + "cosine: CosineSimilarity = CosineSimilarity()\n", + "\n", + "text_a: str = \"Hello world!\"\n", + "text_b: str = \"Hello, world??\"\n", + "\n", + "score: float = calculate_text_similarity(\n", + " text1=text_a, text2=text_b, model=openai_embedding_model, metric=cosine\n", + ")\n", + "print(f\"Similarity between '{text_a}' and '{text_b}': {score}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, 
+ "source": [ + "---\n", + "---\n", + "\n", + "## Compression Ratio (WIP)\n", + "\n", + "from `src/ember/core/utils/eval/evaluators.py`" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n", + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. 
Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q diversity==0.2.0\n", + "%pip install -q spacy==3.8.4" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "import re\n", + "import subprocess\n", + "from typing import Any, Dict, TypeVar, Optional, List, Generic, Callable, Union\n", + "\n", + "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", + "from ember.core.utils.eval.extractors import RegexExtractor\n", + "\n", + "from diversity import compression_ratio\n", + "\n", + "T_out = TypeVar(\"T_out\")\n", + "T_truth = TypeVar(\"T_truth\")\n", + "\n", + "\n", + "class ComposedEvaluator(IEvaluator[T_out, T_truth], Generic[T_out, T_truth]):\n", + " \"\"\"Combines an output extractor with an evaluator for the extracted data.\n", + "\n", + " This evaluator first transforms the system output using the provided extractor,\n", + " then evaluates the extracted value using the specified base evaluator.\n", + "\n", + " Args:\n", + " extractor: An object with an `extract` method to process the system output.\n", + " base_evaluator (IEvaluator): An evaluator that processes the extracted output.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result of the evaluation.\n", + " \"\"\"\n", + "\n", + " def __init__(\n", + " self,\n", + " extractor: Any, # Expecting an extractor with an `extract` method.\n", + " base_evaluator: IEvaluator[Any, Any],\n", + " ) -> None:\n", + " self.extractor = extractor\n", + " self.base_evaluator = base_evaluator\n", + "\n", + " def evaluate(\n", + " self, system_output: T_out, correct_answer: Any, **kwargs: Any\n", + " ) -> EvaluationResult:\n", + " \"\"\"Evaluates the provided system output against the 
correct answer.\n", + "\n", + " Args:\n", + " system_output (T_out): The raw output generated by the system.\n", + " correct_answer (Any): The expected correct answer.\n", + " **kwargs: Additional keyword arguments for extraction or evaluation.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result of evaluating the extracted value.\n", + " \"\"\"\n", + " extracted_value = self.extractor.extract(system_output, **kwargs)\n", + " return self.base_evaluator.evaluate(extracted_value, correct_answer, **kwargs)\n", + "\n", + "\n", + "# Basic Evaluators\n", + "\n", + "\n", + "class ExactMatchEaluator(IEvaluator[str, str]):\n", + " \"\"\"Evaluator to check for an exact match between two strings,\n", + " ignoring differences in whitespace and case.\n", + "\n", + " Example:\n", + " evaluator = ExactMatchEvaluator()\n", + " result = evaluator.evaluate(\"Hello World\", \"hello world\")\n", + "\n", + " Args:\n", + " compare_fn (Optional[Callable[[str, str], bool]]): Optional custom comparison function.\n", + " If not provided, strings are normalized (whitespace removed, lowercase) before comparison.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result containing a correctness flag and a score.\n", + " \"\"\"\n", + "\n", + " def __init__(self, compare_fn: Optional[Callable[[str, str], bool]] = None) -> None:\n", + " self.compare_fn = compare_fn or self._default_compare\n", + "\n", + " def _default_compare(self, str1: str, str2: str) -> bool:\n", + " \"\"\"Default string comparison function that ignores case and whitespace.\n", + "\n", + " Args:\n", + " str1 (str): First string to compare\n", + " str2 (str): Second string to compare\n", + "\n", + " Returns:\n", + " bool: True if strings match after normalization\n", + " \"\"\"\n", + " return str1.strip().lower() == str2.strip().lower()\n", + "\n", + " def evaluate(\n", + " self, system_output: str, correct_answer: str, **kwargs: Any\n", + " ) -> EvaluationResult:\n", + " \"\"\"Evaluates whether a system output 
exactly matches the correct answer.\n", + "\n", + " Args:\n", + " system_output (str): The system-generated string.\n", + " correct_answer (str): The expected answer string.\n", + " **kwargs: Additional keyword arguments (unused).\n", + "\n", + " Returns:\n", + " EvaluationResult: An object with `is_correct` set to True if the normalized strings match,\n", + " along with a corresponding score.\n", + " \"\"\"\n", + " is_correct = self.compare_fn(system_output, correct_answer)\n", + " score = 1.0 if is_correct else 0.0\n", + " return EvaluationResult(is_correct=is_correct, score=score)\n", + "\n", + "class DiversityScoringEvaluator(IEvaluator[List[str], None]):\n", + " \"\"\"\n", + " Evaluator to test ensemble outputs -> score them (float)\n", + " \"\"\"\n", + " def evaluate(\n", + " self, \n", + " system_output: List[str], \n", + " **kwargs) -> EvaluationResult:\n", + " if system_output is None or len(system_output) == 0:\n", + " return EvaluationResult(is_correct=False, score=-1)\n", + "\n", + " # current compression ratio formula\n", + " # TODO: update scoring function to make it better\n", + " # -> like use token count\n", + "\n", + " # example I was thinking about:\n", + " letter_sum = sum(len(response) for response in system_output)\n", + " ratio = compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", + " # ratio = compression_ratio(system_output, algorithm='gzip',verbose=True)\n", + " return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output})\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Edit Distance (WIP)\n", + "- need to merge" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "collapsed": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, 
possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q python-Levenshtein" + ] + }, + { + "cell_type": "code", +<<<<<<< HEAD + "execution_count": 1, +======= + "execution_count": 21, +>>>>>>> feb7b31 (added embedding model) + "metadata": {}, + "outputs": [], + "source": [ + "import Levenshtein\n", + "from typing import List\n", + "from dataclasses import dataclass\n", + "\n", + "@dataclass\n", + "class EvaluationResult:\n", + " is_correct: bool\n", + " score: float\n", + " metadata: dict\n", + "\n", + "class EditDistanceScoringEvaluator:\n", + "\n", + " def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult:\n", + " if system_output is None or len(system_output) == 0:\n", + " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", + "\n", + " diversity_score = self.compute_distance(system_output)\n", + "\n", + " return EvaluationResult(\n", + " is_correct=True, \n", + " score=diversity_score,\n", + " metadata={'responses': system_output}\n", + " )\n", + "\n", + " def compute_distance(self, outputs: List[str]) -> float:\n", + " n = len(outputs)\n", + " if n < 2:\n", + " return 0.0\n", + "\n", + " total_distance = 0\n", + " pairs = 0\n", + "\n", + " for i in range(n):\n", + " for j in range(i + 1, n):\n", + " dist = Levenshtein.distance(outputs[i], outputs[j])\n", + " max_len = max(len(outputs[i]), len(outputs[j]))\n", + " normalized_dist = dist / max_len if max_len > 0 else 0 \n", + " total_distance += normalized_dist\n", + " pairs += 1\n", + " \n", + " return total_distance / pairs if pairs > 0 else 0.0\n" + ] + }, + { + "cell_type": "code", +<<<<<<< HEAD + "execution_count": 2, +======= + "execution_count": 22, 
+>>>>>>> feb7b31 (added embedding model) + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Diversity Score: 0.8301\n", + "Is Correct: True\n", + "Metadata: {'responses': ['hi there', 'hi', 'hello', 'yo whatup']}\n" + ] + } + ], + "source": [ + "distance_evaluator = EditDistanceScoringEvaluator()\n", + "\n", + "# input_strs = [\n", + "# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", + "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", + "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. 
Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", + "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", + "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. 
Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", + "# ]\n", + "\n", + "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", + "\n", + "# input_strs = [\"This is a sample text with lots of repetition.\", \n", + "# \"This is a sample text with lots of repetition.\",\n", + "# \"This is a sample text with lots of repetition.\"]\n", + "\n", + "edit_distance = distance_evaluator.evaluate(input_strs)\n", + "\n", + "print(f\"Diversity Score: {edit_distance.score:.4f}\")\n", + "print(f\"Is Correct: {edit_distance.is_correct}\")\n", + "print(f\"Metadata: {edit_distance.metadata}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Novelty Score\n", + "- need to merge" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List\n", + "from dataclasses import dataclass\n", + "import numpy as np\n", + "\n", + "@dataclass\n", + "class EvaluationResult:\n", + " is_correct: bool\n", + " score: float\n", + " metadata: dict\n", + "\n", + "class NoveltyScoringEvaluator:\n", + " \n", + " def evaluate(self, model: EmbeddingModel, system_output: List[str], **kwargs) -> EvaluationResult:\n", + " if not system_output or len(system_output) == 0:\n", + " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", + "\n", + " novelty_scores = [self.compute_novelty(r, system_output[:i]) for i, r in enumerate(system_output)]\n", + "\n", + " avg_novelty = sum(novelty_scores) / len(novelty_scores) if novelty_scores else 0.0\n", + "\n", + " return EvaluationResult(\n", + " is_correct=True,\n", + " score=avg_novelty,\n", + " metadata={'responses': system_output, 'novelty_scores': novelty_scores}\n", + " )\n", + "\n", + " def compute_novelty(self, response: str, prior_responses: List[str]) -> float:\n", + " if not prior_responses:\n", + " return 1.0\n", + "\n", + " new_embedding 
= self.model.embed_text(response)\n", + " prior_embeddings = [self.model.embed_text(r) for r in prior_responses]\n", + "\n", + " similarities = [\n", + " np.dot(new_embedding, prior_embedding) /\n", + " (np.linalg.norm(new_embedding) * np.linalg.norm(prior_embedding))\n", + " for prior_embedding in prior_embeddings\n", + " ]\n", + "\n", + " return 1 - max(similarities)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EvaluationResult(is_correct=True, score=0.08368770360509659, metadata={'responses': ['Hello world!', 'Hi there!', 'Goodbye!']})\n" + ] + } + ], + "source": [ + "novelty_evaluator = NoveltyScoringEvaluator()\n", + "\n", + "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", + "\n", + "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", + "novelty = novelty_evaluator.evaluate(mock_model, input_strs)\n", + "\n", + "print(f\"Diversity Score: {novelty.score:.4f}\")\n", + "print(f\"Is Correct: {novelty.is_correct}\")\n", + "print(f\"Metadata: {novelty.metadata}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "---\n", + "\n", + "## Putting it all together" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", + "cosine: CosineSimilarity = CosineSimilarity()\n", + "diversity_evaluator = DiversityScoringEvaluator()\n", + "edit_dist_evaluator = EditDistanceScoringEvaluator()\n", + "\n", + "def ensemble_diversity(strings):\n", + " compression = diversity_evaluator.evaluate(strings)\n", + " print(\"DiversityScoringEvaluator result:\", compression)\n", + " scores = list()\n", + " for ind1 in range(len(strings)):\n", + " ind2 = ind1+1 if ind1+1 != len(strings) else 0\n", + " curr_score = calculate_text_similarity(text1=strings[ind1], text2=strings[ind2], 
model=mock_model, metric=cosine)\n", + " print(f\"SimilarityScore between ind1={ind1} and ind2={ind2}: {curr_score}\")\n", + " scores.append(curr_score)\n", + " avg_score = np.average(scores)\n", + " print(f\"Avg cosine similarity: {avg_score}\")\n", + " print(f\"diversity cosine-sim inverse: {1-avg_score}\")\n", + " edit_distance = edit_dist_evaluator.evaluate(strings)\n", + " print(f\"edit-dist score: {edit_distance.score:.4f}\")\n", + " print(\"-------------------------------\")\n", + " print(f\"possible diversity score: {((1-avg_score) + compression.score + edit_distance.score) / 3.}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=0.063936, metadata={'responses': ['hi there', 'hi', 'hello', 'yo whatup']})\n", + "SimilarityScore between ind1=0 and ind2=1: 0.5207675658482732\n", + "SimilarityScore between ind1=1 and ind2=2: 0.6088947130341378\n", + "SimilarityScore between ind1=2 and ind2=3: 0.67913155770349\n", + "SimilarityScore between ind1=3 and ind2=0: 0.9344774636399475\n", + "Avg cosine similarity: 0.6858178250564622\n", + "diversity cosine-sim inverse: 0.31418217494353784\n", + "edit-dist score: 0.8301\n", + "-------------------------------\n", + "possible diversity score: 0.40273692251204346\n" + ] + } + ], + "source": [ + "# input_strs = [\n", + "# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. 
Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", + "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", + "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", + "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. 
Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", + "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", + "# ]\n", + "\n", + "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", + "\n", + "# input_strs = [\"This is a sample text with lots of repetition.\", \n", + "# \"This is a sample text with lots of repetition.\",\n", + "# \"This is a sample text with lots of repetition.\"]\n", + "\n", + "ensemble_diversity(input_strs)" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:27,328 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:27,340 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. 
Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:27,343 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:27,348 [DEBUG] httpcore.connection: close.started\n", + "2025-03-21 00:21:27,350 [DEBUG] httpcore.connection: close.complete\n", + "2025-03-21 00:21:27,351 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", + "2025-03-21 00:21:27,391 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 00:21:27,393 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", + "2025-03-21 00:21:27,405 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 00:21:27,407 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:27,410 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:27,412 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:27,415 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:27,417 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:21:27,854 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:28 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'375'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999988'), (b'x-ratelimit-reset-requests', b'1ms'), 
(b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_a7db3170b0524e33464f32cef401550f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb14c9ea65c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:21:27,856 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:27,858 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:27,862 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:27,864 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:27,866 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:27,868 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:28 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '375', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999988', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_a7db3170b0524e33464f32cef401550f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb14c9ea65c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:27,869 [DEBUG] openai._base_client: request_id: req_a7db3170b0524e33464f32cef401550f\n", + "2025-03-21 
00:21:27,871 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:27,875 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:27,877 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:27,878 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:27,882 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:27,885 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:27,887 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:27,888 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 0: [Why don't skeletons fight each other? 
They don't have the guts!]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:28,285 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:28 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'351'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_1e219a2e3ce8c441aab4f3e57c0a654f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb14f88c65c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:21:28,286 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:28,287 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:28,289 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:28,290 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:28,291 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:28,293 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:28 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '351', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_1e219a2e3ce8c441aab4f3e57c0a654f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb14f88c65c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:28,293 [DEBUG] openai._base_client: request_id: req_1e219a2e3ce8c441aab4f3e57c0a654f\n", + "2025-03-21 00:21:28,296 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:28,306 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:28,310 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:28,314 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:28,316 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:28,323 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:28,328 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:28,331 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 1: [Why don't skeletons fight each other? 
They don't have the guts.]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:28,832 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:29 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'449'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_62cfe73bcae00e48f66067daa60b9a0f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'CF-Cache-Status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb1525b025c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:21:28,835 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:28,839 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:28,844 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:28,845 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:28,847 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:28,848 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:29 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '449', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_62cfe73bcae00e48f66067daa60b9a0f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb1525b025c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:28,850 [DEBUG] openai._base_client: request_id: req_62cfe73bcae00e48f66067daa60b9a0f\n", + "2025-03-21 00:21:28,854 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:28,861 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:28,863 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:28,865 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:28,866 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:28,867 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:28,868 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:28,869 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 2: [Why did the scarecrow win an award? 
Because he was outstanding in his field!]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:29,612 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:29 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'654'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_c81335d39ba9bd3c52cdd33e341a3f5f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb155bdd35c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:21:29,615 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:29,616 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:29,620 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:29,621 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:29,623 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:29,624 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:29 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '654', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_c81335d39ba9bd3c52cdd33e341a3f5f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb155bdd35c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:29,625 [DEBUG] openai._base_client: request_id: req_c81335d39ba9bd3c52cdd33e341a3f5f\n", + "2025-03-21 00:21:29,628 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:29,632 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:29,633 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:29,635 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:29,638 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:29,639 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:29,641 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:29,642 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 3: [Why don’t scientists trust atoms? 
Because they make up everything!]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:30,073 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:30 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'384'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_09b2adf96a48a99fc2ff344e924d2288'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb15a89135c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:21:30,075 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:30,076 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:30,079 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:30,081 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:30,082 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:30,084 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:30 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '384', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_09b2adf96a48a99fc2ff344e924d2288', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb15a89135c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:30,085 [DEBUG] openai._base_client: request_id: req_09b2adf96a48a99fc2ff344e924d2288\n", + "2025-03-21 00:21:30,086 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:30,091 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:30,093 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:30,095 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:30,098 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:30,098 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:30,100 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:30,101 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 4: [Why don't skeletons fight each other? 
They don't have the guts!]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:30,742 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:31 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'601'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_b762852f0cc93993624ba07c2e67ba9f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb15d6b105c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:21:30,745 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:30,749 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:30,755 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:30,758 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:30,761 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:30,763 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:31 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '601', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_b762852f0cc93993624ba07c2e67ba9f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb15d6b105c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:30,767 [DEBUG] openai._base_client: request_id: req_b762852f0cc93993624ba07c2e67ba9f\n", + "2025-03-21 00:21:30,773 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:30,785 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:30,788 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:30,795 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:30,797 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:30,801 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:30,807 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:30,813 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 5: [Why don't skeletons fight each other? 
\n", + "They don't have the guts.]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:31,353 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:31 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'471'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_ba026095394aadcd39b29c1bb1ce6973'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb161cdd35c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:21:31,357 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:31,360 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:31,363 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:31,364 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:31,365 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:31,366 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:31 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '471', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_ba026095394aadcd39b29c1bb1ce6973', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb161cdd35c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:31,368 [DEBUG] openai._base_client: request_id: req_ba026095394aadcd39b29c1bb1ce6973\n", + "2025-03-21 00:21:31,372 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:31,381 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:31,385 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:31,389 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:31,398 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:31,401 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:31,408 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:31,410 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 6: [Why don't scientists trust atoms? 
Because they make up everything!]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:31,944 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:32 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'444'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_d8bd19c90a2b876b62adaad8f0a58b70'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb16588705c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:21:31,945 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:31,947 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:31,951 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:31,952 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:31,953 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:31,955 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:32 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '444', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_d8bd19c90a2b876b62adaad8f0a58b70', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb16588705c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:31,956 [DEBUG] openai._base_client: request_id: req_d8bd19c90a2b876b62adaad8f0a58b70\n", + "2025-03-21 00:21:31,958 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:31,965 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. 
Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:31,970 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:31,973 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:31,976 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:31,978 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:31,981 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:31,986 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 7: [Why don't scientists trust atoms?\n", + "\n", + "Because they make up everything!]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:32,559 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:32 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'519'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_1aea1c3c615b3eb8ed8e75303a68f56a'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb1692b0b5c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; 
ma=86400')])\n", + "2025-03-21 00:21:32,561 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:32,564 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:32,568 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:32,570 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:32,573 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:32,573 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:32 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '519', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_1aea1c3c615b3eb8ed8e75303a68f56a', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb1692b0b5c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:32,575 [DEBUG] openai._base_client: request_id: req_1aea1c3c615b3eb8ed8e75303a68f56a\n", + "2025-03-21 00:21:32,580 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:21:32,595 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. 
Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:21:32,598 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:21:32,601 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:21:32,604 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:21:32,604 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:21:32,607 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:21:32,608 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 8: [Why did the scarecrow win an award? Because he was outstanding in his field!]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:21:33,085 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:33 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'424'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_62bf812cc022bb70e422a7dc80b83749'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb16d1dac5c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + 
"2025-03-21 00:21:33,086 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:21:33,088 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:21:33,091 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:21:33,092 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:21:33,093 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:21:33,094 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:33 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '424', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_62bf812cc022bb70e422a7dc80b83749', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb16d1dac5c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 00:21:33,095 [DEBUG] openai._base_client: request_id: req_62bf812cc022bb70e422a7dc80b83749\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 9: [Why don't scientists trust atoms?\n", + "\n", + "Because they make up everything!]\n", + "-----\n", + "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=2.919, metadata={'responses': [\"Why don't skeletons fight each other? They don't have the guts!\", \"Why don't skeletons fight each other? 
They don't have the guts.\", 'Why did the scarecrow win an award? Because he was outstanding in his field!', 'Why don’t scientists trust atoms? Because they make up everything!', \"Why don't skeletons fight each other? They don't have the guts!\", \"Why don't skeletons fight each other? \\nThey don't have the guts.\", \"Why don't scientists trust atoms? Because they make up everything!\", \"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", 'Why did the scarecrow win an award? Because he was outstanding in his field!', \"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\"]})\n", + "SimilarityScore between ind1=0 and ind2=1: 0.9998557731781514\n", + "SimilarityScore between ind1=1 and ind2=2: 0.8516952804862096\n", + "SimilarityScore between ind1=2 and ind2=3: 0.12224781375093245\n", + "SimilarityScore between ind1=3 and ind2=4: 0.13899372940048665\n", + "SimilarityScore between ind1=4 and ind2=5: 0.930451468399891\n", + "SimilarityScore between ind1=5 and ind2=6: 0.9066215700385928\n", + "SimilarityScore between ind1=6 and ind2=7: 0.9524292508952135\n", + "SimilarityScore between ind1=7 and ind2=8: 0.8506419386731088\n", + "SimilarityScore between ind1=8 and ind2=9: 0.8506419386731088\n", + "SimilarityScore between ind1=9 and ind2=0: 0.8843819811752456\n", + "Avg cosine similarity: 0.748796074467094\n", + "diversity cosine-sim inverse: 0.25120392553290605\n", + "edit-dist score: 0.4794\n", + "-------------------------------\n", + "possible diversity score: 1.2165215583179603\n" + ] + } + ], + "source": [ + "num_jokes = 10\n", + "responses = []\n", + "\n", + "for i in range(num_jokes):\n", + " res = llm(prompt=\"Tell me a funny joke. 
Keep it concise.\", model_id=\"openai:gpt-4o\").data\n", + " responses.append(res)\n", + " print(f\"Joke {i}: [{res}]\")\n", + "\n", + "print(\"-----\")\n", + "ensemble_diversity(responses)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 00:20:51,760 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 00:20:51,776 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"Tell me 10 jokes. make them split with '||'. Don't say anything else besides the joke. \"}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 00:20:51,780 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 00:20:51,786 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 00:20:51,790 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 00:20:51,792 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 00:20:51,798 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 00:20:51,800 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 00:20:53,925 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:20:54 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'2074'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), 
(b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999976'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_fc11eea25331eac4ebdbdc434053f357'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb06dfcffd001-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 00:20:53,928 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 00:20:53,930 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 00:20:53,934 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 00:20:53,935 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 00:20:53,936 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 00:20:53,938 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:20:54 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '2074', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999976', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_fc11eea25331eac4ebdbdc434053f357', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb06dfcffd001-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; 
ma=86400'})\n", + "2025-03-21 00:20:53,940 [DEBUG] openai._base_client: request_id: req_fc11eea25331eac4ebdbdc434053f357\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Joke 0: [Why did the scarecrow win an award? Because he was outstanding in his field! ]\n", + "Joke 1: [ Parallel lines have so much in common. It’s a shame they’ll never meet. ]\n", + "Joke 2: [ Why don’t skeletons fight each other? They don’t have the guts. ]\n", + "Joke 3: [ What do you call fake spaghetti? An impasta! ]\n", + "Joke 4: [ I would tell you a construction joke, but I'm still working on it. ]\n", + "Joke 5: [ Why couldn't the bicycle stand up by itself? It was two tired. ]\n", + "Joke 6: [ Why did the tomato turn red? Because it saw the salad dressing! ]\n", + "Joke 7: [ What did the ocean say to the beach? Nothing, it just waved. ]\n", + "Joke 8: [ Why did the math book look sad? Because it had too many problems. ]\n", + "Joke 9: [ I told my computer I needed a break, and now it won't stop sending me kit-kat ads!]\n", + "-----\n", + "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.513, metadata={'responses': ['Why did the scarecrow win an award? Because he was outstanding in his field! ', ' Parallel lines have so much in common. It’s a shame they’ll never meet. ', ' Why don’t skeletons fight each other? They don’t have the guts. ', ' What do you call fake spaghetti? An impasta! ', \" I would tell you a construction joke, but I'm still working on it. \", \" Why couldn't the bicycle stand up by itself? It was two tired. \", ' Why did the tomato turn red? Because it saw the salad dressing! ', ' What did the ocean say to the beach? Nothing, it just waved. ', ' Why did the math book look sad? Because it had too many problems. 
', \" I told my computer I needed a break, and now it won't stop sending me kit-kat ads!\"]})\n", + "SimilarityScore between ind1=0 and ind2=1: 0.23585869748408375\n", + "SimilarityScore between ind1=1 and ind2=2: 0.030690112807883127\n", + "SimilarityScore between ind1=2 and ind2=3: 0.08483849065288684\n", + "SimilarityScore between ind1=3 and ind2=4: 0.750109080659053\n", + "SimilarityScore between ind1=4 and ind2=5: 0.894508987836746\n", + "SimilarityScore between ind1=5 and ind2=6: 0.9046943831161538\n", + "SimilarityScore between ind1=6 and ind2=7: 0.8888453958820549\n", + "SimilarityScore between ind1=7 and ind2=8: 0.8324994273641826\n", + "SimilarityScore between ind1=8 and ind2=9: 0.7777342312519292\n", + "SimilarityScore between ind1=9 and ind2=0: 0.8589345591870938\n", + "Avg cosine similarity: 0.6258713366242067\n", + "diversity cosine-sim inverse: 0.3741286633757933\n", + "edit-dist score: 0.7251\n", + "-------------------------------\n", + "possible diversity score: 0.8707337636418844\n" + ] + } + ], + "source": [ + "prompts = 1\n", + "responses = []\n", + "\n", + "for i in range(prompts):\n", + " res = llm(prompt=\"Tell me 10 jokes. make them split with \\'||\\'. Don't say anything else besides the joke. 
\", model_id=\"openai:gpt-4o\").data.split('||')\n", + " responses += res\n", + "\n", + "if prompts == 1 and len(responses) > 1:\n", + " for i in range(len(responses)):\n", + " print(f\"Joke {i}: [{responses[i]}]\")\n", + "\n", + "print(\"-----\")\n", + "ensemble_diversity(responses)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "---\n", + "## Improvements TODO\n", + "- Merge all functions\n", + "- fix ensembling\n", + "## Potential other cases to explore\n", + "- work ensembling all \"diversity\" related metrics \n", + " - add more metrics\n", + " - tune added metrics\n", + "- combination of validation/hallucination metric + ensembled diversity metric -> score" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ember_upgrade", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 4d09538e9e65361aa666a329266c32cdb22b988e Mon Sep 17 00:00:00 2001 From: jason-lee08 Date: Fri, 21 Mar 2025 03:34:36 -0700 Subject: [PATCH 02/14] demo updates --- .../model/base/schemas/chat_schemas.py | 1 + .../model/providers/openai/openai_provider.py | 43 +- src/ember/examples/diversity_demo.ipynb | 838 +++++++ src/ember/examples/diversity_testbench.ipynb | 2084 ++++++----------- 4 files changed, 1543 insertions(+), 1423 deletions(-) create mode 100644 src/ember/examples/diversity_demo.ipynb diff --git a/src/ember/core/registry/model/base/schemas/chat_schemas.py b/src/ember/core/registry/model/base/schemas/chat_schemas.py index ea33f021..80cdfe80 100644 --- a/src/ember/core/registry/model/base/schemas/chat_schemas.py +++ b/src/ember/core/registry/model/base/schemas/chat_schemas.py @@ -115,5 +115,6 @@ class ChatResponse(BaseModel): """ 
data: str + embedding: list[float] = None raw_output: Any = None usage: Optional[UsageStats] = None diff --git a/src/ember/core/registry/model/providers/openai/openai_provider.py b/src/ember/core/registry/model/providers/openai/openai_provider.py index 28381e2b..cd430c7e 100644 --- a/src/ember/core/registry/model/providers/openai/openai_provider.py +++ b/src/ember/core/registry/model/providers/openai/openai_provider.py @@ -326,6 +326,9 @@ def _prune_unsupported_params( logger.debug("Removing 'temperature' parameter for model: %s", model_name) kwargs.pop("temperature") return kwargs + + def _is_embedding_model(self, model_name: str) -> bool: + return model_name.startswith("text-embedding-") @retry( wait=wait_exponential(min=1, max=10), stop=stop_after_attempt(3), reraise=True @@ -384,19 +387,33 @@ def forward(self, request: ChatRequest) -> ChatResponse: ) try: - # Use the timeout parameter from the request or the default from BaseChatParameters - timeout = openai_kwargs.pop("timeout", 30) - response: Any = self.client.chat.completions.create( - model=self.model_info.name, - timeout=timeout, - **openai_kwargs, - ) - content: str = response.choices[0].message.content.strip() - usage_stats = self.usage_calculator.calculate( - raw_output=response, - model_info=self.model_info, - ) - return ChatResponse(data=content, raw_output=response, usage=usage_stats) + if self._is_embedding_model(self.model_info.name): + response: Any = self.client.embeddings.create( + model=self.model_info.name, + input=request.prompt, + timeout=30, + ) + embedding = response.data[0].embedding + usage_stats = self.usage_calculator.calculate( + raw_output=response, + model_info=self.model_info, + ) + + return ChatResponse(data="", embedding=embedding, raw_output=response, usage=usage_stats) + else: + # Use the timeout parameter from the request or the default from BaseChatParameters + timeout = openai_kwargs.pop("timeout", 30) + response: Any = self.client.chat.completions.create( + 
model=self.model_info.name, + timeout=timeout, + **openai_kwargs, + ) + content: str = response.choices[0].message.content.strip() + usage_stats = self.usage_calculator.calculate( + raw_output=response, + model_info=self.model_info, + ) + return ChatResponse(data=content, raw_output=response, usage=usage_stats) except HTTPError as http_err: if 500 <= http_err.response.status_code < 600: logger.error("OpenAI server error: %s", http_err) diff --git a/src/ember/examples/diversity_demo.ipynb b/src/ember/examples/diversity_demo.ipynb new file mode 100644 index 00000000..277d95e7 --- /dev/null +++ b/src/ember/examples/diversity_demo.ipynb @@ -0,0 +1,838 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:ember.core.registry.model.providers.anthropic.anthropic_discovery:Error fetching Anthropic models via REST API: 401 Client Error: Unauthorized for url: https://api.anthropic.com/v1/models\n", + "/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", + "I0000 00:00:1742550067.907817 410594 check_gcp_environment.cc:61] BIOS data file does not exist or cannot be opened.\n" + ] + } + ], + "source": [ + "import os \n", + "import logging\n", + "\n", + "# Set global logging level to ERROR\n", + "logging.basicConfig(level=logging.ERROR)\n", + "\n", + "os.environ[\"EMBER_LOGGING_LEVEL\"] = \"ERROR\"\n", + "\n", + "# from ember.core.registry.model.model_module.lm import LMModule, LMModuleConfig\n", + "from ember.core.registry.model.config.settings import initialize_registry\n", + "from ember.core.registry.model.base.services.model_service import ModelService" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:ember.core.registry.model.providers.anthropic.anthropic_discovery:Error fetching Anthropic models via REST API: 401 Client Error: Unauthorized for url: https://api.anthropic.com/v1/models\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 
'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 
'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n" + ] + } + ], + "source": [ + "model_registry = initialize_registry()\n", + "print(model_registry.list_models())\n", + "llm = ModelService(registry=model_registry)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['openai:gpt-4o-mini-transcribe',\n", + " 'openai:gpt-4o-audio-preview-2024-12-17',\n", + " 'openai:dall-e-3',\n", + " 'openai:dall-e-2',\n", + " 'openai:gpt-4o-audio-preview-2024-10-01',\n", + " 'openai:gpt-4o-realtime-preview-2024-10-01',\n", + " 'openai:gpt-4o-audio-preview',\n", + " 'openai:text-embedding-3-large',\n", + " 'openai:gpt-4',\n", + " 'openai:gpt-4o-mini-2024-07-18',\n", + " 'openai:gpt-4o-2024-05-13',\n", + " 'openai:gpt-4o-realtime-preview',\n", + " 'openai:gpt-4o-mini',\n", + " 'openai:gpt-4o-mini-audio-preview',\n", + " 'openai:gpt-3.5-turbo-instruct-0914',\n", + " 'openai:gpt-4o-mini-search-preview',\n", + " 'openai:gpt-3.5-turbo-1106',\n", + " 'openai:gpt-4o-search-preview',\n", + " 'openai:gpt-4-turbo',\n", + " 'openai:gpt-4o-realtime-preview-2024-12-17',\n", + " 'openai:gpt-3.5-turbo-instruct',\n", + " 'openai:gpt-3.5-turbo',\n", + " 'openai:gpt-4-turbo-preview',\n", + " 'openai:gpt-4o-mini-search-preview-2025-03-11',\n", + " 'openai:gpt-4o-mini-realtime-preview',\n", + " 'openai:gpt-3.5-turbo-0125',\n", + " 'openai:gpt-4o-2024-08-06',\n", + " 'openai:gpt-4-turbo-2024-04-09',\n", + " 'openai:gpt-3.5-turbo-16k',\n", + " 'openai:gpt-4o',\n", + " 'openai:gpt-4o-mini-realtime-preview-2024-12-17',\n", + " 'openai:gpt-4-1106-preview',\n", + " 'openai:text-embedding-ada-002',\n", + " 'openai:gpt-4-0613',\n", + " 'openai:gpt-4.5-preview',\n", + " 'openai:gpt-4.5-preview-2025-02-27',\n", + " 'openai:gpt-4o-search-preview-2025-03-11',\n", + " 'openai:gpt-4o-2024-11-20',\n", + " 'openai:gpt-4o-mini-tts',\n", + " 
'openai:gpt-4-0125-preview',\n", + " 'openai:gpt-4o-transcribe',\n", + " 'openai:text-embedding-3-small',\n", + " 'openai:gpt-4o-mini-audio-preview-2024-12-17',\n", + " 'anthropic:claude-3-sonnet',\n", + " 'anthropic:claude-3-opus',\n", + " 'anthropic:claude-3-haiku',\n", + " 'anthropic:claude-3.5-sonnet',\n", + " 'anthropic:claude-3.7-sonnet',\n", + " 'google:models/gemini-1.0-pro-vision-latest',\n", + " 'google:models/gemini-pro-vision',\n", + " 'google:models/gemini-1.5-pro-latest',\n", + " 'google:models/gemini-1.5-pro-001',\n", + " 'google:models/gemini-1.5-pro-002',\n", + " 'google:models/gemini-1.5-pro',\n", + " 'google:models/gemini-1.5-flash-latest',\n", + " 'google:models/gemini-1.5-flash-001',\n", + " 'google:models/gemini-1.5-flash-001-tuning',\n", + " 'google:models/gemini-1.5-flash',\n", + " 'google:models/gemini-1.5-flash-002',\n", + " 'google:models/gemini-1.5-flash-8b',\n", + " 'google:models/gemini-1.5-flash-8b-001',\n", + " 'google:models/gemini-1.5-flash-8b-latest',\n", + " 'google:models/gemini-1.5-flash-8b-exp-0827',\n", + " 'google:models/gemini-1.5-flash-8b-exp-0924',\n", + " 'google:models/gemini-2.0-flash-exp',\n", + " 'google:models/gemini-2.0-flash',\n", + " 'google:models/gemini-2.0-flash-001',\n", + " 'google:models/gemini-2.0-flash-exp-image-generation',\n", + " 'google:models/gemini-2.0-flash-lite-001',\n", + " 'google:models/gemini-2.0-flash-lite',\n", + " 'google:models/gemini-2.0-flash-lite-preview-02-05',\n", + " 'google:models/gemini-2.0-flash-lite-preview',\n", + " 'google:models/gemini-2.0-pro-exp',\n", + " 'google:models/gemini-2.0-pro-exp-02-05',\n", + " 'google:models/gemini-exp-1206',\n", + " 'google:models/gemini-2.0-flash-thinking-exp-01-21',\n", + " 'google:models/gemini-2.0-flash-thinking-exp',\n", + " 'google:models/gemini-2.0-flash-thinking-exp-1219',\n", + " 'google:models/learnlm-1.5-pro-experimental',\n", + " 'google:models/gemma-3-27b-it']" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": 
"execute_result" + } + ], + "source": [ + "model_registry.list_models()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q openai" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Diversity Demo" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from abc import ABC, abstractmethod\n", + "from typing import List, Protocol\n", + "import math\n", + "\n", + "\n", + "################################################################\n", + "# 1) Embedding Model Interfaces & Implementations\n", + "################################################################\n", + "\n", + "\n", + "class EmbeddingModel(Protocol):\n", + " \"\"\"Interface for embedding models.\n", + "\n", + " This protocol defines the minimal interface required to compute a text\n", + " embedding. 
Implementations may use local models, external APIs, or custom\n", + "    neural networks.\n", + "\n", + "    Methods:\n", + "        embed_text: Compute the embedding for a given text.\n", + "    \"\"\"\n", + "\n", + "    def embed_text(self, text: str) -> List[float]:\n", + "        \"\"\"Computes the embedding vector for the provided text.\n", + "\n", + "        Args:\n", + "            text (str): The text to be embedded.\n", + "\n", + "        Returns:\n", + "            List[float]: A list of floats representing the embedding vector.\n", + "        \"\"\"\n", + "        ...\n", + "\n", + "class Text_Embedding_Ada_002_Model:\n", + "    \"\"\"Embedding model backed by OpenAI's text-embedding-ada-002.\n", + "\n", + "    Concrete implementation of the EmbeddingModel protocol: it computes\n", + "    embeddings by calling OpenAI's text-embedding-ada-002 model through\n", + "    the model service.\n", + "\n", + "    Methods:\n", + "        embed_text: Compute the embedding for a given text.\n", + "    \"\"\"\n", + "\n", + "    def embed_text(self, text: str) -> List[float]:\n", + "        \"\"\"Computes the embedding vector for the provided text.\n", + "\n", + "        Args:\n", + "            text (str): The text to be embedded.\n", + "\n", + "        Returns:\n", + "            List[float]: A list of floats representing the embedding vector.\n", + "        \"\"\"\n", + "        response = llm(model_id=\"openai:text-embedding-ada-002\", prompt=text)\n", + "        return response.embedding\n", + "\n", + "\n", + "class MockEmbeddingModel:\n", + "    \"\"\"Mock implementation of an embedding model using naive ASCII encoding.\n", + "\n", + "    This simple model converts each character in the text to a normalized ASCII\n", + "    value. 
It is intended solely for demonstration and testing purposes.\n", + "\n", + " Methods:\n", + " embed_text: Converts text to a sequence of normalized ASCII values.\n", + " \"\"\"\n", + "\n", + " def embed_text(self, text: str) -> List[float]:\n", + " \"\"\"Embeds text by converting each character to its normalized ASCII code.\n", + "\n", + " Args:\n", + " text (str): The input text to be embedded.\n", + "\n", + " Returns:\n", + " List[float]: A list of floats representing the embedding. Returns an\n", + " empty list if the text is empty.\n", + " \"\"\"\n", + " if not text:\n", + " return []\n", + " return [ord(ch) / 256.0 for ch in text]\n", + "\n", + "\n", + "################################################################\n", + "# 2) Similarity Metric Interface & Implementations\n", + "################################################################\n", + "\n", + "\n", + "class SimilarityMetric(ABC):\n", + " \"\"\"Abstract base class for computing similarity between embedding vectors.\n", + "\n", + " Subclasses must implement the similarity method to calculate a similarity\n", + " score between two vectors.\n", + " \"\"\"\n", + "\n", + " @abstractmethod\n", + " def similarity(self, vec_a: List[float], vec_b: List[float]) -> float:\n", + " \"\"\"Calculates the similarity between two embedding vectors.\n", + "\n", + " Args:\n", + " vec_a (List[float]): The first embedding vector.\n", + " vec_b (List[float]): The second embedding vector.\n", + "\n", + " Returns:\n", + " float: The similarity score, typically in the range [0, 1] or [-1, 1].\n", + " \"\"\"\n", + " ...\n", + "\n", + "\n", + "class CosineSimilarity(SimilarityMetric):\n", + " \"\"\"Implementation of cosine similarity for embedding vectors.\n", + "\n", + " The cosine similarity is defined as:\n", + " similarity(a, b) = (a · b) / (||a|| * ||b||)\n", + "\n", + " Returns 0.0 if either vector is empty or if any vector's norm is zero.\n", + " \"\"\"\n", + "\n", + " def similarity(self, vec_a: List[float], 
vec_b: List[float]) -> float:\n", + " \"\"\"Computes cosine similarity between two embedding vectors.\n", + "\n", + " Args:\n", + " vec_a (List[float]): The first embedding vector.\n", + " vec_b (List[float]): The second embedding vector.\n", + "\n", + " Returns:\n", + " float: The cosine similarity score.\n", + " \"\"\"\n", + " if not vec_a or not vec_b:\n", + " return 0.0\n", + "\n", + " dot_product: float = sum(a * b for a, b in zip(vec_a, vec_b))\n", + " norm_a: float = math.sqrt(sum(a * a for a in vec_a))\n", + " norm_b: float = math.sqrt(sum(b * b for b in vec_b))\n", + " if norm_a == 0 or norm_b == 0:\n", + " return 0.0\n", + "\n", + " return dot_product / (norm_a * norm_b)\n", + "\n", + "\n", + "################################################################\n", + "# 3) High-Level Utility Function\n", + "################################################################\n", + "\n", + "\n", + "def calculate_text_similarity(\n", + " text1: str, text2: str, model: EmbeddingModel, metric: SimilarityMetric\n", + ") -> float:\n", + " \"\"\"Calculates text similarity using an embedding model and a similarity metric.\n", + "\n", + " This function generates embeddings for the provided texts and then computes a\n", + " similarity score using the given similarity metric.\n", + "\n", + " Args:\n", + " text1 (str): The first text string.\n", + " text2 (str): The second text string.\n", + " model (EmbeddingModel): An instance conforming to the embedding model interface.\n", + " metric (SimilarityMetric): An instance implementing a similarity metric.\n", + "\n", + " Returns:\n", + " float: The computed similarity score.\n", + " \"\"\"\n", + " embedding1: List[float] = model.embed_text(text=text1)\n", + " embedding2: List[float] = model.embed_text(text=text2)\n", + "\n", + " return metric.similarity(vec_a=embedding1, vec_b=embedding2)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": 
"stream", + "text": [ + "Cosine similarity Score: 0.7287\n", + "\n", + "Cosine similarity Score: 0.8205\n", + "\n", + "Cosine similarity Score: 1.0000\n", + "\n" + ] + } + ], + "source": [ + "mock_model: Text_Embedding_Ada_002_Model = Text_Embedding_Ada_002_Model()\n", + "cosine: CosineSimilarity = CosineSimilarity()\n", + "\n", + "text_a: str = \"Hello world!\"\n", + "text_b: str = \"Hello, world??\"\n", + "\n", + "diverse_text = [\"Bananas don't belong in briefcases\", \"Abraham Lincoln\", \"ERROR 404: Index Not Found\"]\n", + "\n", + "different_words_not_diverse_strs = [\"peanut butter and jelly\", \"bacon lettuce tomato\"]\n", + "\n", + "repetition_strs = [\"This is a sample text with lots of repetition.\", \n", + " \"This is a sample text with lots of repetition.\"]\n", + "\n", + "test_strings = [diverse_text, different_words_not_diverse_strs, repetition_strs]\n", + "\n", + "for test in test_strings:\n", + " score: float = calculate_text_similarity(\n", + " text1=test[0], text2=test[1], model=mock_model, metric=cosine\n", + " )\n", + "\n", + " print(f\"Cosine similarity Score: {score:.4f}\")\n", + " print(\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "---\n", + "\n", + "## Compression Ratio (WIP)\n", + "\n", + "from `src/ember/core/utils/eval/evaluators.py`" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. 
Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n", + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q diversity==0.2.0\n", + "%pip install -q spacy==3.8.4" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from typing import Any, Dict, TypeVar, Optional, List, Generic, Callable, Union\n", + "\n", + "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", + "from ember.core.utils.eval.extractors import RegexExtractor\n", + "\n", + "from diversity import compression_ratio\n", + "\n", + "T_out = TypeVar(\"T_out\")\n", + "T_truth = TypeVar(\"T_truth\")\n", + "\n", + "\n", + "class ComposedEvaluator(IEvaluator[T_out, T_truth], Generic[T_out, T_truth]):\n", + " \"\"\"Combines an output extractor with an evaluator for the extracted data.\n", + "\n", + " This evaluator first transforms the system output using the provided extractor,\n", + " then evaluates the extracted value using the specified base evaluator.\n", + "\n", + " Args:\n", + " extractor: An object with an `extract` method to process the system output.\n", + " base_evaluator (IEvaluator): An evaluator that processes the extracted output.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result of the evaluation.\n", + " \"\"\"\n", + "\n", + " def 
__init__(\n", + " self,\n", + " extractor: Any, # Expecting an extractor with an `extract` method.\n", + " base_evaluator: IEvaluator[Any, Any],\n", + " ) -> None:\n", + " self.extractor = extractor\n", + " self.base_evaluator = base_evaluator\n", + "\n", + " def evaluate(\n", + " self, system_output: T_out, correct_answer: Any, **kwargs: Any\n", + " ) -> EvaluationResult:\n", + " \"\"\"Evaluates the provided system output against the correct answer.\n", + "\n", + " Args:\n", + " system_output (T_out): The raw output generated by the system.\n", + " correct_answer (Any): The expected correct answer.\n", + " **kwargs: Additional keyword arguments for extraction or evaluation.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result of evaluating the extracted value.\n", + " \"\"\"\n", + " extracted_value = self.extractor.extract(system_output, **kwargs)\n", + " return self.base_evaluator.evaluate(extracted_value, correct_answer, **kwargs)\n", + "\n", + "\n", + "# Basic Evaluators\n", + "\n", + "\n", + "class ExactMatchEvaluator(IEvaluator[str, str]):\n", + " \"\"\"Evaluator to check for an exact match between two strings,\n", + " ignoring differences in whitespace and case.\n", + "\n", + " Example:\n", + " evaluator = ExactMatchEvaluator()\n", + " result = evaluator.evaluate(\"Hello World\", \"hello world\")\n", + "\n", + " Args:\n", + " compare_fn (Optional[Callable[[str, str], bool]]): Optional custom comparison function.\n", + " If not provided, strings are normalized (whitespace removed, lowercase) before comparison.\n", + "\n", + " Returns:\n", + " EvaluationResult: The result containing a correctness flag and a score.\n", + " \"\"\"\n", + "\n", + " def __init__(self, compare_fn: Optional[Callable[[str, str], bool]] = None) -> None:\n", + " self.compare_fn = compare_fn or self._default_compare\n", + "\n", + " def _default_compare(self, str1: str, str2: str) -> bool:\n", + " \"\"\"Default string comparison function that ignores case and whitespace.\n", + 
"\n", + " Args:\n", + " str1 (str): First string to compare\n", + " str2 (str): Second string to compare\n", + "\n", + " Returns:\n", + " bool: True if strings match after normalization\n", + " \"\"\"\n", + " return str1.strip().lower() == str2.strip().lower()\n", + "\n", + " def evaluate(\n", + " self, system_output: str, correct_answer: str, **kwargs: Any\n", + " ) -> EvaluationResult:\n", + " \"\"\"Evaluates whether a system output exactly matches the correct answer.\n", + "\n", + " Args:\n", + " system_output (str): The system-generated string.\n", + " correct_answer (str): The expected answer string.\n", + " **kwargs: Additional keyword arguments (unused).\n", + "\n", + " Returns:\n", + " EvaluationResult: An object with `is_correct` set to True if the normalized strings match,\n", + " along with a corresponding score.\n", + " \"\"\"\n", + " is_correct = self.compare_fn(system_output, correct_answer)\n", + " score = 1.0 if is_correct else 0.0\n", + " return EvaluationResult(is_correct=is_correct, score=score)\n", + "\n", + "class DiversityScoringEvaluator(IEvaluator[List[str], None]):\n", + " \"\"\"\n", + " Evaluator to test ensemble outputs -> score them (float)\n", + " \"\"\"\n", + " def evaluate(\n", + " self, \n", + " system_output: List[str], \n", + " **kwargs) -> EvaluationResult:\n", + " if system_output is None or len(system_output) == 0:\n", + " return EvaluationResult(is_correct=False, score=-1)\n", + "\n", + "\n", + " letter_sum = sum(len(response) for response in system_output)\n", + " ratio = compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", + "\n", + " return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output})\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "\n", + "---\n", + "\n", + "## **Edit Distance**" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q python-Levenshtein" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import Levenshtein\n", + "from typing import List\n", + "from dataclasses import dataclass\n", + "\n", + "@dataclass\n", + "class EvaluationResult:\n", + " is_correct: bool\n", + " score: float\n", + " metadata: dict\n", + "\n", + "class EditDistanceScoringEvaluator:\n", + "\n", + " def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult:\n", + " if system_output is None or len(system_output) == 0:\n", + " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", + "\n", + " diversity_score = self.compute_distance(system_output)\n", + "\n", + " return EvaluationResult(\n", + " is_correct=True, \n", + " score=diversity_score,\n", + " metadata={'responses': system_output}\n", + " )\n", + "\n", + " def compute_distance(self, outputs: List[str]) -> float:\n", + " n = len(outputs)\n", + " if n < 2:\n", + " return 0.0\n", + "\n", + " total_distance = 0\n", + " pairs = 0\n", + "\n", + " for i in range(n):\n", + " for j in range(i + 1, n):\n", + " dist = Levenshtein.distance(outputs[i], outputs[j])\n", + " max_len = max(len(outputs[i]), len(outputs[j]))\n", + " normalized_dist = dist / max_len if max_len > 0 else 0 \n", + " total_distance += normalized_dist\n", + " pairs += 1\n", + " \n", + " return total_distance / pairs if pairs > 0 else 0.0" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Diversity Score: 0.8635\n", + "Is Correct: True\n", + "Metadata: {'responses': [\"Bananas don't belong in briefcases\", 'Abraham Lincoln', 'ERROR 404: Index Not Found']}\n", + "\n", + "Diversity Score: 0.8573\n", + "Is Correct: True\n", + "Metadata: {'responses': ['peanut butter and jelly', 'bacon lettuce tomato', 'grilled cheese', 'Banh mi']}\n", + "\n", + "Diversity Score: 0.0000\n", + "Is Correct: True\n", + "Metadata: {'responses': ['This is a sample text with lots of repetition.', 'This is a sample text with lots of repetition.', 'This is a sample text with lots of repetition.']}\n", + "\n" + ] + } + ], + "source": [ + "distance_evaluator = EditDistanceScoringEvaluator()\n", + "\n", + "# input_strs = [\n", + "# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", + "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. 
It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", + "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", + "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", + "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. 
Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", + "# ]\n", + "\n", + "diverse_text = [\"Bananas don't belong in briefcases\", \"Abraham Lincoln\", \"ERROR 404: Index Not Found\"]\n", + "\n", + "different_words_not_diverse_strs = [\"peanut butter and jelly\", \"bacon lettuce tomato\", \"grilled cheese\"]\n", + "\n", + "repetition_strs = [\"This is a sample text with lots of repetition.\", \n", + " \"This is a sample text with lots of repetition.\",\n", + " \"This is a sample text with lots of repetition.\"]\n", + "\n", + "test_strings = [diverse_text, different_words_not_diverse_strs, repetition_strs]\n", + "\n", + "for test in test_strings:\n", + " edit_distance = distance_evaluator.evaluate(test)\n", + "\n", + " print(f\"Diversity Score: {edit_distance.score:.4f}\")\n", + " print(f\"Is Correct: {edit_distance.is_correct}\")\n", + " print(f\"Metadata: {edit_distance.metadata}\")\n", + " print(\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "\n", + "---\n", + "\n", + "## Novelty Score\n", + "\n", + "#### (From AidanBench)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List\n", + "from dataclasses import dataclass\n", + "import numpy as np\n", + "\n", + "@dataclass\n", + "class EvaluationResult:\n", + " is_correct: bool\n", + " score: float\n", + " metadata: dict\n", + "\n", + "class NoveltyScoringEvaluator:\n", + " \n", + " def evaluate(self, model: EmbeddingModel, system_output: List[str], **kwargs) -> EvaluationResult:\n", + " if not system_output or len(system_output) == 0:\n", + " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", + " \n", + " self.model = model\n", + "\n", + " novelty_scores = [self.compute_novelty(r, system_output[:i]) for i, r in enumerate(system_output)]\n", + "\n", + " print(\"scores: \", 
novelty_scores)\n", + "\n", + " avg_novelty = sum(novelty_scores) / len(novelty_scores) if novelty_scores else 0.0\n", + "\n", + " return EvaluationResult(\n", + " is_correct=True,\n", + " score=avg_novelty,\n", + " metadata={'responses': system_output, 'novelty_scores': novelty_scores}\n", + " )\n", + "\n", + " def compute_novelty(self, response: str, prior_responses: List[str]) -> float:\n", + " if not prior_responses:\n", + " return 1.0\n", + "\n", + " new_embedding = self.model.embed_text(response)\n", + " prior_embeddings = [self.model.embed_text(r) for r in prior_responses]\n", + "\n", + " similarities = [\n", + " np.dot(new_embedding, prior_embedding) /\n", + " (np.linalg.norm(new_embedding) * np.linalg.norm(prior_embedding))\n", + " for prior_embedding in prior_embeddings\n", + " ]\n", + "\n", + " return 1 - max(similarities)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "scores: [1.0, 0.2712776724205106, 0.259596190452704]\n", + "Diversity Score: 0.5103\n", + "Is Correct: True\n", + "Metadata: {'responses': [\"Bananas don't belong in briefcases\", 'Abraham Lincoln', 'ERROR 404: Index Not Found'], 'novelty_scores': [1.0, 0.2712776724205106, 0.259596190452704]}\n", + "\n", + "scores: [1.0, 0.17952900510509806, 0.13489158507389332]\n", + "Diversity Score: 0.4381\n", + "Is Correct: True\n", + "Metadata: {'responses': ['peanut butter and jelly', 'bacon lettuce tomato', 'grilled cheese'], 'novelty_scores': [1.0, 0.17952900510509806, 0.13489158507389332]}\n", + "\n", + "scores: [1.0, 0.0, 0.0]\n", + "Diversity Score: 0.3333\n", + "Is Correct: True\n", + "Metadata: {'responses': ['This is a sample text with lots of repetition.', 'This is a sample text with lots of repetition.', 'This is a sample text with lots of repetition.'], 'novelty_scores': [1.0, 0.0, 0.0]}\n", + "\n" + ] + } + ], + "source": [ + "novelty_evaluator = 
NoveltyScoringEvaluator()\n", + "\n", + "diverse_text = [\"Bananas don't belong in briefcases\", \"Abraham Lincoln\", \"ERROR 404: Index Not Found\"]\n", + "\n", + "different_words_not_diverse_strs = [\"peanut butter and jelly\", \"bacon lettuce tomato\", \"grilled cheese\"]\n", + "\n", + "repetition_strs = [\"This is a sample text with lots of repetition.\", \n", + " \"This is a sample text with lots of repetition.\",\n", + " \"This is a sample text with lots of repetition.\"]\n", + "\n", + "ada_002: Text_Embedding_Ada_002_Model = Text_Embedding_Ada_002_Model()\n", + "test_strings = [diverse_text, different_words_not_diverse_strs, repetition_strs]\n", + "\n", + "for test in test_strings:\n", + " results = novelty_evaluator.evaluate(ada_002, test)\n", + "\n", + " print(f\"Diversity Score: {results.score:.4f}\")\n", + " print(f\"Is Correct: {results.is_correct}\")\n", + " print(f\"Metadata: {results.metadata}\")\n", + " print(\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ember_upgrade", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/src/ember/examples/diversity_testbench.ipynb b/src/ember/examples/diversity_testbench.ipynb index 63b5bdff..f42c6407 100644 --- a/src/ember/examples/diversity_testbench.ipynb +++ b/src/ember/examples/diversity_testbench.ipynb @@ -26,33 +26,17 @@ }, { "cell_type": "code", -<<<<<<< HEAD - "execution_count": 10, -======= - "execution_count": 33, ->>>>>>> feb7b31 (added embedding model) + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import logging, sys, os\n", - "from typing 
import Dict, Any, List\n", - "\n", - "logging.basicConfig(level=logging.ERROR)" - ] - }, - { - "cell_type": "code", -<<<<<<< HEAD - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "os.environ[\"OPENAI_API_KEY\"] = ''" + "from typing import Dict, Any, List" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -61,39 +45,14 @@ }, { "cell_type": "code", - "execution_count": 13, -======= - "execution_count": 34, ->>>>>>> feb7b31 (added embedding model) - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ -<<<<<<< HEAD - "/Users/kathleenge/Desktop/NON/ember-v2\n" -======= - "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\n" - ] - } - ], - "source": [ - "openai_key = os.getenv(\"OPENAI_API_KEY\")\n", - "print(openai_key)" - ] - }, - { - "cell_type": "code", - "execution_count": 35, + "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "/root/ember/connor/ember-v2\n" ->>>>>>> feb7b31 (added embedding model) + "/root/ember/jared/ember\n" ] } ], @@ -111,22 +70,14 @@ }, { "cell_type": "code", -<<<<<<< HEAD - "execution_count": 14, -======= - "execution_count": 36, ->>>>>>> feb7b31 (added embedding model) + "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ -<<<<<<< HEAD - "/Users/kathleenge/desktop/non/ember-v2\r\n" -======= - "/root/ember/connor/ember-v2\n" ->>>>>>> feb7b31 (added embedding model) + "/root/ember/jared/ember\n" ] } ], @@ -143,11 +94,7 @@ }, { "cell_type": "code", -<<<<<<< HEAD - "execution_count": 15, -======= - "execution_count": 37, ->>>>>>> feb7b31 (added embedding model) + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -164,13 +111,473 @@ }, { "cell_type": "code", -<<<<<<< 
HEAD - "execution_count": 16, -======= - "execution_count": 38, ->>>>>>> feb7b31 (added embedding model) + "execution_count": 6, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-03-21 02:00:21,378 [DEBUG] ConfigManager: Loading configuration...\n", + "2025-03-21 02:00:21,379 [DEBUG] ConfigManager: Configuration loaded successfully\n", + "2025-03-21 02:00:21,379 [INFO] ember.core.registry.model.initialization: Execute model discovery (timeout: 30 seconds per provider, running in parallel)\n", + "2025-03-21 02:00:22,003 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 02:00:22,005 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", + "2025-03-21 02:00:22,013 [DEBUG] ember.core.registry.model.base.registry.discovery: OPENAI_API_KEY found, initialized OpenAIDiscovery successfully\n", + "2025-03-21 02:00:22,014 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 02:00:22,015 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", + "2025-03-21 02:00:22,022 [DEBUG] ember.core.registry.model.base.registry.discovery: ANTHROPIC_API_KEY found, initialized AnthropicDiscovery successfully\n", + "2025-03-21 02:00:22,023 [DEBUG] ember.core.registry.model.base.registry.discovery: GOOGLE_API_KEY found, initialized DeepmindDiscovery successfully\n", + "2025-03-21 02:00:22,024 [INFO] ember.core.registry.model.initialization: Initiating model discovery via ModelDiscoveryService\n", + "2025-03-21 02:00:22,027 [DEBUG] openai._base_client: Request options: {'method': 'get', 'url': '/models', 'post_parser': ._parser at 0x7fd791a30220>, 'json_data': None}\n", + "2025-03-21 02:00:22,029 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Starting 
Anthropic model fetch via REST API...\n", + "2025-03-21 02:00:22,032 [DEBUG] openai._base_client: Sending HTTP Request: GET https://api.openai.com/v1/models\n", + "2025-03-21 02:00:22,032 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Calling Anthropic REST API: https://api.anthropic.com/v1/models with timeout=(2,5)\n", + "2025-03-21 02:00:22,033 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=5.0 socket_options=None\n", + "2025-03-21 02:00:22,037 [DEBUG] urllib3.connectionpool: Starting new HTTPS connection (1): api.anthropic.com:443\n", + "2025-03-21 02:00:22,065 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 02:00:22,067 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=5.0\n", + "2025-03-21 02:00:22,127 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 02:00:22,130 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 02:00:22,131 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 02:00:22,133 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 02:00:22,134 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 02:00:22,134 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 02:00:22,417 [DEBUG] urllib3.connectionpool: https://api.anthropic.com:443 \"GET /v1/models HTTP/1.1\" 401 86\n", + "2025-03-21 02:00:22,419 [ERROR] ember.core.registry.model.providers.anthropic.anthropic_discovery: Error fetching Anthropic models via REST API: 401 Client Error: Unauthorized for url: https://api.anthropic.com/v1/models\n", + "2025-03-21 02:00:22,420 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Using fallback models due to API request error\n", + "2025-03-21 02:00:22,421 [INFO] ember.core.registry.model.base.registry.discovery: 
Provider AnthropicDiscovery completed in 0.39s\n", + "2025-03-21 02:00:22,529 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 09:00:22 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'openai-version', b'2020-10-01'), (b'x-request-id', b'edeac53a55ded28a21bbef63a67b7187'), (b'openai-processing-ms', b'318'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=9oqKjTNazifaL7c8EB7goelYAka5BwxIcA8Zdpk3olw-1742547622-1.0.1.1-5Vdcdtx1vgOSNwpdHNHX064wrD84MoXWK60t5tnMCx.G5WMmpH8lXlQS4lUmO1RIBNVIfDOed7xbsshEXao9UfK2TQdWuq5k4WitIqV8xkQ; path=/; expires=Fri, 21-Mar-25 09:30:22 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=0zdaLCIygVBxhND6uISxrfeO1Q6pVOqcGGiPGaWdFUo-1742547622791-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c42302ee7f9d8-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 02:00:22,530 [INFO] httpx: HTTP Request: GET https://api.openai.com/v1/models \"HTTP/1.1 200 OK\"\n", + "2025-03-21 02:00:22,532 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 02:00:22,534 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 02:00:22,535 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 02:00:22,535 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 02:00:22,536 [DEBUG] openai._base_client: HTTP Response: GET https://api.openai.com/v1/models \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 09:00:22 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('openai-version', '2020-10-01'), 
('x-request-id', 'edeac53a55ded28a21bbef63a67b7187'), ('openai-processing-ms', '318'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=9oqKjTNazifaL7c8EB7goelYAka5BwxIcA8Zdpk3olw-1742547622-1.0.1.1-5Vdcdtx1vgOSNwpdHNHX064wrD84MoXWK60t5tnMCx.G5WMmpH8lXlQS4lUmO1RIBNVIfDOed7xbsshEXao9UfK2TQdWuq5k4WitIqV8xkQ; path=/; expires=Fri, 21-Mar-25 09:30:22 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=0zdaLCIygVBxhND6uISxrfeO1Q6pVOqcGGiPGaWdFUo-1742547622791-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923c42302ee7f9d8-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", + "2025-03-21 02:00:22,537 [DEBUG] openai._base_client: request_id: edeac53a55ded28a21bbef63a67b7187\n", + "2025-03-21 02:00:22,541 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Fetched 65 models from OpenAI API\n", + "2025-03-21 02:00:22,542 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Filtered to 43 relevant models\n", + "2025-03-21 02:00:22,543 [INFO] ember.core.registry.model.base.registry.discovery: Provider OpenAIDiscovery completed in 0.52s\n", + "2025-03-21 02:00:22,545 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 43 models from OpenAIDiscovery\n", + "2025-03-21 02:00:22,547 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 5 models from AnthropicDiscovery\n", + "/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", + "I0000 00:00:1742547622.658771 400399 check_gcp_environment.cc:61] BIOS data file does not exist or cannot be opened.\n", + "2025-03-21 02:00:22,816 [INFO] ember.core.registry.model.base.registry.discovery: Provider DeepmindDiscovery completed in 0.79s\n", + "2025-03-21 02:00:22,817 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 32 models from DeepmindDiscovery\n", + "2025-03-21 02:00:22,818 [INFO] ember.core.registry.model.base.registry.discovery: Discovered 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4o-mini', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 
'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 02:00:22,819 [DEBUG] ember.core.registry.model.initialization: Raw discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 
'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4o-mini', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 
'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 02:00:22,820 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-transcribe discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,821 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-transcribe\n", + "2025-03-21 02:00:22,822 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,822 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-12-17\n", + "2025-03-21 02:00:22,822 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-3 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,823 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-3\n", + "2025-03-21 02:00:22,823 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-2 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,823 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
openai:dall-e-2\n", + "2025-03-21 02:00:22,824 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,824 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-10-01\n", + "2025-03-21 02:00:22,825 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,825 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-10-01\n", + "2025-03-21 02:00:22,825 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,826 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview\n", + "2025-03-21 02:00:22,826 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-large discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,826 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-large\n", + "2025-03-21 02:00:22,827 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,827 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4\n", + "2025-03-21 02:00:22,828 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-05-13 discovered via 
API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,829 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-05-13\n", + "2025-03-21 02:00:22,830 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,832 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview\n", + "2025-03-21 02:00:22,832 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,833 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview\n", + "2025-03-21 02:00:22,833 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct-0914 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,833 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct-0914\n", + "2025-03-21 02:00:22,834 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,835 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview\n", + "2025-03-21 02:00:22,835 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-1106 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,836 [DEBUG] 
ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-1106\n", + "2025-03-21 02:00:22,837 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,837 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview\n", + "2025-03-21 02:00:22,837 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,838 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo\n", + "2025-03-21 02:00:22,838 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,839 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-12-17\n", + "2025-03-21 02:00:22,839 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,840 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct\n", + "2025-03-21 02:00:22,841 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,841 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo\n", + "2025-03-21 02:00:22,841 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,842 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-preview\n", + "2025-03-21 02:00:22,842 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,843 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview-2025-03-11\n", + "2025-03-21 02:00:22,844 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,845 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview\n", + "2025-03-21 02:00:22,845 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-2024-07-18 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,846 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-2024-07-18\n", + "2025-03-21 02:00:22,846 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-0125 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,847 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-0125\n", + "2025-03-21 02:00:22,850 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-08-06 discovered via API but not in local config; using 
defaults with environment API key.\n", + "2025-03-21 02:00:22,851 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-08-06\n", + "2025-03-21 02:00:22,851 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-2024-04-09 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,852 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-2024-04-09\n", + "2025-03-21 02:00:22,853 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-16k discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,853 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-16k\n", + "2025-03-21 02:00:22,854 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,855 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o\n", + "2025-03-21 02:00:22,855 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,856 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview-2024-12-17\n", + "2025-03-21 02:00:22,857 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,857 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini\n", 
+ "2025-03-21 02:00:22,858 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-1106-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,858 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-1106-preview\n", + "2025-03-21 02:00:22,859 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-ada-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,859 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-ada-002\n", + "2025-03-21 02:00:22,860 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0613 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,860 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0613\n", + "2025-03-21 02:00:22,861 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,861 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview\n", + "2025-03-21 02:00:22,861 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview-2025-02-27 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,862 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview-2025-02-27\n", + "2025-03-21 02:00:22,862 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview-2025-03-11 discovered via API but not in local config; using defaults with 
environment API key.\n", + "2025-03-21 02:00:22,863 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview-2025-03-11\n", + "2025-03-21 02:00:22,863 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-11-20 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,864 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-11-20\n", + "2025-03-21 02:00:22,864 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-tts discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,865 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-tts\n", + "2025-03-21 02:00:22,865 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0125-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,866 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0125-preview\n", + "2025-03-21 02:00:22,866 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-transcribe discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,869 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-transcribe\n", + "2025-03-21 02:00:22,870 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-small discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,870 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-small\n", + 
"2025-03-21 02:00:22,871 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,872 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview-2024-12-17\n", + "2025-03-21 02:00:22,873 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-sonnet discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,873 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-sonnet\n", + "2025-03-21 02:00:22,874 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-opus discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,874 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-opus\n", + "2025-03-21 02:00:22,874 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-haiku discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,875 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-haiku\n", + "2025-03-21 02:00:22,875 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.5-sonnet discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,876 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.5-sonnet\n", + "2025-03-21 02:00:22,877 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.7-sonnet discovered via API but not in local config; 
using defaults with environment API key.\n", + "2025-03-21 02:00:22,877 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.7-sonnet\n", + "2025-03-21 02:00:22,878 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.0-pro-vision-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,878 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.0-pro-vision-latest\n", + "2025-03-21 02:00:22,879 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-pro-vision discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,879 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-pro-vision\n", + "2025-03-21 02:00:22,879 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,880 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-latest\n", + "2025-03-21 02:00:22,880 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,881 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-001\n", + "2025-03-21 02:00:22,881 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,882 [DEBUG] 
ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-002\n", + "2025-03-21 02:00:22,884 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,884 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro\n", + "2025-03-21 02:00:22,885 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,885 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-latest\n", + "2025-03-21 02:00:22,885 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,886 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001\n", + "2025-03-21 02:00:22,887 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001-tuning discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,887 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001-tuning\n", + "2025-03-21 02:00:22,888 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,889 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
google:models/gemini-1.5-flash\n", + "2025-03-21 02:00:22,889 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-002 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,890 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-002\n", + "2025-03-21 02:00:22,891 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,891 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b\n", + "2025-03-21 02:00:22,892 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,893 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-001\n", + "2025-03-21 02:00:22,893 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-latest discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,895 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-latest\n", + "2025-03-21 02:00:22,896 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0827 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,896 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0827\n", + "2025-03-21 02:00:22,897 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0924 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,897 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0924\n", + "2025-03-21 02:00:22,898 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,899 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp\n", + "2025-03-21 02:00:22,899 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,900 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash\n", + "2025-03-21 02:00:22,900 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,901 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-001\n", + "2025-03-21 02:00:22,901 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp-image-generation discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,902 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp-image-generation\n", + "2025-03-21 02:00:22,902 [WARNING] ember.core.registry.model.base.registry.discovery: Model 
google:models/gemini-2.0-flash-lite-001 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,903 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-001\n", + "2025-03-21 02:00:22,903 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,904 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite\n", + "2025-03-21 02:00:22,904 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview-02-05 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,906 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview-02-05\n", + "2025-03-21 02:00:22,907 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,908 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview\n", + "2025-03-21 02:00:22,909 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,909 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp\n", + "2025-03-21 02:00:22,910 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp-02-05 discovered via 
API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,910 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp-02-05\n", + "2025-03-21 02:00:22,911 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-exp-1206 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,914 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-exp-1206\n", + "2025-03-21 02:00:22,915 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-01-21 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,916 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-01-21\n", + "2025-03-21 02:00:22,916 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,917 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp\n", + "2025-03-21 02:00:22,917 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-1219 discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,918 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-1219\n", + "2025-03-21 02:00:22,918 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/learnlm-1.5-pro-experimental discovered via API but not in local config; 
using defaults with environment API key.\n", + "2025-03-21 02:00:22,919 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/learnlm-1.5-pro-experimental\n", + "2025-03-21 02:00:22,919 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemma-3-27b-it discovered via API but not in local config; using defaults with environment API key.\n", + "2025-03-21 02:00:22,920 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemma-3-27b-it\n", + "2025-03-21 02:00:22,920 [DEBUG] ember.core.registry.model.initialization: Merged discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4o-mini', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 
'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 02:00:22,921 [INFO] ember.core.registry.model.initialization: Registering 80 models from discovery\n", + "2025-03-21 02:00:22,921 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-transcribe (provider: Openai)\n", + "2025-03-21 02:00:22,922 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider 
Openai\n", + "2025-03-21 02:00:22,923 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", + "2025-03-21 02:00:22,923 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 02:00:22,925 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 02:00:22,925 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 02:00:22,926 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-3 (provider: Openai)\n", + "2025-03-21 02:00:22,927 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", + "2025-03-21 02:00:22,927 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", + "2025-03-21 02:00:22,928 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-2 (provider: Openai)\n", + "2025-03-21 02:00:22,929 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", + "2025-03-21 02:00:22,930 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", + "2025-03-21 02:00:22,930 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-10-01 (provider: Openai)\n", + "2025-03-21 02:00:22,931 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", + "2025-03-21 02:00:22,932 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", + "2025-03-21 02:00:22,933 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-10-01 (provider: Openai)\n", + "2025-03-21 02:00:22,933 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", + "2025-03-21 02:00:22,934 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", + "2025-03-21 02:00:22,935 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview (provider: Openai)\n", + "2025-03-21 02:00:22,936 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", + "2025-03-21 02:00:22,937 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", + "2025-03-21 02:00:22,938 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-large (provider: Openai)\n", + "2025-03-21 02:00:22,939 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", + "2025-03-21 02:00:22,939 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", + "2025-03-21 02:00:22,940 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4 (provider: Openai)\n", + "2025-03-21 02:00:22,940 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", + "2025-03-21 02:00:22,941 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", + "2025-03-21 02:00:22,942 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-05-13 (provider: Openai)\n", + "2025-03-21 02:00:22,942 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", + "2025-03-21 02:00:22,944 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", + "2025-03-21 02:00:22,944 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview (provider: Openai)\n", + "2025-03-21 02:00:22,945 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", + "2025-03-21 02:00:22,945 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", + "2025-03-21 02:00:22,946 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview (provider: Openai)\n", + "2025-03-21 02:00:22,946 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", + "2025-03-21 02:00:22,948 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", + "2025-03-21 02:00:22,948 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct-0914 (provider: Openai)\n", + "2025-03-21 02:00:22,949 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", + "2025-03-21 02:00:22,950 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", + "2025-03-21 02:00:22,950 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview (provider: Openai)\n", + "2025-03-21 02:00:22,951 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", + "2025-03-21 02:00:22,951 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", + "2025-03-21 02:00:22,952 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-1106 (provider: Openai)\n", + "2025-03-21 02:00:22,953 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", + "2025-03-21 02:00:22,954 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", + "2025-03-21 02:00:22,954 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview (provider: Openai)\n", + "2025-03-21 02:00:22,955 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", + "2025-03-21 02:00:22,955 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", + "2025-03-21 02:00:22,956 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo (provider: Openai)\n", + "2025-03-21 02:00:22,956 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", + "2025-03-21 02:00:22,957 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", + "2025-03-21 02:00:22,958 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 02:00:22,959 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 02:00:22,960 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 02:00:22,961 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct (provider: Openai)\n", + "2025-03-21 02:00:22,961 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", + "2025-03-21 02:00:22,962 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", + "2025-03-21 02:00:22,962 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo (provider: Openai)\n", + "2025-03-21 02:00:22,962 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", + "2025-03-21 02:00:22,963 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", + "2025-03-21 02:00:22,963 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-preview (provider: Openai)\n", + "2025-03-21 02:00:22,964 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", + "2025-03-21 02:00:22,964 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", + "2025-03-21 02:00:22,965 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview-2025-03-11 (provider: Openai)\n", + "2025-03-21 02:00:22,965 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 02:00:22,965 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 02:00:22,966 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview (provider: Openai)\n", + "2025-03-21 02:00:22,966 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", + "2025-03-21 02:00:22,967 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", + "2025-03-21 02:00:22,967 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-2024-07-18 (provider: Openai)\n", + "2025-03-21 02:00:22,968 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", + "2025-03-21 02:00:22,968 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", + "2025-03-21 02:00:22,969 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-0125 (provider: Openai)\n", + "2025-03-21 02:00:22,972 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", + 
"2025-03-21 02:00:22,972 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", + "2025-03-21 02:00:22,973 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-08-06 (provider: Openai)\n", + "2025-03-21 02:00:22,974 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", + "2025-03-21 02:00:22,974 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", + "2025-03-21 02:00:22,975 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-2024-04-09 (provider: Openai)\n", + "2025-03-21 02:00:22,975 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", + "2025-03-21 02:00:22,976 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", + "2025-03-21 02:00:22,976 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-16k (provider: Openai)\n", + "2025-03-21 02:00:22,977 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", + "2025-03-21 02:00:22,977 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", + "2025-03-21 02:00:22,978 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o (provider: Openai)\n", + "2025-03-21 02:00:22,978 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", + "2025-03-21 02:00:22,979 [INFO] ember.core.registry.model.initialization: 
Successfully registered model: openai:gpt-4o with provider Openai\n", + "2025-03-21 02:00:22,979 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 02:00:22,980 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 02:00:22,981 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", + "2025-03-21 02:00:22,984 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini (provider: Openai)\n", + "2025-03-21 02:00:22,985 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", + "2025-03-21 02:00:22,986 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", + "2025-03-21 02:00:22,987 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-1106-preview (provider: Openai)\n", + "2025-03-21 02:00:22,988 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", + "2025-03-21 02:00:22,988 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", + "2025-03-21 02:00:22,989 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-ada-002 (provider: Openai)\n", + "2025-03-21 02:00:22,989 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-ada-002 with provider Openai\n", + "2025-03-21 02:00:22,990 [INFO] ember.core.registry.model.initialization: Successfully registered 
model: openai:text-embedding-ada-002 with provider Openai\n", + "2025-03-21 02:00:22,991 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0613 (provider: Openai)\n", + "2025-03-21 02:00:22,992 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", + "2025-03-21 02:00:22,993 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", + "2025-03-21 02:00:22,996 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview (provider: Openai)\n", + "2025-03-21 02:00:22,997 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", + "2025-03-21 02:00:22,997 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", + "2025-03-21 02:00:22,998 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview-2025-02-27 (provider: Openai)\n", + "2025-03-21 02:00:22,999 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", + "2025-03-21 02:00:23,002 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", + "2025-03-21 02:00:23,002 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview-2025-03-11 (provider: Openai)\n", + "2025-03-21 02:00:23,003 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 02:00:23,004 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", + "2025-03-21 02:00:23,004 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-11-20 (provider: Openai)\n", + "2025-03-21 02:00:23,005 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", + "2025-03-21 02:00:23,005 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", + "2025-03-21 02:00:23,006 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-tts (provider: Openai)\n", + "2025-03-21 02:00:23,007 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", + "2025-03-21 02:00:23,007 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", + "2025-03-21 02:00:23,008 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0125-preview (provider: Openai)\n", + "2025-03-21 02:00:23,008 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", + "2025-03-21 02:00:23,009 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", + "2025-03-21 02:00:23,009 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-transcribe (provider: Openai)\n", + "2025-03-21 02:00:23,010 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", + "2025-03-21 02:00:23,010 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", + "2025-03-21 
02:00:23,014 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-small (provider: Openai)\n", + "2025-03-21 02:00:23,014 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", + "2025-03-21 02:00:23,015 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", + "2025-03-21 02:00:23,015 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview-2024-12-17 (provider: Openai)\n", + "2025-03-21 02:00:23,016 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 02:00:23,016 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", + "2025-03-21 02:00:23,017 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-sonnet (provider: Anthropic)\n", + "2025-03-21 02:00:23,017 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", + "2025-03-21 02:00:23,018 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", + "2025-03-21 02:00:23,019 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-opus (provider: Anthropic)\n", + "2025-03-21 02:00:23,019 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", + "2025-03-21 02:00:23,020 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", + 
"2025-03-21 02:00:23,020 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-haiku (provider: Anthropic)\n", + "2025-03-21 02:00:23,021 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", + "2025-03-21 02:00:23,021 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", + "2025-03-21 02:00:23,022 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.5-sonnet (provider: Anthropic)\n", + "2025-03-21 02:00:23,022 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", + "2025-03-21 02:00:23,023 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", + "2025-03-21 02:00:23,023 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.7-sonnet (provider: Anthropic)\n", + "2025-03-21 02:00:23,023 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", + "2025-03-21 02:00:23,025 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", + "2025-03-21 02:00:23,025 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.0-pro-vision-latest (provider: Google)\n", + "2025-03-21 02:00:23,026 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", + "2025-03-21 02:00:23,026 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with 
provider Google\n", + "2025-03-21 02:00:23,027 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-pro-vision (provider: Google)\n", + "2025-03-21 02:00:23,027 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", + "2025-03-21 02:00:23,028 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", + "2025-03-21 02:00:23,029 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-latest (provider: Google)\n", + "2025-03-21 02:00:23,031 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", + "2025-03-21 02:00:23,034 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", + "2025-03-21 02:00:23,034 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-001 (provider: Google)\n", + "2025-03-21 02:00:23,035 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", + "2025-03-21 02:00:23,036 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", + "2025-03-21 02:00:23,037 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-002 (provider: Google)\n", + "2025-03-21 02:00:23,037 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", + "2025-03-21 02:00:23,038 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
google:models/gemini-1.5-pro-002 with provider Google\n", + "2025-03-21 02:00:23,039 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro (provider: Google)\n", + "2025-03-21 02:00:23,039 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", + "2025-03-21 02:00:23,047 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", + "2025-03-21 02:00:23,048 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-latest (provider: Google)\n", + "2025-03-21 02:00:23,049 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", + "2025-03-21 02:00:23,050 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", + "2025-03-21 02:00:23,051 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001 (provider: Google)\n", + "2025-03-21 02:00:23,051 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", + "2025-03-21 02:00:23,052 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", + "2025-03-21 02:00:23,053 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001-tuning (provider: Google)\n", + "2025-03-21 02:00:23,054 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", + "2025-03-21 02:00:23,054 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", + "2025-03-21 02:00:23,055 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash (provider: Google)\n", + "2025-03-21 02:00:23,056 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", + "2025-03-21 02:00:23,057 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", + "2025-03-21 02:00:23,057 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-002 (provider: Google)\n", + "2025-03-21 02:00:23,058 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", + "2025-03-21 02:00:23,059 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", + "2025-03-21 02:00:23,060 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b (provider: Google)\n", + "2025-03-21 02:00:23,060 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", + "2025-03-21 02:00:23,061 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", + "2025-03-21 02:00:23,070 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-001 (provider: Google)\n", + "2025-03-21 02:00:23,071 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider 
Google\n", + "2025-03-21 02:00:23,072 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", + "2025-03-21 02:00:23,073 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-latest (provider: Google)\n", + "2025-03-21 02:00:23,073 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", + "2025-03-21 02:00:23,074 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", + "2025-03-21 02:00:23,075 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0827 (provider: Google)\n", + "2025-03-21 02:00:23,076 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", + "2025-03-21 02:00:23,077 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", + "2025-03-21 02:00:23,078 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0924 (provider: Google)\n", + "2025-03-21 02:00:23,086 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", + "2025-03-21 02:00:23,087 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", + "2025-03-21 02:00:23,088 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp (provider: Google)\n", + "2025-03-21 02:00:23,089 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", + "2025-03-21 02:00:23,090 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", + "2025-03-21 02:00:23,091 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash (provider: Google)\n", + "2025-03-21 02:00:23,092 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", + "2025-03-21 02:00:23,092 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", + "2025-03-21 02:00:23,093 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-001 (provider: Google)\n", + "2025-03-21 02:00:23,096 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", + "2025-03-21 02:00:23,097 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", + "2025-03-21 02:00:23,098 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp-image-generation (provider: Google)\n", + "2025-03-21 02:00:23,099 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", + "2025-03-21 02:00:23,100 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", + "2025-03-21 02:00:23,100 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
google:models/gemini-2.0-flash-lite-001 (provider: Google)\n", + "2025-03-21 02:00:23,101 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", + "2025-03-21 02:00:23,102 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", + "2025-03-21 02:00:23,103 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite (provider: Google)\n", + "2025-03-21 02:00:23,103 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", + "2025-03-21 02:00:23,104 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", + "2025-03-21 02:00:23,104 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview-02-05 (provider: Google)\n", + "2025-03-21 02:00:23,104 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", + "2025-03-21 02:00:23,104 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", + "2025-03-21 02:00:23,105 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview (provider: Google)\n", + "2025-03-21 02:00:23,105 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", + "2025-03-21 02:00:23,106 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with 
provider Google\n", + "2025-03-21 02:00:23,106 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp (provider: Google)\n", + "2025-03-21 02:00:23,107 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", + "2025-03-21 02:00:23,107 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", + "2025-03-21 02:00:23,108 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp-02-05 (provider: Google)\n", + "2025-03-21 02:00:23,108 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", + "2025-03-21 02:00:23,108 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", + "2025-03-21 02:00:23,109 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-exp-1206 (provider: Google)\n", + "2025-03-21 02:00:23,109 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", + "2025-03-21 02:00:23,109 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", + "2025-03-21 02:00:23,110 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-01-21 (provider: Google)\n", + "2025-03-21 02:00:23,113 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", + "2025-03-21 02:00:23,114 [INFO] ember.core.registry.model.initialization: 
Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", + "2025-03-21 02:00:23,114 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp (provider: Google)\n", + "2025-03-21 02:00:23,115 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", + "2025-03-21 02:00:23,116 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", + "2025-03-21 02:00:23,116 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-1219 (provider: Google)\n", + "2025-03-21 02:00:23,118 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", + "2025-03-21 02:00:23,118 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", + "2025-03-21 02:00:23,119 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/learnlm-1.5-pro-experimental (provider: Google)\n", + "2025-03-21 02:00:23,119 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", + "2025-03-21 02:00:23,120 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", + "2025-03-21 02:00:23,120 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemma-3-27b-it (provider: Google)\n", + "2025-03-21 02:00:23,121 [INFO] ember.core.registry.model.initialization: Successfully registered 
model: google:models/gemma-3-27b-it with provider Google\n", + "2025-03-21 02:00:23,121 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", + "2025-03-21 02:00:23,122 [INFO] ember.core.registry.model.initialization: Registration summary: 80 new, 0 skipped, 0 failed\n", + "2025-03-21 02:00:23,124 [INFO] ember.core.registry.model.initialization: Successfully discovered and registered 80 new models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4o-mini', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 
'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", + "2025-03-21 02:00:23,124 [INFO] ember.core.registry.model.initialization: Discovered 80 new models in 1.74s: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13'] and 70 more\n", + "2025-03-21 02:00:23,821 [DEBUG] httpcore.connection: close.started\n", + "2025-03-21 02:00:23,823 [DEBUG] httpcore.connection: close.complete\n" + ] + } + ], "source": [ "# from 
ember.core.registry.model.model_module.lm import LMModule, LMModuleConfig\n", "from ember.core.registry.model.config.settings import initialize_ember\n", @@ -180,519 +587,37 @@ "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", "\n", "from ember.core.registry.model import load_model, ChatResponse\n", - "from ember.core.registry.model.base.services.model_service import ModelService" + "from ember.core.registry.model.base.services.model_service import ModelService\n" ] }, { "cell_type": "code", -<<<<<<< HEAD - "execution_count": 17, + "execution_count": 7, "metadata": { "scrolled": true }, "outputs": [ { - "ename": "ValidationError", - "evalue": "4 validation errors for EmberSettings\nregistry.models.3.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.4.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.5.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.6.api_key\n Value error, No API key provided or defaulted. 
[type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error", + "ename": "SyntaxError", + "evalue": "invalid syntax (1625361117.py, line 1)", "output_type": "error", "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[17], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m model_registry \u001b[38;5;241m=\u001b[39m initialize_ember()\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(model_registry\u001b[38;5;241m.\u001b[39mlist_models())\n\u001b[1;32m 3\u001b[0m llm \u001b[38;5;241m=\u001b[39m ModelService(registry\u001b[38;5;241m=\u001b[39mmodel_registry)\n", - "File \u001b[0;32m~/Desktop/NON/ember-v2/src/ember/core/registry/model/config/settings.py:252\u001b[0m, in \u001b[0;36minitialize_ember\u001b[0;34m(config_path, auto_register, auto_discover)\u001b[0m\n\u001b[1;32m 249\u001b[0m settings_obj\u001b[38;5;241m.\u001b[39mregistry\u001b[38;5;241m.\u001b[39mauto_register \u001b[38;5;241m=\u001b[39m auto_register\n\u001b[1;32m 250\u001b[0m settings_obj\u001b[38;5;241m.\u001b[39mregistry\u001b[38;5;241m.\u001b[39mauto_discover \u001b[38;5;241m=\u001b[39m auto_discover\n\u001b[0;32m--> 252\u001b[0m registry_instance: ModelRegistry \u001b[38;5;241m=\u001b[39m _initialize_model_registry(settings\u001b[38;5;241m=\u001b[39msettings_obj)\n\u001b[1;32m 253\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m registry_instance\n", - "File \u001b[0;32m~/Desktop/NON/ember-v2/src/ember/core/registry/model/config/settings.py:184\u001b[0m, in \u001b[0;36m_initialize_model_registry\u001b[0;34m(settings)\u001b[0m\n\u001b[1;32m 181\u001b[0m merged_config \u001b[38;5;241m=\u001b[39m resolve_env_vars(data\u001b[38;5;241m=\u001b[39mmerged_config)\n\u001b[1;32m 182\u001b[0m 
logger\u001b[38;5;241m.\u001b[39mdebug(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFinal merged config keys: \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28mlist\u001b[39m(merged_config\u001b[38;5;241m.\u001b[39mkeys()))\n\u001b[0;32m--> 184\u001b[0m final_settings: EmberSettings \u001b[38;5;241m=\u001b[39m EmberSettings(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmerged_config)\n\u001b[1;32m 185\u001b[0m registry: ModelRegistry \u001b[38;5;241m=\u001b[39m ModelRegistry(logger\u001b[38;5;241m=\u001b[39mlogger)\n\u001b[1;32m 187\u001b[0m discovered_models: Dict[\u001b[38;5;28mstr\u001b[39m, ModelInfo] \u001b[38;5;241m=\u001b[39m {}\n", - "File \u001b[0;32m~/anaconda3/lib/python3.11/site-packages/pydantic_settings/main.py:176\u001b[0m, in \u001b[0;36mBaseSettings.__init__\u001b[0;34m(__pydantic_self__, _case_sensitive, _nested_model_default_partial_update, _env_prefix, _env_file, _env_file_encoding, _env_ignore_empty, _env_nested_delimiter, _env_nested_max_split, _env_parse_none_str, _env_parse_enums, _cli_prog_name, _cli_parse_args, _cli_settings_source, _cli_parse_none_str, _cli_hide_none_type, _cli_avoid_json, _cli_enforce_required, _cli_use_class_docs_for_groups, _cli_exit_on_error, _cli_prefix, _cli_flag_prefix_char, _cli_implicit_flags, _cli_ignore_unknown_args, _cli_kebab_case, _secrets_dir, **values)\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\n\u001b[1;32m 148\u001b[0m __pydantic_self__,\n\u001b[1;32m 149\u001b[0m _case_sensitive: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m|\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 174\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mvalues: Any,\n\u001b[1;32m 175\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 
176\u001b[0m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__init__\u001b[39m(\n\u001b[1;32m 177\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m__pydantic_self__\u001b[38;5;241m.\u001b[39m_settings_build_values(\n\u001b[1;32m 178\u001b[0m values,\n\u001b[1;32m 179\u001b[0m _case_sensitive\u001b[38;5;241m=\u001b[39m_case_sensitive,\n\u001b[1;32m 180\u001b[0m _nested_model_default_partial_update\u001b[38;5;241m=\u001b[39m_nested_model_default_partial_update,\n\u001b[1;32m 181\u001b[0m _env_prefix\u001b[38;5;241m=\u001b[39m_env_prefix,\n\u001b[1;32m 182\u001b[0m _env_file\u001b[38;5;241m=\u001b[39m_env_file,\n\u001b[1;32m 183\u001b[0m _env_file_encoding\u001b[38;5;241m=\u001b[39m_env_file_encoding,\n\u001b[1;32m 184\u001b[0m _env_ignore_empty\u001b[38;5;241m=\u001b[39m_env_ignore_empty,\n\u001b[1;32m 185\u001b[0m _env_nested_delimiter\u001b[38;5;241m=\u001b[39m_env_nested_delimiter,\n\u001b[1;32m 186\u001b[0m _env_nested_max_split\u001b[38;5;241m=\u001b[39m_env_nested_max_split,\n\u001b[1;32m 187\u001b[0m _env_parse_none_str\u001b[38;5;241m=\u001b[39m_env_parse_none_str,\n\u001b[1;32m 188\u001b[0m _env_parse_enums\u001b[38;5;241m=\u001b[39m_env_parse_enums,\n\u001b[1;32m 189\u001b[0m _cli_prog_name\u001b[38;5;241m=\u001b[39m_cli_prog_name,\n\u001b[1;32m 190\u001b[0m _cli_parse_args\u001b[38;5;241m=\u001b[39m_cli_parse_args,\n\u001b[1;32m 191\u001b[0m _cli_settings_source\u001b[38;5;241m=\u001b[39m_cli_settings_source,\n\u001b[1;32m 192\u001b[0m _cli_parse_none_str\u001b[38;5;241m=\u001b[39m_cli_parse_none_str,\n\u001b[1;32m 193\u001b[0m _cli_hide_none_type\u001b[38;5;241m=\u001b[39m_cli_hide_none_type,\n\u001b[1;32m 194\u001b[0m _cli_avoid_json\u001b[38;5;241m=\u001b[39m_cli_avoid_json,\n\u001b[1;32m 195\u001b[0m _cli_enforce_required\u001b[38;5;241m=\u001b[39m_cli_enforce_required,\n\u001b[1;32m 196\u001b[0m _cli_use_class_docs_for_groups\u001b[38;5;241m=\u001b[39m_cli_use_class_docs_for_groups,\n\u001b[1;32m 197\u001b[0m 
_cli_exit_on_error\u001b[38;5;241m=\u001b[39m_cli_exit_on_error,\n\u001b[1;32m 198\u001b[0m _cli_prefix\u001b[38;5;241m=\u001b[39m_cli_prefix,\n\u001b[1;32m 199\u001b[0m _cli_flag_prefix_char\u001b[38;5;241m=\u001b[39m_cli_flag_prefix_char,\n\u001b[1;32m 200\u001b[0m _cli_implicit_flags\u001b[38;5;241m=\u001b[39m_cli_implicit_flags,\n\u001b[1;32m 201\u001b[0m _cli_ignore_unknown_args\u001b[38;5;241m=\u001b[39m_cli_ignore_unknown_args,\n\u001b[1;32m 202\u001b[0m _cli_kebab_case\u001b[38;5;241m=\u001b[39m_cli_kebab_case,\n\u001b[1;32m 203\u001b[0m _secrets_dir\u001b[38;5;241m=\u001b[39m_secrets_dir,\n\u001b[1;32m 204\u001b[0m )\n\u001b[1;32m 205\u001b[0m )\n", - "File \u001b[0;32m~/anaconda3/lib/python3.11/site-packages/pydantic/main.py:214\u001b[0m, in \u001b[0;36mBaseModel.__init__\u001b[0;34m(self, **data)\u001b[0m\n\u001b[1;32m 212\u001b[0m \u001b[38;5;66;03m# `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks\u001b[39;00m\n\u001b[1;32m 213\u001b[0m __tracebackhide__ \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[0;32m--> 214\u001b[0m validated_self \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m__pydantic_validator__\u001b[38;5;241m.\u001b[39mvalidate_python(data, self_instance\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n\u001b[1;32m 215\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m validated_self:\n\u001b[1;32m 216\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(\n\u001b[1;32m 217\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mA custom validator is returning a value other than `self`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 218\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mReturning anything other than `self` from a top level model validator 
isn\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt supported when validating via `__init__`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 219\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mSee the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 220\u001b[0m stacklevel\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m,\n\u001b[1;32m 221\u001b[0m )\n", - "\u001b[0;31mValidationError\u001b[0m: 4 validation errors for EmberSettings\nregistry.models.3.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.4.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.5.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error\nregistry.models.6.api_key\n Value error, No API key provided or defaulted. [type=value_error, input_value=None, input_type=NoneType]\n For further information visit https://errors.pydantic.dev/2.10/v/value_error" -======= - "execution_count": 39, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/tmp/ipykernel_374013/1570386974.py:1: DeprecationWarning: initialize_ember() is deprecated. 
Use initialize_registry() from ember.core.registry.model.initialization instead.\n", - " model_registry = initialize_ember()\n", - "2025-03-21 00:23:33,902 [DEBUG] ConfigManager: Loading configuration...\n", - "2025-03-21 00:23:33,915 [DEBUG] ConfigManager: Configuration loaded successfully\n", - "2025-03-21 00:23:33,923 [INFO] ember.core.registry.model.initialization: Execute model discovery (timeout: 30 seconds per provider, running in parallel)\n", - "2025-03-21 00:23:33,939 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - "2025-03-21 00:23:33,964 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", - "2025-03-21 00:23:34,021 [DEBUG] ember.core.registry.model.base.registry.discovery: OPENAI_API_KEY found, initialized OpenAIDiscovery successfully\n", - "2025-03-21 00:23:34,026 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - "2025-03-21 00:23:34,031 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", - "2025-03-21 00:23:34,073 [DEBUG] ember.core.registry.model.base.registry.discovery: ANTHROPIC_API_KEY found, initialized AnthropicDiscovery successfully\n", - "2025-03-21 00:23:34,076 [DEBUG] ember.core.registry.model.base.registry.discovery: GOOGLE_API_KEY found, initialized DeepmindDiscovery successfully\n", - "2025-03-21 00:23:34,077 [INFO] ember.core.registry.model.initialization: Initiating model discovery via ModelDiscoveryService\n", - "2025-03-21 00:23:34,094 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Starting Anthropic model fetch via REST API...\n", - "2025-03-21 00:23:34,107 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Calling Anthropic REST API: https://api.anthropic.com/v1/models with timeout=(2,5)\n", - "2025-03-21 00:23:34,218 [DEBUG] openai._base_client: 
Request options: {'method': 'get', 'url': '/models', 'post_parser': ._parser at 0x7f16ab00f4c0>, 'json_data': None}\n", - "2025-03-21 00:23:34,253 [DEBUG] urllib3.connectionpool: Starting new HTTPS connection (1): api.anthropic.com:443\n", - "2025-03-21 00:23:34,256 [DEBUG] openai._base_client: Sending HTTP Request: GET https://api.openai.com/v1/models\n", - "2025-03-21 00:23:34,272 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=5.0 socket_options=None\n", - "2025-03-21 00:23:34,333 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 00:23:34,335 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=5.0\n", - "2025-03-21 00:23:34,359 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 00:23:34,362 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:23:34,374 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:23:34,377 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:23:34,380 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:23:34,383 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:23:34,499 [DEBUG] urllib3.connectionpool: https://api.anthropic.com:443 \"GET /v1/models HTTP/1.1\" 401 86\n", - "2025-03-21 00:23:34,506 [ERROR] ember.core.registry.model.providers.anthropic.anthropic_discovery: Error fetching Anthropic models via REST API: 401 Client Error: Unauthorized for url: https://api.anthropic.com/v1/models\n", - "2025-03-21 00:23:34,512 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Using fallback models due to API request error\n", - "2025-03-21 00:23:34,544 [INFO] ember.core.registry.model.base.registry.discovery: Provider DeepmindDiscovery completed in 0.43s\n", - "2025-03-21 00:23:34,546 [INFO] 
ember.core.registry.model.base.registry.discovery: Provider AnthropicDiscovery completed in 0.45s\n", - "2025-03-21 00:23:34,664 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:23:35 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'openai-version', b'2020-10-01'), (b'x-request-id', b'047be343f630a078753ab850368c8104'), (b'openai-processing-ms', b'213'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=lQoQY2KyrWGyGIl5tZm.yBfn5JGOSCA.AqZNb5sDQZ4-1742541815-1.0.1.1-jkeEwlGMhqCzRiPce_S94AqxyEmbQh2B4RQosPoE7.eFMwL5UwmspCv.OEN88cyk98iKiq0wLvcEGKQdTKIjJrKLMq4kGA32abjIo.do_WM; path=/; expires=Fri, 21-Mar-25 07:53:35 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=E2ZuBrVws6lg65OWU8SjS7lE_GkfGfdTWJ17B8epbyU-1742541815014-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb4661860d03d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:23:34,674 [INFO] httpx: HTTP Request: GET https://api.openai.com/v1/models \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:23:34,677 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:23:34,684 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:23:34,689 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:23:34,694 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:23:34,698 [DEBUG] openai._base_client: HTTP Response: GET https://api.openai.com/v1/models \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 07:23:35 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 
'keep-alive'), ('openai-version', '2020-10-01'), ('x-request-id', '047be343f630a078753ab850368c8104'), ('openai-processing-ms', '213'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=lQoQY2KyrWGyGIl5tZm.yBfn5JGOSCA.AqZNb5sDQZ4-1742541815-1.0.1.1-jkeEwlGMhqCzRiPce_S94AqxyEmbQh2B4RQosPoE7.eFMwL5UwmspCv.OEN88cyk98iKiq0wLvcEGKQdTKIjJrKLMq4kGA32abjIo.do_WM; path=/; expires=Fri, 21-Mar-25 07:53:35 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=E2ZuBrVws6lg65OWU8SjS7lE_GkfGfdTWJ17B8epbyU-1742541815014-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923bb4661860d03d-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:23:34,702 [DEBUG] openai._base_client: request_id: 047be343f630a078753ab850368c8104\n", - "2025-03-21 00:23:34,722 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Fetched 65 models from OpenAI API\n", - "2025-03-21 00:23:34,724 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Filtered to 43 relevant models\n", - "2025-03-21 00:23:34,729 [INFO] ember.core.registry.model.base.registry.discovery: Provider OpenAIDiscovery completed in 0.65s\n", - "2025-03-21 00:23:34,732 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 43 models from OpenAIDiscovery\n", - "2025-03-21 00:23:34,735 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 5 models from AnthropicDiscovery\n", - "2025-03-21 00:23:34,737 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 32 models from DeepmindDiscovery\n", - "2025-03-21 00:23:34,741 [INFO] ember.core.registry.model.base.registry.discovery: Discovered 80 models: ['openai:gpt-4o-mini-transcribe', 
'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 
'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 00:23:34,745 [DEBUG] ember.core.registry.model.initialization: Raw discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 
'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 00:23:34,752 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-transcribe discovered via API but not in local config; using defaults with environment 
API key.\n", - "2025-03-21 00:23:34,757 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-transcribe\n", - "2025-03-21 00:23:34,760 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,763 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-12-17\n", - "2025-03-21 00:23:34,764 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-3 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,765 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-3\n", - "2025-03-21 00:23:34,767 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-2 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,770 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-2\n", - "2025-03-21 00:23:34,772 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,775 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-10-01\n", - "2025-03-21 00:23:34,776 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,778 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
openai:gpt-4o-realtime-preview-2024-10-01\n", - "2025-03-21 00:23:34,780 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,782 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview\n", - "2025-03-21 00:23:34,783 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-large discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,784 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-large\n", - "2025-03-21 00:23:34,785 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,786 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4\n", - "2025-03-21 00:23:34,789 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-05-13 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,791 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-05-13\n", - "2025-03-21 00:23:34,792 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,794 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview\n", - "2025-03-21 00:23:34,797 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview discovered via API but not in 
local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,800 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview\n", - "2025-03-21 00:23:34,803 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct-0914 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,807 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct-0914\n", - "2025-03-21 00:23:34,809 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,811 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview\n", - "2025-03-21 00:23:34,812 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-1106 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,814 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-1106\n", - "2025-03-21 00:23:34,815 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,815 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview\n", - "2025-03-21 00:23:34,817 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,818 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully 
merged model info for openai:gpt-4-turbo\n", - "2025-03-21 00:23:34,820 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,823 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-12-17\n", - "2025-03-21 00:23:34,825 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,828 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct\n", - "2025-03-21 00:23:34,832 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,836 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo\n", - "2025-03-21 00:23:34,842 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,846 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-preview\n", - "2025-03-21 00:23:34,848 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,852 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview-2025-03-11\n", - "2025-03-21 00:23:34,856 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,859 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview\n", - "2025-03-21 00:23:34,862 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-0125 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,863 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-0125\n", - "2025-03-21 00:23:34,867 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-08-06 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,868 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-08-06\n", - "2025-03-21 00:23:34,870 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-2024-04-09 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,872 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-2024-04-09\n", - "2025-03-21 00:23:34,873 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-16k discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,874 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-16k\n", - "2025-03-21 00:23:34,875 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 
00:23:34,876 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o\n", - "2025-03-21 00:23:34,877 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,878 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview-2024-12-17\n", - "2025-03-21 00:23:34,881 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-1106-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,883 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-1106-preview\n", - "2025-03-21 00:23:34,884 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-ada-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,885 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-ada-002\n", - "2025-03-21 00:23:34,889 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0613 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,891 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0613\n", - "2025-03-21 00:23:34,894 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,897 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview\n", - "2025-03-21 00:23:34,899 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview-2025-02-27 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,900 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview-2025-02-27\n", - "2025-03-21 00:23:34,901 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,902 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview-2025-03-11\n", - "2025-03-21 00:23:34,903 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-11-20 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,904 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-11-20\n", - "2025-03-21 00:23:34,905 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-2024-07-18 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,905 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-2024-07-18\n", - "2025-03-21 00:23:34,906 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-tts discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,907 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-tts\n", - "2025-03-21 00:23:34,907 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini discovered via API but not in local config; using defaults with environment API 
key.\n", - "2025-03-21 00:23:34,908 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini\n", - "2025-03-21 00:23:34,909 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0125-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,912 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0125-preview\n", - "2025-03-21 00:23:34,913 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-transcribe discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,915 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-transcribe\n", - "2025-03-21 00:23:34,916 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-small discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,917 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-small\n", - "2025-03-21 00:23:34,917 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,918 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview-2024-12-17\n", - "2025-03-21 00:23:34,919 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-sonnet discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,920 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-sonnet\n", - 
"2025-03-21 00:23:34,921 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-opus discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,922 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-opus\n", - "2025-03-21 00:23:34,925 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-haiku discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,927 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-haiku\n", - "2025-03-21 00:23:34,928 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.5-sonnet discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,929 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.5-sonnet\n", - "2025-03-21 00:23:34,931 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.7-sonnet discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,932 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.7-sonnet\n", - "2025-03-21 00:23:34,934 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.0-pro-vision-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,935 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.0-pro-vision-latest\n", - "2025-03-21 00:23:34,936 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-pro-vision discovered via API but not in local 
config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,937 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-pro-vision\n", - "2025-03-21 00:23:34,938 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,940 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-latest\n", - "2025-03-21 00:23:34,940 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,941 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-001\n", - "2025-03-21 00:23:34,942 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,943 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-002\n", - "2025-03-21 00:23:34,944 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,946 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro\n", - "2025-03-21 00:23:34,947 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,947 [DEBUG] 
ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-latest\n", - "2025-03-21 00:23:34,949 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,951 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001\n", - "2025-03-21 00:23:34,952 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001-tuning discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,953 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001-tuning\n", - "2025-03-21 00:23:34,955 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,956 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash\n", - "2025-03-21 00:23:34,957 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,958 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-002\n", - "2025-03-21 00:23:34,959 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,960 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
google:models/gemini-1.5-flash-8b\n", - "2025-03-21 00:23:34,961 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,965 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-001\n", - "2025-03-21 00:23:34,966 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,967 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-latest\n", - "2025-03-21 00:23:34,969 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0827 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,970 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0827\n", - "2025-03-21 00:23:34,970 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0924 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,971 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0924\n", - "2025-03-21 00:23:34,973 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,975 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp\n", - "2025-03-21 
00:23:34,976 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,977 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash\n", - "2025-03-21 00:23:34,978 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,980 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-001\n", - "2025-03-21 00:23:34,982 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp-image-generation discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,983 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp-image-generation\n", - "2025-03-21 00:23:34,985 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,986 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-001\n", - "2025-03-21 00:23:34,987 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,987 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite\n", - "2025-03-21 00:23:34,989 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview-02-05 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,989 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview-02-05\n", - "2025-03-21 00:23:34,990 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,991 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview\n", - "2025-03-21 00:23:34,993 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,994 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp\n", - "2025-03-21 00:23:34,997 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp-02-05 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:34,998 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp-02-05\n", - "2025-03-21 00:23:35,000 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-exp-1206 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:35,001 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-exp-1206\n", - "2025-03-21 00:23:35,003 [WARNING] ember.core.registry.model.base.registry.discovery: Model 
google:models/gemini-2.0-flash-thinking-exp-01-21 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:35,004 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-01-21\n", - "2025-03-21 00:23:35,004 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:35,005 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp\n", - "2025-03-21 00:23:35,006 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-1219 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:35,008 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-1219\n", - "2025-03-21 00:23:35,011 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/learnlm-1.5-pro-experimental discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:35,013 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/learnlm-1.5-pro-experimental\n", - "2025-03-21 00:23:35,014 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemma-3-27b-it discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:23:35,015 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemma-3-27b-it\n", - "2025-03-21 00:23:35,017 [DEBUG] ember.core.registry.model.initialization: Merged discovery found 80 models: 
['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 
'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 00:23:35,018 [INFO] ember.core.registry.model.initialization: Registering 80 models from discovery\n", - "2025-03-21 00:23:35,022 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-transcribe (provider: Openai)\n", - "2025-03-21 00:23:35,025 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", - "2025-03-21 00:23:35,027 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", - "2025-03-21 00:23:35,031 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 00:23:35,032 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:23:35,035 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider 
Openai\n", - "2025-03-21 00:23:35,037 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-3 (provider: Openai)\n", - "2025-03-21 00:23:35,038 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", - "2025-03-21 00:23:35,039 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", - "2025-03-21 00:23:35,040 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-2 (provider: Openai)\n", - "2025-03-21 00:23:35,041 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", - "2025-03-21 00:23:35,042 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", - "2025-03-21 00:23:35,043 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-10-01 (provider: Openai)\n", - "2025-03-21 00:23:35,044 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", - "2025-03-21 00:23:35,045 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", - "2025-03-21 00:23:35,047 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-10-01 (provider: Openai)\n", - "2025-03-21 00:23:35,048 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", - "2025-03-21 00:23:35,050 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", - "2025-03-21 00:23:35,051 
[DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview (provider: Openai)\n", - "2025-03-21 00:23:35,056 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", - "2025-03-21 00:23:35,057 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", - "2025-03-21 00:23:35,059 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-large (provider: Openai)\n", - "2025-03-21 00:23:35,063 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", - "2025-03-21 00:23:35,064 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", - "2025-03-21 00:23:35,066 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4 (provider: Openai)\n", - "2025-03-21 00:23:35,067 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", - "2025-03-21 00:23:35,068 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", - "2025-03-21 00:23:35,069 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-05-13 (provider: Openai)\n", - "2025-03-21 00:23:35,073 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", - "2025-03-21 00:23:35,075 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", - "2025-03-21 00:23:35,076 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
openai:gpt-4o-realtime-preview (provider: Openai)\n", - "2025-03-21 00:23:35,077 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", - "2025-03-21 00:23:35,078 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", - "2025-03-21 00:23:35,079 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview (provider: Openai)\n", - "2025-03-21 00:23:35,080 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", - "2025-03-21 00:23:35,081 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", - "2025-03-21 00:23:35,083 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct-0914 (provider: Openai)\n", - "2025-03-21 00:23:35,083 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", - "2025-03-21 00:23:35,088 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", - "2025-03-21 00:23:35,090 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview (provider: Openai)\n", - "2025-03-21 00:23:35,091 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", - "2025-03-21 00:23:35,092 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", - "2025-03-21 00:23:35,093 [DEBUG] ember.core.registry.model.initialization: Attempting to 
register discovered model: openai:gpt-3.5-turbo-1106 (provider: Openai)\n", - "2025-03-21 00:23:35,096 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", - "2025-03-21 00:23:35,097 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", - "2025-03-21 00:23:35,098 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview (provider: Openai)\n", - "2025-03-21 00:23:35,098 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", - "2025-03-21 00:23:35,099 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", - "2025-03-21 00:23:35,101 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo (provider: Openai)\n", - "2025-03-21 00:23:35,102 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", - "2025-03-21 00:23:35,104 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", - "2025-03-21 00:23:35,108 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 00:23:35,109 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:23:35,111 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:23:35,114 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
openai:gpt-3.5-turbo-instruct (provider: Openai)\n", - "2025-03-21 00:23:35,115 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", - "2025-03-21 00:23:35,116 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", - "2025-03-21 00:23:35,118 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo (provider: Openai)\n", - "2025-03-21 00:23:35,119 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", - "2025-03-21 00:23:35,120 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", - "2025-03-21 00:23:35,121 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-preview (provider: Openai)\n", - "2025-03-21 00:23:35,122 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", - "2025-03-21 00:23:35,123 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", - "2025-03-21 00:23:35,124 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview-2025-03-11 (provider: Openai)\n", - "2025-03-21 00:23:35,126 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 00:23:35,127 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 00:23:35,128 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
openai:gpt-4o-mini-realtime-preview (provider: Openai)\n", - "2025-03-21 00:23:35,130 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", - "2025-03-21 00:23:35,131 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", - "2025-03-21 00:23:35,132 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-0125 (provider: Openai)\n", - "2025-03-21 00:23:35,132 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", - "2025-03-21 00:23:35,133 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", - "2025-03-21 00:23:35,135 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-08-06 (provider: Openai)\n", - "2025-03-21 00:23:35,136 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", - "2025-03-21 00:23:35,136 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", - "2025-03-21 00:23:35,137 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-2024-04-09 (provider: Openai)\n", - "2025-03-21 00:23:35,143 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", - "2025-03-21 00:23:35,144 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", - "2025-03-21 00:23:35,145 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-16k 
(provider: Openai)\n", - "2025-03-21 00:23:35,147 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", - "2025-03-21 00:23:35,148 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", - "2025-03-21 00:23:35,148 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o (provider: Openai)\n", - "2025-03-21 00:23:35,149 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", - "2025-03-21 00:23:35,150 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", - "2025-03-21 00:23:35,151 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 00:23:35,152 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:23:35,155 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:23:35,157 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-1106-preview (provider: Openai)\n", - "2025-03-21 00:23:35,159 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", - "2025-03-21 00:23:35,160 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", - "2025-03-21 00:23:35,163 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-ada-002 (provider: Openai)\n", - "2025-03-21 
00:23:35,168 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-ada-002 with provider Openai\n", - "2025-03-21 00:23:35,170 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-ada-002 with provider Openai\n", - "2025-03-21 00:23:35,172 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0613 (provider: Openai)\n", - "2025-03-21 00:23:35,173 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", - "2025-03-21 00:23:35,174 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", - "2025-03-21 00:23:35,174 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview (provider: Openai)\n", - "2025-03-21 00:23:35,176 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", - "2025-03-21 00:23:35,177 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", - "2025-03-21 00:23:35,178 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview-2025-02-27 (provider: Openai)\n", - "2025-03-21 00:23:35,179 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", - "2025-03-21 00:23:35,183 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", - "2025-03-21 00:23:35,184 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview-2025-03-11 (provider: Openai)\n", - "2025-03-21 00:23:35,185 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 00:23:35,187 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 00:23:35,189 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-11-20 (provider: Openai)\n", - "2025-03-21 00:23:35,190 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", - "2025-03-21 00:23:35,192 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", - "2025-03-21 00:23:35,193 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-2024-07-18 (provider: Openai)\n", - "2025-03-21 00:23:35,194 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", - "2025-03-21 00:23:35,196 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", - "2025-03-21 00:23:35,197 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-tts (provider: Openai)\n", - "2025-03-21 00:23:35,200 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", - "2025-03-21 00:23:35,202 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", - "2025-03-21 00:23:35,203 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini (provider: Openai)\n", - "2025-03-21 00:23:35,204 [INFO] ember.core.registry.model.initialization: 
Successfully registered model: openai:gpt-4o-mini with provider Openai\n", - "2025-03-21 00:23:35,205 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", - "2025-03-21 00:23:35,208 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0125-preview (provider: Openai)\n", - "2025-03-21 00:23:35,209 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", - "2025-03-21 00:23:35,210 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", - "2025-03-21 00:23:35,212 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-transcribe (provider: Openai)\n", - "2025-03-21 00:23:35,213 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", - "2025-03-21 00:23:35,215 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", - "2025-03-21 00:23:35,217 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-small (provider: Openai)\n", - "2025-03-21 00:23:35,220 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", - "2025-03-21 00:23:35,222 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", - "2025-03-21 00:23:35,224 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 00:23:35,226 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:23:35,228 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:23:35,230 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-sonnet (provider: Anthropic)\n", - "2025-03-21 00:23:35,232 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", - "2025-03-21 00:23:35,233 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", - "2025-03-21 00:23:35,235 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-opus (provider: Anthropic)\n", - "2025-03-21 00:23:35,237 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", - "2025-03-21 00:23:35,239 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", - "2025-03-21 00:23:35,241 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-haiku (provider: Anthropic)\n", - "2025-03-21 00:23:35,242 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", - "2025-03-21 00:23:35,252 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", - "2025-03-21 00:23:35,260 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.5-sonnet (provider: Anthropic)\n", - "2025-03-21 00:23:35,261 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
anthropic:claude-3.5-sonnet with provider Anthropic\n", - "2025-03-21 00:23:35,262 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", - "2025-03-21 00:23:35,263 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.7-sonnet (provider: Anthropic)\n", - "2025-03-21 00:23:35,265 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", - "2025-03-21 00:23:35,266 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", - "2025-03-21 00:23:35,266 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.0-pro-vision-latest (provider: Google)\n", - "2025-03-21 00:23:35,267 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", - "2025-03-21 00:23:35,268 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", - "2025-03-21 00:23:35,269 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-pro-vision (provider: Google)\n", - "2025-03-21 00:23:35,270 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", - "2025-03-21 00:23:35,270 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", - "2025-03-21 00:23:35,271 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-latest (provider: Google)\n", - "2025-03-21 00:23:35,271 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", - "2025-03-21 00:23:35,273 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", - "2025-03-21 00:23:35,275 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-001 (provider: Google)\n", - "2025-03-21 00:23:35,276 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", - "2025-03-21 00:23:35,277 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", - "2025-03-21 00:23:35,278 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-002 (provider: Google)\n", - "2025-03-21 00:23:35,280 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", - "2025-03-21 00:23:35,281 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", - "2025-03-21 00:23:35,283 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro (provider: Google)\n", - "2025-03-21 00:23:35,285 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", - "2025-03-21 00:23:35,285 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", - "2025-03-21 00:23:35,286 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-latest (provider: Google)\n", - "2025-03-21 
00:23:35,287 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", - "2025-03-21 00:23:35,288 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", - "2025-03-21 00:23:35,290 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001 (provider: Google)\n", - "2025-03-21 00:23:35,290 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", - "2025-03-21 00:23:35,291 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", - "2025-03-21 00:23:35,292 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001-tuning (provider: Google)\n", - "2025-03-21 00:23:35,292 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", - "2025-03-21 00:23:35,293 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", - "2025-03-21 00:23:35,296 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash (provider: Google)\n", - "2025-03-21 00:23:35,305 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", - "2025-03-21 00:23:35,307 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", - "2025-03-21 00:23:35,308 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
google:models/gemini-1.5-flash-002 (provider: Google)\n", - "2025-03-21 00:23:35,309 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", - "2025-03-21 00:23:35,310 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", - "2025-03-21 00:23:35,312 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b (provider: Google)\n", - "2025-03-21 00:23:35,314 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", - "2025-03-21 00:23:35,315 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", - "2025-03-21 00:23:35,318 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-001 (provider: Google)\n", - "2025-03-21 00:23:35,319 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", - "2025-03-21 00:23:35,321 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", - "2025-03-21 00:23:35,325 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-latest (provider: Google)\n", - "2025-03-21 00:23:35,326 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", - "2025-03-21 00:23:35,328 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", - "2025-03-21 00:23:35,330 [DEBUG] 
ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0827 (provider: Google)\n", - "2025-03-21 00:23:35,332 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", - "2025-03-21 00:23:35,335 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", - "2025-03-21 00:23:35,338 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0924 (provider: Google)\n", - "2025-03-21 00:23:35,340 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", - "2025-03-21 00:23:35,341 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", - "2025-03-21 00:23:35,344 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp (provider: Google)\n", - "2025-03-21 00:23:35,352 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", - "2025-03-21 00:23:35,354 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", - "2025-03-21 00:23:35,357 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash (provider: Google)\n", - "2025-03-21 00:23:35,359 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", - "2025-03-21 00:23:35,362 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
google:models/gemini-2.0-flash with provider Google\n", - "2025-03-21 00:23:35,365 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-001 (provider: Google)\n", - "2025-03-21 00:23:35,369 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", - "2025-03-21 00:23:35,372 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", - "2025-03-21 00:23:35,374 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp-image-generation (provider: Google)\n", - "2025-03-21 00:23:35,376 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", - "2025-03-21 00:23:35,378 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", - "2025-03-21 00:23:35,380 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-001 (provider: Google)\n", - "2025-03-21 00:23:35,382 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", - "2025-03-21 00:23:35,384 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", - "2025-03-21 00:23:35,386 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite (provider: Google)\n", - "2025-03-21 00:23:35,388 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", - 
"2025-03-21 00:23:35,390 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", - "2025-03-21 00:23:35,391 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview-02-05 (provider: Google)\n", - "2025-03-21 00:23:35,393 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", - "2025-03-21 00:23:35,399 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", - "2025-03-21 00:23:35,405 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview (provider: Google)\n", - "2025-03-21 00:23:35,409 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", - "2025-03-21 00:23:35,411 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", - "2025-03-21 00:23:35,413 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp (provider: Google)\n", - "2025-03-21 00:23:35,418 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", - "2025-03-21 00:23:35,422 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", - "2025-03-21 00:23:35,424 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp-02-05 (provider: Google)\n", - "2025-03-21 00:23:35,427 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", - "2025-03-21 00:23:35,431 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", - "2025-03-21 00:23:35,434 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-exp-1206 (provider: Google)\n", - "2025-03-21 00:23:35,435 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", - "2025-03-21 00:23:35,437 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", - "2025-03-21 00:23:35,439 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-01-21 (provider: Google)\n", - "2025-03-21 00:23:35,440 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", - "2025-03-21 00:23:35,440 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", - "2025-03-21 00:23:35,442 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp (provider: Google)\n", - "2025-03-21 00:23:35,443 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", - "2025-03-21 00:23:35,444 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", - "2025-03-21 00:23:35,445 [DEBUG] ember.core.registry.model.initialization: Attempting to register 
discovered model: google:models/gemini-2.0-flash-thinking-exp-1219 (provider: Google)\n", - "2025-03-21 00:23:35,446 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", - "2025-03-21 00:23:35,447 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", - "2025-03-21 00:23:35,448 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/learnlm-1.5-pro-experimental (provider: Google)\n", - "2025-03-21 00:23:35,449 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", - "2025-03-21 00:23:35,450 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", - "2025-03-21 00:23:35,451 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemma-3-27b-it (provider: Google)\n", - "2025-03-21 00:23:35,454 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", - "2025-03-21 00:23:35,455 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", - "2025-03-21 00:23:35,456 [INFO] ember.core.registry.model.initialization: Registration summary: 80 new, 0 skipped, 0 failed\n", - "2025-03-21 00:23:35,457 [INFO] ember.core.registry.model.initialization: Successfully discovered and registered 80 new models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 
'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 
'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 00:23:35,458 [INFO] ember.core.registry.model.initialization: Discovered 80 new models in 1.53s: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13'] and 70 more\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 
'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-4o-mini-tts', 'openai:gpt-4o-mini', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 
'google:models/gemma-3-27b-it']\n" ->>>>>>> feb7b31 (added embedding model) + " \u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[7]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[31m \u001b[39m\u001b[31mmodel_registry = initialize_ember(config_path=)\u001b[39m\n ^\n\u001b[31mSyntaxError\u001b[39m\u001b[31m:\u001b[39m invalid syntax\n" ] } ], "source": [ - "model_registry = initialize_ember()\n", + "model_registry = initialize_ember(config_path=)\n", "print(model_registry.list_models())\n", "llm = ModelService(registry=model_registry)" ] }, { "cell_type": "code", -<<<<<<< HEAD - "execution_count": 63, + "execution_count": 8, "metadata": { "scrolled": true }, -======= - "execution_count": 8, - "metadata": {}, ->>>>>>> feb7b31 (added embedding model) "outputs": [ { "data": { @@ -702,7 +627,9 @@ " 'openai:dall-e-3',\n", " 'openai:dall-e-2',\n", " 'openai:gpt-4o-audio-preview-2024-10-01',\n", + " 'openai:gpt-4o-mini-2024-07-18',\n", " 'openai:gpt-4o-realtime-preview-2024-10-01',\n", + " 'openai:gpt-4o-mini',\n", " 'openai:gpt-4o-audio-preview',\n", " 'openai:text-embedding-3-large',\n", " 'openai:gpt-4',\n", @@ -733,9 +660,7 @@ " 'openai:gpt-4.5-preview-2025-02-27',\n", " 'openai:gpt-4o-search-preview-2025-03-11',\n", " 'openai:gpt-4o-2024-11-20',\n", - " 'openai:gpt-4o-mini-2024-07-18',\n", " 'openai:gpt-4o-mini-tts',\n", - " 'openai:gpt-4o-mini',\n", " 'openai:gpt-4-0125-preview',\n", " 'openai:gpt-4o-transcribe',\n", " 'openai:text-embedding-3-small',\n", @@ -795,7 +720,6 @@ "outputs": [], "source": [ "model_ids: List[str] = [\n", - " \"openai:o1\",\n", " \"openai:gpt-4o\",\n", " \"openai:gpt-4o-mini\",\n", " # \"anthropic:claude-3.5-sonnet\", # API key not working\n", @@ -813,109 +737,35 @@ "name": "stderr", "output_type": "stream", "text": [ - "2025-03-21 00:18:22,993 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. 
Using the registered provider.\n", - "2025-03-21 00:18:22,994 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o' using provider class 'OpenAIModel'.\n", - "2025-03-21 00:18:22,995 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o\n", - "2025-03-21 00:18:22,997 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:18:23,001 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - "2025-03-21 00:18:23,005 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n" + "2025-03-21 01:47:31,411 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. Using the registered provider.\n", + "2025-03-21 01:47:31,411 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o' using provider class 'OpenAIModel'.\n", + "2025-03-21 01:47:31,412 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o\n", + "2025-03-21 01:47:31,413 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 01:47:31,414 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", + "2025-03-21 01:47:31,415 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "2025-03-21 00:18:23,032 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:18:23,033 [DEBUG] openai._base_client: Sending HTTP Request: POST 
https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:18:23,035 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n" + "2025-03-21 01:47:31,427 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 01:47:31,428 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 01:47:31,429 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", + "2025-03-21 01:47:31,451 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 01:47:31,452 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", + "2025-03-21 01:47:31,461 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 01:47:31,462 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 01:47:31,463 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 01:47:31,464 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 01:47:31,465 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 01:47:31,465 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "➡️ Testing model: openai:o1\n", - "❌ Error with model openai:o1: Model 'openai:o1' not found. 
Available models:\n", - "- openai:gpt-4o-mini-transcribe\n", - "- openai:gpt-4o-audio-preview-2024-12-17\n", - "- openai:dall-e-3\n", - "- openai:dall-e-2\n", - "- openai:gpt-4o-audio-preview-2024-10-01\n", - "- openai:gpt-4o-realtime-preview-2024-10-01\n", - "- openai:gpt-4o-audio-preview\n", - "- openai:text-embedding-3-large\n", - "- openai:gpt-4\n", - "- openai:gpt-4o-2024-05-13\n", - "- openai:gpt-4o-realtime-preview\n", - "- openai:gpt-4o-mini-audio-preview\n", - "- openai:gpt-3.5-turbo-instruct-0914\n", - "- openai:gpt-4o-mini-search-preview\n", - "- openai:gpt-3.5-turbo-1106\n", - "- openai:gpt-4o-search-preview\n", - "- openai:gpt-4-turbo\n", - "- openai:gpt-4o-realtime-preview-2024-12-17\n", - "- openai:gpt-3.5-turbo-instruct\n", - "- openai:gpt-3.5-turbo\n", - "- openai:gpt-4-turbo-preview\n", - "- openai:gpt-4o-mini-search-preview-2025-03-11\n", - "- openai:gpt-4o-mini-realtime-preview\n", - "- openai:gpt-3.5-turbo-0125\n", - "- openai:gpt-4o-2024-08-06\n", - "- openai:gpt-4-turbo-2024-04-09\n", - "- openai:gpt-3.5-turbo-16k\n", - "- openai:gpt-4o\n", - "- openai:gpt-4o-mini-realtime-preview-2024-12-17\n", - "- openai:gpt-4-1106-preview\n", - "- openai:text-embedding-ada-002\n", - "- openai:gpt-4-0613\n", - "- openai:gpt-4.5-preview\n", - "- openai:gpt-4.5-preview-2025-02-27\n", - "- openai:gpt-4o-search-preview-2025-03-11\n", - "- openai:gpt-4o-2024-11-20\n", - "- openai:gpt-4o-mini-2024-07-18\n", - "- openai:gpt-4o-mini-tts\n", - "- openai:gpt-4o-mini\n", - "- openai:gpt-4-0125-preview\n", - "- openai:gpt-4o-transcribe\n", - "- openai:text-embedding-3-small\n", - "- openai:gpt-4o-mini-audio-preview-2024-12-17\n", - "- anthropic:claude-3-sonnet\n", - "- anthropic:claude-3-opus\n", - "- anthropic:claude-3-haiku\n", - "- anthropic:claude-3.5-sonnet\n", - "- anthropic:claude-3.7-sonnet\n", - "- google:models/gemini-1.0-pro-vision-latest\n", - "- google:models/gemini-pro-vision\n", - "- google:models/gemini-1.5-pro-latest\n", - "- 
google:models/gemini-1.5-pro-001\n", - "- google:models/gemini-1.5-pro-002\n", - "- google:models/gemini-1.5-pro\n", - "- google:models/gemini-1.5-flash-latest\n", - "- google:models/gemini-1.5-flash-001\n", - "- google:models/gemini-1.5-flash-001-tuning\n", - "- google:models/gemini-1.5-flash\n", - "- google:models/gemini-1.5-flash-002\n", - "- google:models/gemini-1.5-flash-8b\n", - "- google:models/gemini-1.5-flash-8b-001\n", - "- google:models/gemini-1.5-flash-8b-latest\n", - "- google:models/gemini-1.5-flash-8b-exp-0827\n", - "- google:models/gemini-1.5-flash-8b-exp-0924\n", - "- google:models/gemini-2.0-flash-exp\n", - "- google:models/gemini-2.0-flash\n", - "- google:models/gemini-2.0-flash-001\n", - "- google:models/gemini-2.0-flash-exp-image-generation\n", - "- google:models/gemini-2.0-flash-lite-001\n", - "- google:models/gemini-2.0-flash-lite\n", - "- google:models/gemini-2.0-flash-lite-preview-02-05\n", - "- google:models/gemini-2.0-flash-lite-preview\n", - "- google:models/gemini-2.0-pro-exp\n", - "- google:models/gemini-2.0-pro-exp-02-05\n", - "- google:models/gemini-exp-1206\n", - "- google:models/gemini-2.0-flash-thinking-exp-01-21\n", - "- google:models/gemini-2.0-flash-thinking-exp\n", - "- google:models/gemini-2.0-flash-thinking-exp-1219\n", - "- google:models/learnlm-1.5-pro-experimental\n", - "- google:models/gemma-3-27b-it\n", "➡️ Testing model: openai:gpt-4o\n" ] }, @@ -923,30 +773,22 @@ "name": "stderr", "output_type": "stream", "text": [ - "2025-03-21 00:18:23,052 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 00:18:23,053 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", - "2025-03-21 00:18:23,068 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 00:18:23,069 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:18:23,071 [DEBUG] httpcore.http11: send_request_headers.complete\n", - 
"2025-03-21 00:18:23,072 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:18:23,075 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:18:23,078 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:18:24,532 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:18:24 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1350'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_4484c1b7b2d43adb83ccc149b107da95'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=6lh2Yp5h0FaYvXxpQQAxO9Jt6HDCdmUeuY4.kM2i6ps-1742541504-1.0.1.1-_IHIr..1OAWdoybv_Qs3tz4oLMWDWudghLRy7.RfguO5RHXiKnjZ_j3p3t6MOuUfyuRPgEE7hksYaVr_aZjLjYeXWFZh8PG6vgZ3yCPlnOk; path=/; expires=Fri, 21-Mar-25 07:48:24 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=DanRUEufXKZ7K8DxP_7kPrhHNdJOhS0UH.jyo2WiVto-1742541504892-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923baccc7a771566-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:18:24,535 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:18:24,536 [DEBUG] 
httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:18:24,594 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:18:24,596 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:18:24,598 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:18:24,601 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 07:18:24 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('access-control-expose-headers', 'X-Request-ID'), ('openai-organization', 'user-iqhmndueuqg2ljzblqkr2tgh'), ('openai-processing-ms', '1350'), ('openai-version', '2020-10-01'), ('x-ratelimit-limit-requests', '50000'), ('x-ratelimit-limit-tokens', '150000000'), ('x-ratelimit-remaining-requests', '49999'), ('x-ratelimit-remaining-tokens', '149999987'), ('x-ratelimit-reset-requests', '1ms'), ('x-ratelimit-reset-tokens', '0s'), ('x-request-id', 'req_4484c1b7b2d43adb83ccc149b107da95'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=6lh2Yp5h0FaYvXxpQQAxO9Jt6HDCdmUeuY4.kM2i6ps-1742541504-1.0.1.1-_IHIr..1OAWdoybv_Qs3tz4oLMWDWudghLRy7.RfguO5RHXiKnjZ_j3p3t6MOuUfyuRPgEE7hksYaVr_aZjLjYeXWFZh8PG6vgZ3yCPlnOk; path=/; expires=Fri, 21-Mar-25 07:48:24 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=DanRUEufXKZ7K8DxP_7kPrhHNdJOhS0UH.jyo2WiVto-1742541504892-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923baccc7a771566-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:18:24,604 [DEBUG] openai._base_client: request_id: req_4484c1b7b2d43adb83ccc149b107da95\n", - "2025-03-21 00:18:24,615 [INFO] 
ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:18:24,624 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:18:24,631 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:18:24,637 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:18:24,641 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:18:24,646 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:18:24,654 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:18:24,660 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + "2025-03-21 01:47:32,975 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:47:33 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1377'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_5f9891031dd6ee412bfe5be821dc436d'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Set-Cookie', 
b'__cf_bm=SofhfG4xIJOYKjL3oEQAMSMqtlMVqPmaNBBHje9FrXs-1742546853-1.0.1.1-CxDgL8jHzq.vx_fL0XJTsFydnKINH8wgc_lUngYj7SOLZAUJEeQr8hac4cNva8aHIH_qU8y85200abtGfzFpehSQ_FneYO5O976vWZQXK_8; path=/; expires=Fri, 21-Mar-25 09:17:33 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=BVSC_XlemJmZHqusJyDYGgHj6.ak4SFKLmWjPK6lGwg-1742546853265-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c2f5f896df96b-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 01:47:32,978 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 01:47:32,980 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 01:47:32,988 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 01:47:32,989 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 01:47:32,990 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 01:47:32,991 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 08:47:33 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('access-control-expose-headers', 'X-Request-ID'), ('openai-organization', 'user-iqhmndueuqg2ljzblqkr2tgh'), ('openai-processing-ms', '1377'), ('openai-version', '2020-10-01'), ('x-ratelimit-limit-requests', '50000'), ('x-ratelimit-limit-tokens', '150000000'), ('x-ratelimit-remaining-requests', '49999'), ('x-ratelimit-remaining-tokens', '149999987'), ('x-ratelimit-reset-requests', '1ms'), ('x-ratelimit-reset-tokens', '0s'), ('x-request-id', 'req_5f9891031dd6ee412bfe5be821dc436d'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), 
('set-cookie', '__cf_bm=SofhfG4xIJOYKjL3oEQAMSMqtlMVqPmaNBBHje9FrXs-1742546853-1.0.1.1-CxDgL8jHzq.vx_fL0XJTsFydnKINH8wgc_lUngYj7SOLZAUJEeQr8hac4cNva8aHIH_qU8y85200abtGfzFpehSQ_FneYO5O976vWZQXK_8; path=/; expires=Fri, 21-Mar-25 09:17:33 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=BVSC_XlemJmZHqusJyDYGgHj6.ak4SFKLmWjPK6lGwg-1742546853265-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923c2f5f896df96b-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", + "2025-03-21 01:47:32,991 [DEBUG] openai._base_client: request_id: req_5f9891031dd6ee412bfe5be821dc436d\n", + "2025-03-21 01:47:32,996 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 01:47:33,000 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 01:47:33,002 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 01:47:33,003 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 01:47:33,005 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 01:47:33,005 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 01:47:33,007 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 01:47:33,007 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" ] }, { @@ -954,7 +796,7 @@ "output_type": "stream", "text": [ "🛎️ Service response from openai:gpt-4o:\n", - "Quantum computing leverages quantum mechanics principles, using qubits that exist in multiple states simultaneously. 
This allows for immense parallel processing power, enabling computations beyond classical computers' capabilities. Entanglement and superposition enhance efficiency, promising breakthroughs in cryptography, optimization, and complex problem-solving. It's transformative, yet still largely experimental.\n", + "Quantum computing leverages quantum bits (qubits), exploiting superposition and entanglement principles, enabling simultaneous processing of vast possibilities. Unlike classical bits, qubits can represent 0 and 1 simultaneously, potentially solving complex problems exponentially faster than classical computers, revolutionizing fields like cryptography, optimization, and materials science.\n", "\n" ] }, @@ -962,25 +804,25 @@ "name": "stderr", "output_type": "stream", "text": [ - "2025-03-21 00:18:24,922 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:18:25 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'227'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999989'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_a4d75e258ae593c95536f95bfb4b46d8'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bacd65ade1566-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:18:24,924 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 
OK\"\n", - "2025-03-21 00:18:24,925 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:18:24,927 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:18:24,928 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:18:24,929 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:18:24,930 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:18:25 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '227', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999989', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_a4d75e258ae593c95536f95bfb4b46d8', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bacd65ade1566-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:18:24,931 [DEBUG] openai._base_client: request_id: req_a4d75e258ae593c95536f95bfb4b46d8\n", - "2025-03-21 00:18:24,932 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. 
Using the registered provider.\n", - "2025-03-21 00:18:24,933 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o-mini' using provider class 'OpenAIModel'.\n", - "2025-03-21 00:18:24,933 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o-mini\n", - "2025-03-21 00:18:24,934 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:18:24,938 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:18:24,939 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:18:24,940 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:18:24,941 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:18:24,943 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:18:24,944 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:18:24,945 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + "2025-03-21 01:47:33,434 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:47:33 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'384'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999990'), 
(b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_c47455e6d10ded7a049fedfde39ace97'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c2f6928acf96b-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 01:47:33,437 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 01:47:33,439 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 01:47:33,443 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 01:47:33,446 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 01:47:33,448 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 01:47:33,450 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 08:47:33 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '384', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_c47455e6d10ded7a049fedfde39ace97', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923c2f6928acf96b-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 01:47:33,452 [DEBUG] openai._base_client: request_id: 
req_c47455e6d10ded7a049fedfde39ace97\n", + "2025-03-21 01:47:33,456 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. Using the registered provider.\n", + "2025-03-21 01:47:33,458 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o-mini' using provider class 'OpenAIModel'.\n", + "2025-03-21 01:47:33,460 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o-mini\n", + "2025-03-21 01:47:33,461 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 01:47:33,470 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 01:47:33,473 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 01:47:33,475 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 01:47:33,479 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 01:47:33,481 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 01:47:33,485 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 01:47:33,487 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" ] }, { @@ -997,22 +839,22 @@ "name": "stderr", "output_type": "stream", "text": [ - "2025-03-21 00:18:26,197 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:18:26 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), 
(b'openai-processing-ms', b'1201'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999988'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_5e2501f946b9db85f3fe2255e49f7894'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bacd83c6f1566-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:18:26,201 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:18:26,204 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:18:26,210 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:18:26,213 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:18:26,215 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:18:26,218 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:18:26 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '1201', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999988', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_5e2501f946b9db85f3fe2255e49f7894', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 
'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bacd83c6f1566-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:18:26,221 [DEBUG] openai._base_client: request_id: req_5e2501f946b9db85f3fe2255e49f7894\n", - "2025-03-21 00:18:26,225 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:18:26,235 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:18:26,238 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:18:26,240 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:18:26,245 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:18:26,247 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:18:26,251 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:18:26,253 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" + "2025-03-21 01:47:34,916 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:47:35 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1363'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'2ms'), 
(b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_41c953699a64c8f5e49420f537cf7fe6'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c2f6c1af8f96b-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 01:47:34,917 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 01:47:34,918 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 01:47:34,923 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 01:47:34,925 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 01:47:34,926 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 01:47:34,927 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 08:47:35 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '1363', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_41c953699a64c8f5e49420f537cf7fe6', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923c2f6c1af8f96b-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 01:47:34,928 [DEBUG] openai._base_client: request_id: req_41c953699a64c8f5e49420f537cf7fe6\n", + "2025-03-21 
01:47:34,930 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 01:47:34,934 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 01:47:34,935 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 01:47:34,936 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 01:47:34,938 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 01:47:34,939 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 01:47:34,939 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 01:47:34,940 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" ] }, { @@ -1020,7 +862,7 @@ "output_type": "stream", "text": [ "🛎️ Service response from openai:gpt-4o-mini:\n", - "Quantum computing harnesses the principles of quantum mechanics to process information. Unlike classical bits, quantum bits (qubits) can exist in multiple states simultaneously, enabling parallel computations. This potential for massive parallelism allows quantum computers to solve complex problems, such as optimization and cryptography, much faster than traditional computers can.\n", + "Quantum computing harnesses the principles of quantum mechanics to process information using qubits, which can represent multiple states simultaneously. 
This enables vastly superior computational power for certain tasks, such as factoring large numbers or simulating molecular interactions, potentially solving problems that are currently intractable for classical computers.\n", "\n" ] }, @@ -1028,14 +870,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "2025-03-21 00:18:26,743 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:18:27 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'450'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_7c748282fa721b7ab5121245acddd69d'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'CF-Cache-Status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bace05afe1566-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:18:26,745 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:18:26,746 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:18:26,752 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:18:26,753 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:18:26,755 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:18:26,756 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" 
Headers({'date': 'Fri, 21 Mar 2025 07:18:27 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '450', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_7c748282fa721b7ab5121245acddd69d', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bace05afe1566-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:18:26,757 [DEBUG] openai._base_client: request_id: req_7c748282fa721b7ab5121245acddd69d\n" + "2025-03-21 01:47:35,574 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:47:35 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'581'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_99f2a454c9dd8454feccdaa4c1581cfd'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c2f7539aef96b-SJC'), 
(b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 01:47:35,576 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 01:47:35,577 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 01:47:35,579 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 01:47:35,579 [DEBUG] httpcore.http11: response_closed.started\n", + "2025-03-21 01:47:35,580 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 01:47:35,581 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 08:47:35 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '581', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_99f2a454c9dd8454feccdaa4c1581cfd', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923c2f7539aef96b-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 01:47:35,581 [DEBUG] openai._base_client: request_id: req_99f2a454c9dd8454feccdaa4c1581cfd\n" ] }, { @@ -1070,65 +912,47 @@ "\n", " except Exception as error:\n", " print(f\"❌ Error with model {model_id}: {str(error)}\")\n", - " continue" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "# Register an OpenAI GPT-4o model\n", - "# openai_info = ModelInfo(\n", - "# 
model_id=\"openai:gpt-4o\",\n", - "# model_name=\"gpt-4o\",\n", - "# cost=ModelCost(input_cost_per_thousand=0.03, output_cost_per_thousand=0.06),\n", - "# rate_limit=RateLimit(tokens_per_minute=80000, requests_per_minute=5000),\n", - "# provider=ProviderInfo(name=\"OpenAI\", default_api_key=openai_key),\n", - "# api_key=openai_key,\n", - "# )\n", - "# model_registry.register_model(openai_info)" + " continue\n" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "2025-03-21 00:19:41,794 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:19:41,802 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello!'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:19:41,807 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:19:41,811 [DEBUG] httpcore.connection: close.started\n", - "2025-03-21 00:19:41,813 [DEBUG] httpcore.connection: close.complete\n", - "2025-03-21 00:19:41,815 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", - "2025-03-21 00:19:41,885 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 00:19:41,886 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", - "2025-03-21 00:19:41,902 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 00:19:41,904 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:19:41,906 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:19:41,907 [DEBUG] httpcore.http11: 
send_request_body.started request=\n", - "2025-03-21 00:19:41,909 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:19:41,910 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:19:42,443 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:19:42 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'452'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999996'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_43be483befeb53bf9ac56f8f100d5aae'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923baeb93b086459-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:19:42,444 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:19:42,445 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:19:42,448 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:19:42,448 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:19:42,449 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:19:42,451 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:19:42 GMT', 'content-type': 'application/json', 
'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '452', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999996', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_43be483befeb53bf9ac56f8f100d5aae', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923baeb93b086459-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:19:42,451 [DEBUG] openai._base_client: request_id: req_43be483befeb53bf9ac56f8f100d5aae\n" + "2025-03-21 01:49:49,992 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", + "2025-03-21 01:49:50,000 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello!'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", + "2025-03-21 01:49:50,004 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", + "2025-03-21 01:49:50,005 [DEBUG] httpcore.connection: close.started\n", + "2025-03-21 01:49:50,007 [DEBUG] httpcore.connection: close.complete\n", + "2025-03-21 01:49:50,009 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", + "2025-03-21 01:49:50,035 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", + "2025-03-21 01:49:50,036 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", + "2025-03-21 
01:49:50,065 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", + "2025-03-21 01:49:50,067 [DEBUG] httpcore.http11: send_request_headers.started request=\n", + "2025-03-21 01:49:50,070 [DEBUG] httpcore.http11: send_request_headers.complete\n", + "2025-03-21 01:49:50,071 [DEBUG] httpcore.http11: send_request_body.started request=\n", + "2025-03-21 01:49:50,074 [DEBUG] httpcore.http11: send_request_body.complete\n", + "2025-03-21 01:49:50,075 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", + "2025-03-21 01:49:50,577 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:49:50 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'438'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999995'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_afeab9892d79b1ffddd93afd92847985'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c32c1cf45156c-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", + "2025-03-21 01:49:50,579 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "2025-03-21 01:49:50,580 [DEBUG] httpcore.http11: receive_response_body.started request=\n", + "2025-03-21 01:49:50,583 [DEBUG] httpcore.http11: receive_response_body.complete\n", + "2025-03-21 01:49:50,584 [DEBUG] httpcore.http11: 
response_closed.started\n", + "2025-03-21 01:49:50,585 [DEBUG] httpcore.http11: response_closed.complete\n", + "2025-03-21 01:49:50,587 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 08:49:50 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '438', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999995', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_afeab9892d79b1ffddd93afd92847985', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923c32c1cf45156c-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", + "2025-03-21 01:49:50,588 [DEBUG] openai._base_client: request_id: req_afeab9892d79b1ffddd93afd92847985\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Hello! How can I assist you today?\n" + "Hi there! 
How can I assist you today?\n" ] } ], @@ -1153,7 +977,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -1171,11 +995,7 @@ }, { "cell_type": "code", -<<<<<<< HEAD - "execution_count": 7, -======= - "execution_count": 50, ->>>>>>> feb7b31 (added embedding model) + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -1216,7 +1036,7 @@ " \"\"\"\n", " ...\n", "\n", - "class Text_Embedding_3_EmbeddingModel(EmbeddingModel):\n", + "class Text_Embedding_3_EmbeddingModel(Protocol):\n", " \"\"\"Interface for embedding models.\n", "\n", " This protocol defines the minimal interface required to compute a text\n", @@ -1227,6 +1047,17 @@ " embed_text: Compute the embedding for a given text.\n", " \"\"\"\n", "\n", + " def __init__(self, api_key: str = None):\n", + " \"\"\"Initializes the embedding model with the OpenAI API key.\n", + "\n", + " Args:\n", + " api_key (str): OpenAI API key for authentication.\n", + " \"\"\"\n", + " self.api_key = api_key or os.environ.get(\"OPENAI_API_KEY\")\n", + " if not self.api_key:\n", + " raise ValueError(\"OpenAI API key must be provided or set in the environment variable OPENAI_API_KEY.\")\n", + " openai.api_key = self.api_key\n", + "\n", " def embed_text(self, text: str) -> List[float]:\n", " \"\"\"Computes the embedding vector for the provided text.\n", "\n", @@ -1236,13 +1067,11 @@ " Returns:\n", " List[float]: A list of floats representing the embedding vector.\n", " \"\"\"\n", - " response = llm(model_id=\"openai:text-embedding-3-small\", prompt=text)\n", - "\n", - " # response = openai.Embedding.create(\n", - " # model=\"text-embedding-3\",\n", - " # input=text\n", - " # )\n", - " return response.data\n", + " response = openai.Embedding.create(\n", + " model=\"text-embedding-3\",\n", + " input=text\n", + " )\n", + " return response[\"data\"][0][\"embedding\"]\n", "\n", "\n", "class MockEmbeddingModel:\n", @@ -1356,283 +1185,28 @@ }, { "cell_type": 
"code", -<<<<<<< HEAD - "execution_count": 8, + "execution_count": 14, "metadata": { "scrolled": true }, "outputs": [ { - "ename": "NameError", - "evalue": "name 'mock_model' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[8], line 8\u001b[0m\n\u001b[1;32m 4\u001b[0m text_a: \u001b[38;5;28mstr\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHello world!\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 5\u001b[0m text_b: \u001b[38;5;28mstr\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHello, world??\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 7\u001b[0m score: \u001b[38;5;28mfloat\u001b[39m \u001b[38;5;241m=\u001b[39m calculate_text_similarity(\n\u001b[0;32m----> 8\u001b[0m text1\u001b[38;5;241m=\u001b[39mtext_a, text2\u001b[38;5;241m=\u001b[39mtext_b, model\u001b[38;5;241m=\u001b[39mmock_model, metric\u001b[38;5;241m=\u001b[39mcosine\n\u001b[1;32m 9\u001b[0m )\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSimilarity between \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_a\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m and \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_b\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mscore\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n", - "\u001b[0;31mNameError\u001b[0m: name 'mock_model' is not defined" -======= - "execution_count": 51, - "metadata": {}, - "outputs": [ - { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "2025-03-21 00:37:15,471 [WARNING] 
ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. Using the registered provider.\n", - "2025-03-21 00:37:15,475 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:text-embedding-3-large' using provider class 'OpenAIModel'.\n", - "2025-03-21 00:37:15,482 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:text-embedding-3-large\n", - "2025-03-21 00:37:15,528 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:37:15,661 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello world!'}], 'model': 'text-embedding-3-large', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:37:15,695 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:37:15,708 [DEBUG] httpcore.connection: close.started\n", - "2025-03-21 00:37:15,744 [DEBUG] httpcore.connection: close.complete\n", - "2025-03-21 00:37:15,752 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", - "2025-03-21 00:37:15,775 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 00:37:15,779 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", - "2025-03-21 00:37:15,866 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 00:37:15,870 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:37:15,876 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:37:15,877 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:37:15,878 [DEBUG] httpcore.http11: send_request_body.complete\n", - 
"2025-03-21 00:37:15,879 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:37:15,920 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 403, b'Forbidden', [(b'Date', b'Fri, 21 Mar 2025 07:37:16 GMT'), (b'Content-Type', b'application/json; charset=utf-8'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'vary', b'Origin'), (b'x-request-id', b'req_536398e43921f797ac509114aae14cae'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=rQJfdoJ54KiOHVXBfvUG99o9mGLvvP81mJe39yApwnM-1742542636-1.0.1.1-t6_oW0vtdbrhY.lVxM0S223ktjIO_SQ4ohXzxKRtCabBNZWZq9TEun6DIfyIJAlK77DPrCUMENp6Wkwrxd67RJJmb35J0Piu0S8e7F2TFoE; path=/; expires=Fri, 21-Mar-25 08:07:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bc87478aaeb35-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:37:15,936 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 403 Forbidden\"\n", - "2025-03-21 00:37:15,943 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:37:15,948 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:37:15,952 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:37:15,955 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:37:15,958 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"403 Forbidden\" Headers({'date': 'Fri, 21 Mar 2025 07:37:16 GMT', 'content-type': 'application/json; charset=utf-8', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'vary': 'Origin', 'x-request-id': 'req_536398e43921f797ac509114aae14cae', 'strict-transport-security': 'max-age=31536000; includeSubDomains; 
preload', 'cf-cache-status': 'DYNAMIC', 'set-cookie': '__cf_bm=rQJfdoJ54KiOHVXBfvUG99o9mGLvvP81mJe39yApwnM-1742542636-1.0.1.1-t6_oW0vtdbrhY.lVxM0S223ktjIO_SQ4ohXzxKRtCabBNZWZq9TEun6DIfyIJAlK77DPrCUMENp6Wkwrxd67RJJmb35J0Piu0S8e7F2TFoE; path=/; expires=Fri, 21-Mar-25 08:07:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bc87478aaeb35-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:37:15,962 [DEBUG] openai._base_client: request_id: req_536398e43921f797ac509114aae14cae\n", - "2025-03-21 00:37:15,972 [DEBUG] openai._base_client: Encountered httpx.HTTPStatusError\n", - "Traceback (most recent call last):\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1040, in _request\n", - " response.raise_for_status()\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/httpx/_models.py\", line 763, in raise_for_status\n", - " raise HTTPStatusError(message, request=request, response=self)\n", - "httpx.HTTPStatusError: Client error '403 Forbidden' for url 'https://api.openai.com/v1/chat/completions'\n", - "For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/403\n", - "2025-03-21 00:37:16,136 [DEBUG] openai._base_client: Not retrying\n", - "2025-03-21 00:37:16,137 [DEBUG] openai._base_client: Re-raising status error\n", - "2025-03-21 00:37:16,151 [ERROR] ember.core.registry.model.providers.openai.openai_provider: Unexpected error in OpenAIModel.forward()\n", - "Traceback (most recent call last):\n", - " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 389, in forward\n", - " response: Any = self.client.chat.completions.create(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 
275, in wrapper\n", - " return func(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 829, in create\n", - " return self._post(\n", - " ^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1280, in post\n", - " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 957, in request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1061, in _request\n", - " raise self._make_status_error_from_response(err.response) from None\n", - "openai.PermissionDeniedError: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", - "2025-03-21 00:37:17,168 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:37:17,172 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello world!'}], 'model': 'text-embedding-3-large', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:37:17,174 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:37:17,175 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:37:17,176 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:37:17,178 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 
00:37:17,182 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:37:17,184 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:37:17,215 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 403, b'Forbidden', [(b'Date', b'Fri, 21 Mar 2025 07:37:17 GMT'), (b'Content-Type', b'application/json; charset=utf-8'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'vary', b'Origin'), (b'x-request-id', b'req_b1c4e773cbe460c0c8d22a74e4869095'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bc87c9c9beb35-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:37:17,219 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 403 Forbidden\"\n", - "2025-03-21 00:37:17,222 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:37:17,227 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:37:17,231 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:37:17,235 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:37:17,242 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"403 Forbidden\" Headers({'date': 'Fri, 21 Mar 2025 07:37:17 GMT', 'content-type': 'application/json; charset=utf-8', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'vary': 'Origin', 'x-request-id': 'req_b1c4e773cbe460c0c8d22a74e4869095', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bc87c9c9beb35-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:37:17,245 [DEBUG] 
openai._base_client: request_id: req_b1c4e773cbe460c0c8d22a74e4869095\n", - "2025-03-21 00:37:17,248 [DEBUG] openai._base_client: Encountered httpx.HTTPStatusError\n", - "Traceback (most recent call last):\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1040, in _request\n", - " response.raise_for_status()\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/httpx/_models.py\", line 763, in raise_for_status\n", - " raise HTTPStatusError(message, request=request, response=self)\n", - "httpx.HTTPStatusError: Client error '403 Forbidden' for url 'https://api.openai.com/v1/chat/completions'\n", - "For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/403\n", - "2025-03-21 00:37:17,253 [DEBUG] openai._base_client: Not retrying\n", - "2025-03-21 00:37:17,258 [DEBUG] openai._base_client: Re-raising status error\n", - "2025-03-21 00:37:17,261 [ERROR] ember.core.registry.model.providers.openai.openai_provider: Unexpected error in OpenAIModel.forward()\n", - "Traceback (most recent call last):\n", - " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 389, in forward\n", - " response: Any = self.client.chat.completions.create(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 275, in wrapper\n", - " return func(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 829, in create\n", - " return self._post(\n", - " ^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1280, in post\n", - " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", - " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 957, in request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1061, in _request\n", - " raise self._make_status_error_from_response(err.response) from None\n", - "openai.PermissionDeniedError: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", - "2025-03-21 00:37:19,267 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:37:19,277 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello world!'}], 'model': 'text-embedding-3-large', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:37:19,281 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:37:19,284 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:37:19,288 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:37:19,290 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:37:19,295 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:37:19,298 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:37:19,332 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 403, b'Forbidden', [(b'Date', b'Fri, 21 Mar 2025 07:37:19 GMT'), (b'Content-Type', b'application/json; charset=utf-8'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'vary', b'Origin'), (b'x-request-id', 
b'req_e3547707ff5d4f5809990164a72b6a49'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bc889cd03eb35-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:37:19,334 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 403 Forbidden\"\n", - "2025-03-21 00:37:19,339 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:37:19,346 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:37:19,349 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:37:19,351 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:37:19,354 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"403 Forbidden\" Headers({'date': 'Fri, 21 Mar 2025 07:37:19 GMT', 'content-type': 'application/json; charset=utf-8', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'vary': 'Origin', 'x-request-id': 'req_e3547707ff5d4f5809990164a72b6a49', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bc889cd03eb35-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:37:19,355 [DEBUG] openai._base_client: request_id: req_e3547707ff5d4f5809990164a72b6a49\n", - "2025-03-21 00:37:19,362 [DEBUG] openai._base_client: Encountered httpx.HTTPStatusError\n", - "Traceback (most recent call last):\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1040, in _request\n", - " response.raise_for_status()\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/httpx/_models.py\", line 763, in raise_for_status\n", - " 
raise HTTPStatusError(message, request=request, response=self)\n", - "httpx.HTTPStatusError: Client error '403 Forbidden' for url 'https://api.openai.com/v1/chat/completions'\n", - "For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/403\n", - "2025-03-21 00:37:19,368 [DEBUG] openai._base_client: Not retrying\n", - "2025-03-21 00:37:19,374 [DEBUG] openai._base_client: Re-raising status error\n", - "2025-03-21 00:37:19,377 [ERROR] ember.core.registry.model.providers.openai.openai_provider: Unexpected error in OpenAIModel.forward()\n", - "Traceback (most recent call last):\n", - " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 389, in forward\n", - " response: Any = self.client.chat.completions.create(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 275, in wrapper\n", - " return func(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 829, in create\n", - " return self._post(\n", - " ^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1280, in post\n", - " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 957, in request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1061, in _request\n", - " raise self._make_status_error_from_response(err.response) from None\n", - "openai.PermissionDeniedError: Error code: 403 - {'error': {'message': 'You are not 
allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", - "2025-03-21 00:37:19,380 [ERROR] ModelService: Error invoking model 'openai:text-embedding-3-large'.\n", - "Traceback (most recent call last):\n", - " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 389, in forward\n", - " response: Any = self.client.chat.completions.create(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 275, in wrapper\n", - " return func(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 829, in create\n", - " return self._post(\n", - " ^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1280, in post\n", - " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 957, in request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py\", line 1061, in _request\n", - " raise self._make_status_error_from_response(err.response) from None\n", - "openai.PermissionDeniedError: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", - "\n", - "The above exception was the direct cause of the following exception:\n", - "\n", - "Traceback (most recent call last):\n", - " File \"/root/ember/jared/ember/src/ember/core/registry/model/base/services/model_service.py\", line 106, in _invoke\n", - " 
response = model(prompt=prompt, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/base_provider.py\", line 182, in __call__\n", - " return self.forward(request=chat_request)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 336, in wrapped_f\n", - " return copy(f, *args, **kw)\n", - " ^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 475, in __call__\n", - " do = self.iter(retry_state=retry_state)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 376, in iter\n", - " result = action(retry_state)\n", - " ^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 418, in exc_check\n", - " raise retry_exc.reraise()\n", - " ^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 185, in reraise\n", - " raise self.last_attempt.result()\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/concurrent/futures/_base.py\", line 449, in result\n", - " return self.__get_result()\n", - " ^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n", - " raise self._exception\n", - " File \"/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py\", line 478, in __call__\n", - " result = fn(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^\n", - " File \"/root/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py\", line 406, in forward\n", - " raise ProviderAPIError(str(exc)) from exc\n", - 
"ember.core.exceptions.ProviderAPIError: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n" - ] - }, - { - "ename": "ProviderAPIError", - "evalue": "Error invoking model openai:text-embedding-3-large", - "output_type": "error", - "traceback": [ - "\u001b[31m---------------------------------------------------------------------------\u001b[39m", - "\u001b[31mPermissionDeniedError\u001b[39m Traceback (most recent call last)", - "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py:389\u001b[39m, in \u001b[36mOpenAIModel.forward\u001b[39m\u001b[34m(self, request)\u001b[39m\n\u001b[32m 388\u001b[39m timeout = openai_kwargs.pop(\u001b[33m\"\u001b[39m\u001b[33mtimeout\u001b[39m\u001b[33m\"\u001b[39m, \u001b[32m30\u001b[39m)\n\u001b[32m--> \u001b[39m\u001b[32m389\u001b[39m response: Any = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mclient\u001b[49m\u001b[43m.\u001b[49m\u001b[43mchat\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcompletions\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcreate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 390\u001b[39m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mmodel_info\u001b[49m\u001b[43m.\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 391\u001b[39m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 392\u001b[39m \u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mopenai_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 393\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 394\u001b[39m content: \u001b[38;5;28mstr\u001b[39m = response.choices[\u001b[32m0\u001b[39m].message.content.strip()\n", - "\u001b[36mFile 
\u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_utils/_utils.py:275\u001b[39m, in \u001b[36mrequired_args..inner..wrapper\u001b[39m\u001b[34m(*args, **kwargs)\u001b[39m\n\u001b[32m 274\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(msg)\n\u001b[32m--> \u001b[39m\u001b[32m275\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/resources/chat/completions.py:829\u001b[39m, in \u001b[36mCompletions.create\u001b[39m\u001b[34m(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)\u001b[39m\n\u001b[32m 828\u001b[39m validate_response_format(response_format)\n\u001b[32m--> \u001b[39m\u001b[32m829\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_post\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 830\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43m/chat/completions\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[32m 831\u001b[39m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmaybe_transform\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 832\u001b[39m \u001b[43m \u001b[49m\u001b[43m{\u001b[49m\n\u001b[32m 833\u001b[39m \u001b[43m 
\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmessages\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 834\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmodel\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 835\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43maudio\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43maudio\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 836\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfrequency_penalty\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfrequency_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 837\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfunction_call\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunction_call\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 838\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfunctions\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunctions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 839\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mlogit_bias\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mlogit_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 840\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mlogprobs\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mlogprobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 841\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmax_completion_tokens\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mmax_completion_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 842\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmax_tokens\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 843\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmetadata\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 844\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmodalities\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodalities\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 845\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mn\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 846\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mparallel_tool_calls\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mparallel_tool_calls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 847\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mprediction\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mprediction\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 848\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mpresence_penalty\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mpresence_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 849\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mresponse_format\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mresponse_format\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 850\u001b[39m \u001b[43m 
\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mseed\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mseed\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 851\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mservice_tier\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mservice_tier\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 852\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstop\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 853\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstore\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstore\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 854\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstream\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 855\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstream_options\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 856\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtemperature\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtemperature\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 857\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtool_choice\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtool_choice\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 858\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtools\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mtools\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 859\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtop_logprobs\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtop_logprobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 860\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtop_p\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtop_p\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 861\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43muser\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43muser\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 862\u001b[39m \u001b[43m \u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 863\u001b[39m \u001b[43m \u001b[49m\u001b[43mcompletion_create_params\u001b[49m\u001b[43m.\u001b[49m\u001b[43mCompletionCreateParams\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 864\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 865\u001b[39m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmake_request_options\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 866\u001b[39m \u001b[43m \u001b[49m\u001b[43mextra_headers\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_headers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextra_query\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_query\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextra_body\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_body\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout\u001b[49m\n\u001b[32m 867\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 868\u001b[39m \u001b[43m 
\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m=\u001b[49m\u001b[43mChatCompletion\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 869\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 870\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mStream\u001b[49m\u001b[43m[\u001b[49m\u001b[43mChatCompletionChunk\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 871\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py:1280\u001b[39m, in \u001b[36mSyncAPIClient.post\u001b[39m\u001b[34m(self, path, cast_to, body, options, files, stream, stream_cls)\u001b[39m\n\u001b[32m 1277\u001b[39m opts = FinalRequestOptions.construct(\n\u001b[32m 1278\u001b[39m method=\u001b[33m\"\u001b[39m\u001b[33mpost\u001b[39m\u001b[33m\"\u001b[39m, url=path, json_data=body, files=to_httpx_files(files), **options\n\u001b[32m 1279\u001b[39m )\n\u001b[32m-> \u001b[39m\u001b[32m1280\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m cast(ResponseT, \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mopts\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m)\u001b[49m)\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py:957\u001b[39m, in \u001b[36mSyncAPIClient.request\u001b[39m\u001b[34m(self, cast_to, options, 
remaining_retries, stream, stream_cls)\u001b[39m\n\u001b[32m 955\u001b[39m retries_taken = \u001b[32m0\u001b[39m\n\u001b[32m--> \u001b[39m\u001b[32m957\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 958\u001b[39m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 959\u001b[39m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[43m=\u001b[49m\u001b[43moptions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 960\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 961\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 962\u001b[39m \u001b[43m \u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[43m=\u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 963\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/openai/_base_client.py:1061\u001b[39m, in \u001b[36mSyncAPIClient._request\u001b[39m\u001b[34m(self, cast_to, options, retries_taken, stream, stream_cls)\u001b[39m\n\u001b[32m 1060\u001b[39m log.debug(\u001b[33m\"\u001b[39m\u001b[33mRe-raising status error\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m-> \u001b[39m\u001b[32m1061\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m._make_status_error_from_response(err.response) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 1063\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._process_response(\n\u001b[32m 1064\u001b[39m cast_to=cast_to,\n\u001b[32m 1065\u001b[39m options=options,\n\u001b[32m 
(...)\u001b[39m\u001b[32m 1069\u001b[39m retries_taken=retries_taken,\n\u001b[32m 1070\u001b[39m )\n", - "\u001b[31mPermissionDeniedError\u001b[39m: Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001b[31mProviderAPIError\u001b[39m Traceback (most recent call last)", - "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/base/services/model_service.py:106\u001b[39m, in \u001b[36mModelService._invoke\u001b[39m\u001b[34m(self, model_id, prompt, **kwargs)\u001b[39m\n\u001b[32m 105\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m106\u001b[39m response = \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m=\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 107\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m exc:\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/providers/base_provider.py:182\u001b[39m, in \u001b[36mBaseProviderModel.__call__\u001b[39m\u001b[34m(self, prompt, **kwargs)\u001b[39m\n\u001b[32m 181\u001b[39m chat_request: ChatRequest = ChatRequest(prompt=prompt, **kwargs)\n\u001b[32m--> \u001b[39m\u001b[32m182\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m=\u001b[49m\u001b[43mchat_request\u001b[49m\u001b[43m)\u001b[49m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:336\u001b[39m, in 
\u001b[36mBaseRetrying.wraps..wrapped_f\u001b[39m\u001b[34m(*args, **kw)\u001b[39m\n\u001b[32m 335\u001b[39m wrapped_f.statistics = copy.statistics \u001b[38;5;66;03m# type: ignore[attr-defined]\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m336\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcopy\u001b[49m\u001b[43m(\u001b[49m\u001b[43mf\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkw\u001b[49m\u001b[43m)\u001b[49m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:475\u001b[39m, in \u001b[36mRetrying.__call__\u001b[39m\u001b[34m(self, fn, *args, **kwargs)\u001b[39m\n\u001b[32m 474\u001b[39m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m475\u001b[39m do = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43miter\u001b[49m\u001b[43m(\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m=\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 476\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(do, DoAttempt):\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:376\u001b[39m, in \u001b[36mBaseRetrying.iter\u001b[39m\u001b[34m(self, retry_state)\u001b[39m\n\u001b[32m 375\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m action \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m.iter_state.actions:\n\u001b[32m--> \u001b[39m\u001b[32m376\u001b[39m result = \u001b[43maction\u001b[49m\u001b[43m(\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 377\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m result\n", - "\u001b[36mFile 
\u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:418\u001b[39m, in \u001b[36mBaseRetrying._post_stop_check_actions..exc_check\u001b[39m\u001b[34m(rs)\u001b[39m\n\u001b[32m 417\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.reraise:\n\u001b[32m--> \u001b[39m\u001b[32m418\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[43mretry_exc\u001b[49m\u001b[43m.\u001b[49m\u001b[43mreraise\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 419\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m retry_exc \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mfut\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mexception\u001b[39;00m()\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:185\u001b[39m, in \u001b[36mRetryError.reraise\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 184\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.last_attempt.failed:\n\u001b[32m--> \u001b[39m\u001b[32m185\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mlast_attempt\u001b[49m\u001b[43m.\u001b[49m\u001b[43mresult\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 186\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/concurrent/futures/_base.py:449\u001b[39m, in \u001b[36mFuture.result\u001b[39m\u001b[34m(self, timeout)\u001b[39m\n\u001b[32m 448\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m._state == FINISHED:\n\u001b[32m--> \u001b[39m\u001b[32m449\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m__get_result\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 451\u001b[39m 
\u001b[38;5;28mself\u001b[39m._condition.wait(timeout)\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/concurrent/futures/_base.py:401\u001b[39m, in \u001b[36mFuture.__get_result\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 400\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m401\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m._exception\n\u001b[32m 402\u001b[39m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[32m 403\u001b[39m \u001b[38;5;66;03m# Break a reference cycle with the exception in self._exception\u001b[39;00m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tenacity/__init__.py:478\u001b[39m, in \u001b[36mRetrying.__call__\u001b[39m\u001b[34m(self, fn, *args, **kwargs)\u001b[39m\n\u001b[32m 477\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m478\u001b[39m result = \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 479\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m: \u001b[38;5;66;03m# noqa: B902\u001b[39;00m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/providers/openai/openai_provider.py:406\u001b[39m, in \u001b[36mOpenAIModel.forward\u001b[39m\u001b[34m(self, request)\u001b[39m\n\u001b[32m 405\u001b[39m logger.exception(\u001b[33m\"\u001b[39m\u001b[33mUnexpected error in OpenAIModel.forward()\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m--> \u001b[39m\u001b[32m406\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m ProviderAPIError(\u001b[38;5;28mstr\u001b[39m(exc)) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mexc\u001b[39;00m\n", - "\u001b[31mProviderAPIError\u001b[39m: 
Error code: 403 - {'error': {'message': 'You are not allowed to sample from this model', 'type': 'invalid_request_error', 'param': None, 'code': None}}", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001b[31mProviderAPIError\u001b[39m Traceback (most recent call last)", - "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[51]\u001b[39m\u001b[32m, line 7\u001b[39m\n\u001b[32m 4\u001b[39m text_a: \u001b[38;5;28mstr\u001b[39m = \u001b[33m\"\u001b[39m\u001b[33mHello world!\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 5\u001b[39m text_b: \u001b[38;5;28mstr\u001b[39m = \u001b[33m\"\u001b[39m\u001b[33mHello, world??\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m7\u001b[39m score: \u001b[38;5;28mfloat\u001b[39m = \u001b[43mcalculate_text_similarity\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 8\u001b[39m \u001b[43m \u001b[49m\u001b[43mtext1\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtext_a\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtext2\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtext_b\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m=\u001b[49m\u001b[43mopenai_embedding_model\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetric\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcosine\u001b[49m\n\u001b[32m 9\u001b[39m \u001b[43m)\u001b[49m\n\u001b[32m 10\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mSimilarity between \u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_a\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m and \u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_b\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mscore\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n", - "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[50]\u001b[39m\u001b[32m, line 171\u001b[39m, in 
\u001b[36mcalculate_text_similarity\u001b[39m\u001b[34m(text1, text2, model, metric)\u001b[39m\n\u001b[32m 154\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mcalculate_text_similarity\u001b[39m(\n\u001b[32m 155\u001b[39m text1: \u001b[38;5;28mstr\u001b[39m, text2: \u001b[38;5;28mstr\u001b[39m, model: EmbeddingModel, metric: SimilarityMetric\n\u001b[32m 156\u001b[39m ) -> \u001b[38;5;28mfloat\u001b[39m:\n\u001b[32m 157\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"Calculates text similarity using an embedding model and a similarity metric.\u001b[39;00m\n\u001b[32m 158\u001b[39m \n\u001b[32m 159\u001b[39m \u001b[33;03m This function generates embeddings for the provided texts and then computes a\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 169\u001b[39m \u001b[33;03m float: The computed similarity score.\u001b[39;00m\n\u001b[32m 170\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m171\u001b[39m embedding1: List[\u001b[38;5;28mfloat\u001b[39m] = \u001b[43mmodel\u001b[49m\u001b[43m.\u001b[49m\u001b[43membed_text\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtext\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtext1\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 172\u001b[39m embedding2: List[\u001b[38;5;28mfloat\u001b[39m] = model.embed_text(text=text2)\n\u001b[32m 173\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m metric.similarity(vec_a=embedding1, vec_b=embedding2)\n", - "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[50]\u001b[39m\u001b[32m, line 58\u001b[39m, in \u001b[36mText_Embedding_3_EmbeddingModel.embed_text\u001b[39m\u001b[34m(self, text)\u001b[39m\n\u001b[32m 49\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34membed_text\u001b[39m(\u001b[38;5;28mself\u001b[39m, text: \u001b[38;5;28mstr\u001b[39m) -> List[\u001b[38;5;28mfloat\u001b[39m]:\n\u001b[32m 50\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"Computes the embedding vector for the provided 
text.\u001b[39;00m\n\u001b[32m 51\u001b[39m \n\u001b[32m 52\u001b[39m \u001b[33;03m Args:\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 56\u001b[39m \u001b[33;03m List[float]: A list of floats representing the embedding vector.\u001b[39;00m\n\u001b[32m 57\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m58\u001b[39m response = \u001b[43mllm\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_id\u001b[49m\u001b[43m=\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mopenai:text-embedding-3-large\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtext\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 60\u001b[39m \u001b[38;5;66;03m# response = openai.Embedding.create(\u001b[39;00m\n\u001b[32m 61\u001b[39m \u001b[38;5;66;03m# model=\"text-embedding-3\",\u001b[39;00m\n\u001b[32m 62\u001b[39m \u001b[38;5;66;03m# input=text\u001b[39;00m\n\u001b[32m 63\u001b[39m \u001b[38;5;66;03m# )\u001b[39;00m\n\u001b[32m 64\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m response.data\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/base/services/model_service.py:100\u001b[39m, in \u001b[36mModelService.invoke_model\u001b[39m\u001b[34m(self, model_id, prompt, **kwargs)\u001b[39m\n\u001b[32m 98\u001b[39m response = \u001b[38;5;28mself\u001b[39m._invoke(model_id, prompt, **kwargs)\n\u001b[32m 99\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m100\u001b[39m response = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_invoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 101\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m response\n", - "\u001b[36mFile 
\u001b[39m\u001b[32m~/ember/jared/ember/src/ember/core/registry/model/base/services/model_service.py:109\u001b[39m, in \u001b[36mModelService._invoke\u001b[39m\u001b[34m(self, model_id, prompt, **kwargs)\u001b[39m\n\u001b[32m 107\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m exc:\n\u001b[32m 108\u001b[39m \u001b[38;5;28mself\u001b[39m._logger.exception(\u001b[33m\"\u001b[39m\u001b[33mError invoking model \u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m.\u001b[39m\u001b[33m\"\u001b[39m, model_id)\n\u001b[32m--> \u001b[39m\u001b[32m109\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m ProviderAPIError(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mError invoking model \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mexc\u001b[39;00m\n\u001b[32m 111\u001b[39m metric_counter = \u001b[38;5;28mself\u001b[39m._metrics.get(\u001b[33m\"\u001b[39m\u001b[33mmodel_invocations\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 112\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m metric_counter \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", - "\u001b[31mProviderAPIError\u001b[39m: Error invoking model openai:text-embedding-3-large" ->>>>>>> feb7b31 (added embedding model) + "Similarity between 'Hello world!' 
and 'Hello, world??': 0.9150491464734943\n" ] } ], "source": [ - "openai_embedding_model = Text_Embedding_3_EmbeddingModel()\n", + "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", "\n", "text_a: str = \"Hello world!\"\n", "text_b: str = \"Hello, world??\"\n", "\n", "score: float = calculate_text_similarity(\n", - " text1=text_a, text2=text_b, model=openai_embedding_model, metric=cosine\n", + " text1=text_a, text2=text_b, model=mock_model, metric=cosine\n", ")\n", "print(f\"Similarity between '{text_a}' and '{text_b}': {score}\")" ] @@ -1651,7 +1225,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -1672,7 +1246,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -1690,7 +1264,6 @@ "T_out = TypeVar(\"T_out\")\n", "T_truth = TypeVar(\"T_truth\")\n", "\n", - "\n", "class ComposedEvaluator(IEvaluator[T_out, T_truth], Generic[T_out, T_truth]):\n", " \"\"\"Combines an output extractor with an evaluator for the extracted data.\n", "\n", @@ -1733,7 +1306,7 @@ "# Basic Evaluators\n", "\n", "\n", - "class ExactMatchEaluator(IEvaluator[str, str]):\n", + "class ExactMatchEvaluator(IEvaluator[str, str]):\n", " \"\"\"Evaluator to check for an exact match between two strings,\n", " ignoring differences in whitespace and case.\n", "\n", @@ -1814,7 +1387,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 74, "metadata": { "collapsed": true }, @@ -1823,8 +1396,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. 
Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", - "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + "Note: you may need to restart the kernel to use updated packages.\n" ] } ], @@ -1834,11 +1406,7 @@ }, { "cell_type": "code", -<<<<<<< HEAD "execution_count": 1, -======= - "execution_count": 21, ->>>>>>> feb7b31 (added embedding model) "metadata": {}, "outputs": [], "source": [ @@ -1887,11 +1455,7 @@ }, { "cell_type": "code", -<<<<<<< HEAD "execution_count": 2, -======= - "execution_count": 22, ->>>>>>> feb7b31 (added embedding model) "metadata": {}, "outputs": [ { @@ -2022,13 +1586,14 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 82, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", + "exact_evaluator = ExactMatchEvaluator()\n", "diversity_evaluator = DiversityScoringEvaluator()\n", "edit_dist_evaluator = EditDistanceScoringEvaluator()\n", "\n", @@ -2047,12 +1612,12 @@ " edit_distance = edit_dist_evaluator.evaluate(strings)\n", " print(f\"edit-dist score: {edit_distance.score:.4f}\")\n", " print(\"-------------------------------\")\n", - " print(f\"possible diversity score: {((1-avg_score) + compression.score + edit_distance.score) / 3.}\")" + " print(f\"possible diversity score: {(1-avg_score) * compression.score * edit_distance.score}\")" ] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 86, "metadata": {}, "outputs": [ { @@ -2068,7 +1633,7 @@ "diversity cosine-sim inverse: 0.31418217494353784\n", "edit-dist score: 0.8301\n", "-------------------------------\n", - "possible diversity score: 0.40273692251204346\n" + "possible diversity score: 0.0166745277343434\n" ] } ], @@ -2092,325 +1657,46 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 97, "metadata": 
{}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:27,328 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:27,340 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:27,343 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:27,348 [DEBUG] httpcore.connection: close.started\n", - "2025-03-21 00:21:27,350 [DEBUG] httpcore.connection: close.complete\n", - "2025-03-21 00:21:27,351 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", - "2025-03-21 00:21:27,391 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 00:21:27,393 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", - "2025-03-21 00:21:27,405 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 00:21:27,407 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:27,410 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:27,412 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:27,415 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:27,417 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:21:27,854 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:28 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), 
(b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'375'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999988'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_a7db3170b0524e33464f32cef401550f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb14c9ea65c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:27,856 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:27,858 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:27,862 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:27,864 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:27,866 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:27,868 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:28 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '375', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999988', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 
'x-request-id': 'req_a7db3170b0524e33464f32cef401550f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb14c9ea65c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:27,869 [DEBUG] openai._base_client: request_id: req_a7db3170b0524e33464f32cef401550f\n", - "2025-03-21 00:21:27,871 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:27,875 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:27,877 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:27,878 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:27,882 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:27,885 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:27,887 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:27,888 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ - "Joke 0: [Why don't skeletons fight each other? 
They don't have the guts!]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:28,285 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:28 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'351'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_1e219a2e3ce8c441aab4f3e57c0a654f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb14f88c65c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:28,286 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:28,287 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:28,289 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:28,290 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:28,291 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:28,293 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:28 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '351', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_1e219a2e3ce8c441aab4f3e57c0a654f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb14f88c65c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:28,293 [DEBUG] openai._base_client: request_id: req_1e219a2e3ce8c441aab4f3e57c0a654f\n", - "2025-03-21 00:21:28,296 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:28,306 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:28,310 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:28,314 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:28,316 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:28,323 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:28,328 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:28,331 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 1: [Why don't skeletons fight each other? 
They don't have the guts.]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:28,832 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:29 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'449'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_62cfe73bcae00e48f66067daa60b9a0f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'CF-Cache-Status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb1525b025c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:28,835 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:28,839 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:28,844 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:28,845 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:28,847 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:28,848 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:29 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '449', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_62cfe73bcae00e48f66067daa60b9a0f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb1525b025c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:28,850 [DEBUG] openai._base_client: request_id: req_62cfe73bcae00e48f66067daa60b9a0f\n", - "2025-03-21 00:21:28,854 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:28,861 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:28,863 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:28,865 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:28,866 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:28,867 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:28,868 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:28,869 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 2: [Why did the scarecrow win an award? 
Because he was outstanding in his field!]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:29,612 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:29 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'654'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_c81335d39ba9bd3c52cdd33e341a3f5f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb155bdd35c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:29,615 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:29,616 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:29,620 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:29,621 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:29,623 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:29,624 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:29 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '654', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_c81335d39ba9bd3c52cdd33e341a3f5f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb155bdd35c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:29,625 [DEBUG] openai._base_client: request_id: req_c81335d39ba9bd3c52cdd33e341a3f5f\n", - "2025-03-21 00:21:29,628 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:29,632 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:29,633 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:29,635 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:29,638 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:29,639 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:29,641 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:29,642 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 3: [Why don’t scientists trust atoms? 
Because they make up everything!]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:30,073 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:30 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'384'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_09b2adf96a48a99fc2ff344e924d2288'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb15a89135c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:30,075 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:30,076 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:30,079 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:30,081 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:30,082 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:30,084 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:30 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '384', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_09b2adf96a48a99fc2ff344e924d2288', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb15a89135c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:30,085 [DEBUG] openai._base_client: request_id: req_09b2adf96a48a99fc2ff344e924d2288\n", - "2025-03-21 00:21:30,086 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:30,091 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:30,093 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:30,095 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:30,098 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:30,098 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:30,100 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:30,101 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 4: [Why don't skeletons fight each other? 
They don't have the guts!]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:30,742 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:31 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'601'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_b762852f0cc93993624ba07c2e67ba9f'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb15d6b105c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:30,745 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:30,749 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:30,755 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:30,758 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:30,761 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:30,763 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:31 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '601', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_b762852f0cc93993624ba07c2e67ba9f', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb15d6b105c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:30,767 [DEBUG] openai._base_client: request_id: req_b762852f0cc93993624ba07c2e67ba9f\n", - "2025-03-21 00:21:30,773 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:30,785 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:30,788 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:30,795 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:30,797 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:30,801 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:30,807 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:30,813 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 5: [Why don't skeletons fight each other? 
\n", - "They don't have the guts.]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:31,353 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:31 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'471'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_ba026095394aadcd39b29c1bb1ce6973'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb161cdd35c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:31,357 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:31,360 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:31,363 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:31,364 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:31,365 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:31,366 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:31 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '471', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_ba026095394aadcd39b29c1bb1ce6973', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb161cdd35c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:31,368 [DEBUG] openai._base_client: request_id: req_ba026095394aadcd39b29c1bb1ce6973\n", - "2025-03-21 00:21:31,372 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:31,381 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:31,385 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:31,389 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:31,398 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:31,401 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:31,408 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:31,410 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 6: [Why don't scientists trust atoms? 
Because they make up everything!]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:31,944 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:32 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'444'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_d8bd19c90a2b876b62adaad8f0a58b70'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb16588705c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:31,945 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:31,947 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:31,951 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:31,952 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:31,953 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:31,955 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:32 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 
'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '444', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_d8bd19c90a2b876b62adaad8f0a58b70', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb16588705c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:31,956 [DEBUG] openai._base_client: request_id: req_d8bd19c90a2b876b62adaad8f0a58b70\n", - "2025-03-21 00:21:31,958 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:31,965 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. 
Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:31,970 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:31,973 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:31,976 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:31,978 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:31,981 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:31,986 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 7: [Why don't scientists trust atoms?\n", + "Joke 0: [Why don't scientists trust atoms?\n", "\n", - "Because they make up everything!]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:32,559 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:32 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'519'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_1aea1c3c615b3eb8ed8e75303a68f56a'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb1692b0b5c1d-SJC'), (b'Content-Encoding', 
b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:21:32,561 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:32,564 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:32,568 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:32,570 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:32,573 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:32,573 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:32 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '519', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_1aea1c3c615b3eb8ed8e75303a68f56a', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb1692b0b5c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:32,575 [DEBUG] openai._base_client: request_id: req_1aea1c3c615b3eb8ed8e75303a68f56a\n", - "2025-03-21 00:21:32,580 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:21:32,595 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Tell me a funny joke. 
Keep it concise.'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:21:32,598 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:21:32,601 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:21:32,604 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:21:32,604 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:21:32,607 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:21:32,608 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 8: [Why did the scarecrow win an award? Because he was outstanding in his field!]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:21:33,085 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:21:33 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'424'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_62bf812cc022bb70e422a7dc80b83749'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb16d1dac5c1d-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - 
"2025-03-21 00:21:33,086 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:21:33,088 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:21:33,091 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:21:33,092 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:21:33,093 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:21:33,094 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:21:33 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '424', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_62bf812cc022bb70e422a7dc80b83749', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb16d1dac5c1d-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:21:33,095 [DEBUG] openai._base_client: request_id: req_62bf812cc022bb70e422a7dc80b83749\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 9: [Why don't scientists trust atoms?\n", + "Because they make up everything!]\n", + "Joke 1: [Why don't skeletons fight each other? 
They don't have the guts!]\n", + "Joke 2: [Why don't skeletons fight each other?\n", + "\n", + "They don't have the guts.]\n", + "Joke 3: [Why don't scientists trust atoms?\n", "\n", "Because they make up everything!]\n", + "Joke 4: [Why don't skeletons fight each other? They don't have the guts.]\n", + "Joke 5: [Why don't skeletons fight each other? They don't have the guts.]\n", + "Joke 6: [Why don't skeletons fight each other? They don't have the guts!]\n", + "Joke 7: [Why don’t skeletons fight each other? They don’t have the guts.]\n", + "Joke 8: [Why don't skeletons fight each other? They don't have the guts.]\n", + "Joke 9: [Why don't skeletons fight each other? They don't have the guts.]\n", "-----\n", - "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=2.919, metadata={'responses': [\"Why don't skeletons fight each other? They don't have the guts!\", \"Why don't skeletons fight each other? They don't have the guts.\", 'Why did the scarecrow win an award? Because he was outstanding in his field!', 'Why don’t scientists trust atoms? Because they make up everything!', \"Why don't skeletons fight each other? They don't have the guts!\", \"Why don't skeletons fight each other? \\nThey don't have the guts.\", \"Why don't scientists trust atoms? Because they make up everything!\", \"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", 'Why did the scarecrow win an award? 
Because he was outstanding in his field!', \"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\"]})\n", - "SimilarityScore between ind1=0 and ind2=1: 0.9998557731781514\n", - "SimilarityScore between ind1=1 and ind2=2: 0.8516952804862096\n", - "SimilarityScore between ind1=2 and ind2=3: 0.12224781375093245\n", - "SimilarityScore between ind1=3 and ind2=4: 0.13899372940048665\n", - "SimilarityScore between ind1=4 and ind2=5: 0.930451468399891\n", - "SimilarityScore between ind1=5 and ind2=6: 0.9066215700385928\n", - "SimilarityScore between ind1=6 and ind2=7: 0.9524292508952135\n", - "SimilarityScore between ind1=7 and ind2=8: 0.8506419386731088\n", - "SimilarityScore between ind1=8 and ind2=9: 0.8506419386731088\n", - "SimilarityScore between ind1=9 and ind2=0: 0.8843819811752456\n", - "Avg cosine similarity: 0.748796074467094\n", - "diversity cosine-sim inverse: 0.25120392553290605\n", - "edit-dist score: 0.4794\n", + "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=3.45, metadata={'responses': [\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", \"Why don't skeletons fight each other? They don't have the guts!\", \"Why don't skeletons fight each other?\\n\\nThey don't have the guts.\", \"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", \"Why don't skeletons fight each other? They don't have the guts.\", \"Why don't skeletons fight each other? They don't have the guts.\", \"Why don't skeletons fight each other? They don't have the guts!\", 'Why don’t skeletons fight each other? They don’t have the guts.', \"Why don't skeletons fight each other? They don't have the guts.\", \"Why don't skeletons fight each other? 
They don't have the guts.\"]})\n", + "SimilarityScore between ind1=0 and ind2=1: 0.8843819811752456\n", + "SimilarityScore between ind1=1 and ind2=2: 0.9364324737968321\n", + "SimilarityScore between ind1=2 and ind2=3: 0.8847772710380415\n", + "SimilarityScore between ind1=3 and ind2=4: 0.8857931367895382\n", + "SimilarityScore between ind1=4 and ind2=5: 1.0\n", + "SimilarityScore between ind1=5 and ind2=6: 0.9998557731781514\n", + "SimilarityScore between ind1=6 and ind2=7: 0.1373464430793195\n", + "SimilarityScore between ind1=7 and ind2=8: 0.13729294236742365\n", + "SimilarityScore between ind1=8 and ind2=9: 1.0\n", + "SimilarityScore between ind1=9 and ind2=0: 0.8857931367895382\n", + "Avg cosine similarity: 0.775167315821409\n", + "diversity cosine-sim inverse: 0.22483268417859104\n", + "edit-dist score: 0.2422\n", "-------------------------------\n", - "possible diversity score: 1.2165215583179603\n" + "possible diversity score: 0.18786989150445282\n" ] } ], @@ -2429,62 +1715,40 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 98, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:20:51,760 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:20:51,776 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"Tell me 10 jokes. make them split with '||'. Don't say anything else besides the joke. 
\"}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:20:51,780 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:20:51,786 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:20:51,790 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:20:51,792 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:20:51,798 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:20:51,800 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:20:53,925 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:20:54 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'2074'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999976'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_fc11eea25331eac4ebdbdc434053f357'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923bb06dfcffd001-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:20:53,928 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:20:53,930 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:20:53,934 
[DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:20:53,935 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:20:53,936 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:20:53,938 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:20:54 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '2074', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999976', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_fc11eea25331eac4ebdbdc434053f357', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923bb06dfcffd001-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:20:53,940 [DEBUG] openai._base_client: request_id: req_fc11eea25331eac4ebdbdc434053f357\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ - "Joke 0: [Why did the scarecrow win an award? Because he was outstanding in his field! ]\n", + "Joke 0: [Why don’t scientists trust atoms? Because they make up everything! ]\n", "Joke 1: [ Parallel lines have so much in common. It’s a shame they’ll never meet. ]\n", - "Joke 2: [ Why don’t skeletons fight each other? They don’t have the guts. ]\n", - "Joke 3: [ What do you call fake spaghetti? An impasta! ]\n", - "Joke 4: [ I would tell you a construction joke, but I'm still working on it. ]\n", - "Joke 5: [ Why couldn't the bicycle stand up by itself? It was two tired. 
]\n", - "Joke 6: [ Why did the tomato turn red? Because it saw the salad dressing! ]\n", - "Joke 7: [ What did the ocean say to the beach? Nothing, it just waved. ]\n", - "Joke 8: [ Why did the math book look sad? Because it had too many problems. ]\n", - "Joke 9: [ I told my computer I needed a break, and now it won't stop sending me kit-kat ads!]\n", + "Joke 2: [ Why did the scarecrow win an award? Because he was outstanding in his field! ]\n", + "Joke 3: [ I told my wife she was drawing her eyebrows too high. She looked surprised. ]\n", + "Joke 4: [ Why don’t skeletons fight each other? They don’t have the guts. ]\n", + "Joke 5: [ What do you call fake spaghetti? An impasta! ]\n", + "Joke 6: [ What’s brown and sticky? A stick! ]\n", + "Joke 7: [ Why was the math book sad? It had too many problems. ]\n", + "Joke 8: [ Can February March? No, but April May! ]\n", + "Joke 9: [ Why was the musician arrested? She got in treble.]\n", "-----\n", - "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.513, metadata={'responses': ['Why did the scarecrow win an award? Because he was outstanding in his field! ', ' Parallel lines have so much in common. It’s a shame they’ll never meet. ', ' Why don’t skeletons fight each other? They don’t have the guts. ', ' What do you call fake spaghetti? An impasta! ', \" I would tell you a construction joke, but I'm still working on it. \", \" Why couldn't the bicycle stand up by itself? It was two tired. \", ' Why did the tomato turn red? Because it saw the salad dressing! ', ' What did the ocean say to the beach? Nothing, it just waved. ', ' Why did the math book look sad? Because it had too many problems. 
', \" I told my computer I needed a break, and now it won't stop sending me kit-kat ads!\"]})\n", - "SimilarityScore between ind1=0 and ind2=1: 0.23585869748408375\n", - "SimilarityScore between ind1=1 and ind2=2: 0.030690112807883127\n", - "SimilarityScore between ind1=2 and ind2=3: 0.08483849065288684\n", - "SimilarityScore between ind1=3 and ind2=4: 0.750109080659053\n", - "SimilarityScore between ind1=4 and ind2=5: 0.894508987836746\n", - "SimilarityScore between ind1=5 and ind2=6: 0.9046943831161538\n", - "SimilarityScore between ind1=6 and ind2=7: 0.8888453958820549\n", - "SimilarityScore between ind1=7 and ind2=8: 0.8324994273641826\n", - "SimilarityScore between ind1=8 and ind2=9: 0.7777342312519292\n", - "SimilarityScore between ind1=9 and ind2=0: 0.8589345591870938\n", - "Avg cosine similarity: 0.6258713366242067\n", - "diversity cosine-sim inverse: 0.3741286633757933\n", - "edit-dist score: 0.7251\n", + "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.467, metadata={'responses': ['Why don’t scientists trust atoms? Because they make up everything! ', ' Parallel lines have so much in common. It’s a shame they’ll never meet. ', ' Why did the scarecrow win an award? Because he was outstanding in his field! ', ' I told my wife she was drawing her eyebrows too high. She looked surprised. ', ' Why don’t skeletons fight each other? They don’t have the guts. ', ' What do you call fake spaghetti? An impasta! ', ' What’s brown and sticky? A stick! ', ' Why was the math book sad? It had too many problems. ', ' Can February March? No, but April May! ', ' Why was the musician arrested? 
She got in treble.']})\n", + "SimilarityScore between ind1=0 and ind2=1: 0.03290772299379834\n", + "SimilarityScore between ind1=1 and ind2=2: 0.2362752728324614\n", + "SimilarityScore between ind1=2 and ind2=3: 0.8911484218391627\n", + "SimilarityScore between ind1=3 and ind2=4: 0.23763559138432722\n", + "SimilarityScore between ind1=4 and ind2=5: 0.08483849065288684\n", + "SimilarityScore between ind1=5 and ind2=6: 0.10357981441477468\n", + "SimilarityScore between ind1=6 and ind2=7: 0.21758838510790685\n", + "SimilarityScore between ind1=7 and ind2=8: 0.740438790870045\n", + "SimilarityScore between ind1=8 and ind2=9: 0.7932308818518455\n", + "SimilarityScore between ind1=9 and ind2=0: 0.24304717086492292\n", + "Avg cosine similarity: 0.35806905428121316\n", + "diversity cosine-sim inverse: 0.6419309457187868\n", + "edit-dist score: 0.7335\n", "-------------------------------\n", - "possible diversity score: 0.8707337636418844\n" + "possible diversity score: 0.6907191750125549\n" ] } ], From 23b79a3d6c8f2b9e0d6caf09e28be2203053aaeb Mon Sep 17 00:00:00 2001 From: connorchow Date: Mon, 24 Mar 2025 10:15:53 -0700 Subject: [PATCH 03/14] cleaning up full testbench --- .../model/examples/diversity_testbench.ipynb | 2 +- src/ember/examples/diversity_testbench.ipynb | 1260 ++++------------- 2 files changed, 285 insertions(+), 977 deletions(-) diff --git a/src/ember/core/registry/model/examples/diversity_testbench.ipynb b/src/ember/core/registry/model/examples/diversity_testbench.ipynb index 6e7a12da..e2f93d6f 100644 --- a/src/ember/core/registry/model/examples/diversity_testbench.ipynb +++ b/src/ember/core/registry/model/examples/diversity_testbench.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Diversity Testbench\n", + "# Diversity Testbench (duplicate?)\n", "\n", "---\n", "---" diff --git a/src/ember/examples/diversity_testbench.ipynb b/src/ember/examples/diversity_testbench.ipynb index f42c6407..9cfbfc03 100644 --- 
a/src/ember/examples/diversity_testbench.ipynb +++ b/src/ember/examples/diversity_testbench.ipynb @@ -14,7 +14,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Ember Package Testing (WIP)" + "## Ember Package" ] }, { @@ -24,23 +24,90 @@ "### Setup Dependencies" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "NOTE: things below this are to install required dependencies (do this in the virtual env)" + ] + }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "import logging, sys, os\n", - "from typing import Dict, Any, List" + "# %pip install -q -e .\n", + "# %pip install -q google-generativeai==0.7.2\n", + "\n", + "# embedding model dependencies\n", + "# %pip install -q openai\n", + "\n", + "# compression ratio dependencies\n", + "%pip install -q diversity==0.2.0\n", + "%pip install -q spacy==3.8.4\n", + "\n", + "# edit distance\n", + "%pip install -q python-Levenshtein\n", + "\n", + "# ensemble example\n", + "%pip install -q matplotlib" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "openai_key = os.getenv(\"OPENAI_API_KEY\")" + "# basic imports & dependencies\n", + "from __future__ import annotations\n", + "\n", + "import logging, sys, os, math, re, subprocess\n", + "from typing import Dict, Any, List, Protocol, TypeVar, Optional, Generic, Callable, Union\n", + "from abc import ABC, abstractmethod\n", + "\n", + "from diversity import compression_ratio\n", + "import Levenshtein\n", + "from dataclasses import dataclass\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "\n", + "# ember repo loads\n", + "from ember.core.registry.model.config.settings import initialize_registry\n", + "from ember.core.registry.model.base.services.model_service import ModelService\n", + "from ember.core.registry.model.base.schemas.model_info import ModelInfo\n", + "from 
ember.core.registry.model.base.schemas.cost import ModelCost, RateLimit\n", + "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", + "\n", + "from ember.core.registry.model import load_model, ChatResponse\n", + "from ember.core.registry.model.base.services.model_service import ModelService\n", + "\n", + "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", + "from ember.core.utils.eval.extractors import RegexExtractor" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "openai_key = os.getenv(\"OPENAI_API_KEY\")\n", + "\n", + "# Set global logging level to ERROR\n", + "logging.basicConfig(level=logging.ERROR)\n", + "\n", + "os.environ[\"EMBER_LOGGING_LEVEL\"] = \"ERROR\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "mounting to root directory of ember" ] }, { @@ -85,514 +152,9 @@ "!echo $PWD" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "NOTE: things below this are to install required dependencies (only do this the venv)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# %pip install -q -e .\n", - "# %pip install -q google-generativeai==0.7.2" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Ember Repo Loads (WIP)" - ] - }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 02:00:21,378 [DEBUG] ConfigManager: Loading configuration...\n", - "2025-03-21 02:00:21,379 [DEBUG] ConfigManager: Configuration loaded successfully\n", - "2025-03-21 02:00:21,379 [INFO] ember.core.registry.model.initialization: Execute model discovery (timeout: 30 seconds per provider, running in parallel)\n", - "2025-03-21 02:00:22,003 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - 
"2025-03-21 02:00:22,005 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", - "2025-03-21 02:00:22,013 [DEBUG] ember.core.registry.model.base.registry.discovery: OPENAI_API_KEY found, initialized OpenAIDiscovery successfully\n", - "2025-03-21 02:00:22,014 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - "2025-03-21 02:00:22,015 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", - "2025-03-21 02:00:22,022 [DEBUG] ember.core.registry.model.base.registry.discovery: ANTHROPIC_API_KEY found, initialized AnthropicDiscovery successfully\n", - "2025-03-21 02:00:22,023 [DEBUG] ember.core.registry.model.base.registry.discovery: GOOGLE_API_KEY found, initialized DeepmindDiscovery successfully\n", - "2025-03-21 02:00:22,024 [INFO] ember.core.registry.model.initialization: Initiating model discovery via ModelDiscoveryService\n", - "2025-03-21 02:00:22,027 [DEBUG] openai._base_client: Request options: {'method': 'get', 'url': '/models', 'post_parser': ._parser at 0x7fd791a30220>, 'json_data': None}\n", - "2025-03-21 02:00:22,029 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Starting Anthropic model fetch via REST API...\n", - "2025-03-21 02:00:22,032 [DEBUG] openai._base_client: Sending HTTP Request: GET https://api.openai.com/v1/models\n", - "2025-03-21 02:00:22,032 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Calling Anthropic REST API: https://api.anthropic.com/v1/models with timeout=(2,5)\n", - "2025-03-21 02:00:22,033 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=5.0 socket_options=None\n", - "2025-03-21 02:00:22,037 [DEBUG] urllib3.connectionpool: Starting new HTTPS connection (1): api.anthropic.com:443\n", - "2025-03-21 02:00:22,065 [DEBUG] 
httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 02:00:22,067 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=5.0\n", - "2025-03-21 02:00:22,127 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 02:00:22,130 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 02:00:22,131 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 02:00:22,133 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 02:00:22,134 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 02:00:22,134 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 02:00:22,417 [DEBUG] urllib3.connectionpool: https://api.anthropic.com:443 \"GET /v1/models HTTP/1.1\" 401 86\n", - "2025-03-21 02:00:22,419 [ERROR] ember.core.registry.model.providers.anthropic.anthropic_discovery: Error fetching Anthropic models via REST API: 401 Client Error: Unauthorized for url: https://api.anthropic.com/v1/models\n", - "2025-03-21 02:00:22,420 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Using fallback models due to API request error\n", - "2025-03-21 02:00:22,421 [INFO] ember.core.registry.model.base.registry.discovery: Provider AnthropicDiscovery completed in 0.39s\n", - "2025-03-21 02:00:22,529 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 09:00:22 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'openai-version', b'2020-10-01'), (b'x-request-id', b'edeac53a55ded28a21bbef63a67b7187'), (b'openai-processing-ms', b'318'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', 
b'__cf_bm=9oqKjTNazifaL7c8EB7goelYAka5BwxIcA8Zdpk3olw-1742547622-1.0.1.1-5Vdcdtx1vgOSNwpdHNHX064wrD84MoXWK60t5tnMCx.G5WMmpH8lXlQS4lUmO1RIBNVIfDOed7xbsshEXao9UfK2TQdWuq5k4WitIqV8xkQ; path=/; expires=Fri, 21-Mar-25 09:30:22 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=0zdaLCIygVBxhND6uISxrfeO1Q6pVOqcGGiPGaWdFUo-1742547622791-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c42302ee7f9d8-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 02:00:22,530 [INFO] httpx: HTTP Request: GET https://api.openai.com/v1/models \"HTTP/1.1 200 OK\"\n", - "2025-03-21 02:00:22,532 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 02:00:22,534 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 02:00:22,535 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 02:00:22,535 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 02:00:22,536 [DEBUG] openai._base_client: HTTP Response: GET https://api.openai.com/v1/models \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 09:00:22 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('openai-version', '2020-10-01'), ('x-request-id', 'edeac53a55ded28a21bbef63a67b7187'), ('openai-processing-ms', '318'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=9oqKjTNazifaL7c8EB7goelYAka5BwxIcA8Zdpk3olw-1742547622-1.0.1.1-5Vdcdtx1vgOSNwpdHNHX064wrD84MoXWK60t5tnMCx.G5WMmpH8lXlQS4lUmO1RIBNVIfDOed7xbsshEXao9UfK2TQdWuq5k4WitIqV8xkQ; path=/; expires=Fri, 21-Mar-25 09:30:22 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', 
'_cfuvid=0zdaLCIygVBxhND6uISxrfeO1Q6pVOqcGGiPGaWdFUo-1742547622791-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923c42302ee7f9d8-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", - "2025-03-21 02:00:22,537 [DEBUG] openai._base_client: request_id: edeac53a55ded28a21bbef63a67b7187\n", - "2025-03-21 02:00:22,541 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Fetched 65 models from OpenAI API\n", - "2025-03-21 02:00:22,542 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Filtered to 43 relevant models\n", - "2025-03-21 02:00:22,543 [INFO] ember.core.registry.model.base.registry.discovery: Provider OpenAIDiscovery completed in 0.52s\n", - "2025-03-21 02:00:22,545 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 43 models from OpenAIDiscovery\n", - "2025-03-21 02:00:22,547 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 5 models from AnthropicDiscovery\n", - "/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n", - "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", - "I0000 00:00:1742547622.658771 400399 check_gcp_environment.cc:61] BIOS data file does not exist or cannot be opened.\n", - "2025-03-21 02:00:22,816 [INFO] ember.core.registry.model.base.registry.discovery: Provider DeepmindDiscovery completed in 0.79s\n", - "2025-03-21 02:00:22,817 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 32 models from DeepmindDiscovery\n", - "2025-03-21 02:00:22,818 [INFO] ember.core.registry.model.base.registry.discovery: Discovered 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4o-mini', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 
'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 02:00:22,819 [DEBUG] ember.core.registry.model.initialization: Raw discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 
'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4o-mini', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 
'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 02:00:22,820 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-transcribe discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,821 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-transcribe\n", - "2025-03-21 02:00:22,822 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,822 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-12-17\n", - "2025-03-21 02:00:22,822 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-3 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,823 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-3\n", - "2025-03-21 02:00:22,823 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-2 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,823 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
openai:dall-e-2\n", - "2025-03-21 02:00:22,824 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,824 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-10-01\n", - "2025-03-21 02:00:22,825 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,825 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-10-01\n", - "2025-03-21 02:00:22,825 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,826 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview\n", - "2025-03-21 02:00:22,826 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-large discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,826 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-large\n", - "2025-03-21 02:00:22,827 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,827 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4\n", - "2025-03-21 02:00:22,828 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-05-13 discovered via 
API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,829 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-05-13\n", - "2025-03-21 02:00:22,830 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,832 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview\n", - "2025-03-21 02:00:22,832 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,833 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview\n", - "2025-03-21 02:00:22,833 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct-0914 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,833 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct-0914\n", - "2025-03-21 02:00:22,834 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,835 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview\n", - "2025-03-21 02:00:22,835 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-1106 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,836 [DEBUG] 
ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-1106\n", - "2025-03-21 02:00:22,837 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,837 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview\n", - "2025-03-21 02:00:22,837 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,838 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo\n", - "2025-03-21 02:00:22,838 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,839 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-12-17\n", - "2025-03-21 02:00:22,839 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,840 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct\n", - "2025-03-21 02:00:22,841 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,841 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo\n", - "2025-03-21 02:00:22,841 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,842 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-preview\n", - "2025-03-21 02:00:22,842 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,843 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview-2025-03-11\n", - "2025-03-21 02:00:22,844 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,845 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview\n", - "2025-03-21 02:00:22,845 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-2024-07-18 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,846 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-2024-07-18\n", - "2025-03-21 02:00:22,846 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-0125 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,847 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-0125\n", - "2025-03-21 02:00:22,850 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-08-06 discovered via API but not in local config; using 
defaults with environment API key.\n", - "2025-03-21 02:00:22,851 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-08-06\n", - "2025-03-21 02:00:22,851 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-2024-04-09 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,852 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-2024-04-09\n", - "2025-03-21 02:00:22,853 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-16k discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,853 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-16k\n", - "2025-03-21 02:00:22,854 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,855 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o\n", - "2025-03-21 02:00:22,855 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,856 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview-2024-12-17\n", - "2025-03-21 02:00:22,857 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,857 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini\n", 
- "2025-03-21 02:00:22,858 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-1106-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,858 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-1106-preview\n", - "2025-03-21 02:00:22,859 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-ada-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,859 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-ada-002\n", - "2025-03-21 02:00:22,860 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0613 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,860 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0613\n", - "2025-03-21 02:00:22,861 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,861 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview\n", - "2025-03-21 02:00:22,861 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview-2025-02-27 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,862 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview-2025-02-27\n", - "2025-03-21 02:00:22,862 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview-2025-03-11 discovered via API but not in local config; using defaults with 
environment API key.\n", - "2025-03-21 02:00:22,863 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview-2025-03-11\n", - "2025-03-21 02:00:22,863 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-11-20 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,864 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-11-20\n", - "2025-03-21 02:00:22,864 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-tts discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,865 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-tts\n", - "2025-03-21 02:00:22,865 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0125-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,866 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0125-preview\n", - "2025-03-21 02:00:22,866 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-transcribe discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,869 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-transcribe\n", - "2025-03-21 02:00:22,870 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-small discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,870 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-small\n", - 
"2025-03-21 02:00:22,871 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,872 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview-2024-12-17\n", - "2025-03-21 02:00:22,873 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-sonnet discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,873 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-sonnet\n", - "2025-03-21 02:00:22,874 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-opus discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,874 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-opus\n", - "2025-03-21 02:00:22,874 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-haiku discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,875 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-haiku\n", - "2025-03-21 02:00:22,875 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.5-sonnet discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,876 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.5-sonnet\n", - "2025-03-21 02:00:22,877 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.7-sonnet discovered via API but not in local config; 
using defaults with environment API key.\n", - "2025-03-21 02:00:22,877 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.7-sonnet\n", - "2025-03-21 02:00:22,878 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.0-pro-vision-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,878 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.0-pro-vision-latest\n", - "2025-03-21 02:00:22,879 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-pro-vision discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,879 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-pro-vision\n", - "2025-03-21 02:00:22,879 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,880 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-latest\n", - "2025-03-21 02:00:22,880 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,881 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-001\n", - "2025-03-21 02:00:22,881 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,882 [DEBUG] 
ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-002\n", - "2025-03-21 02:00:22,884 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,884 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro\n", - "2025-03-21 02:00:22,885 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,885 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-latest\n", - "2025-03-21 02:00:22,885 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,886 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001\n", - "2025-03-21 02:00:22,887 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001-tuning discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,887 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001-tuning\n", - "2025-03-21 02:00:22,888 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,889 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
google:models/gemini-1.5-flash\n", - "2025-03-21 02:00:22,889 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,890 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-002\n", - "2025-03-21 02:00:22,891 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,891 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b\n", - "2025-03-21 02:00:22,892 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,893 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-001\n", - "2025-03-21 02:00:22,893 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,895 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-latest\n", - "2025-03-21 02:00:22,896 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0827 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,896 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0827\n", - "2025-03-21 02:00:22,897 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0924 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,897 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0924\n", - "2025-03-21 02:00:22,898 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,899 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp\n", - "2025-03-21 02:00:22,899 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,900 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash\n", - "2025-03-21 02:00:22,900 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,901 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-001\n", - "2025-03-21 02:00:22,901 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp-image-generation discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,902 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp-image-generation\n", - "2025-03-21 02:00:22,902 [WARNING] ember.core.registry.model.base.registry.discovery: Model 
google:models/gemini-2.0-flash-lite-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,903 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-001\n", - "2025-03-21 02:00:22,903 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,904 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite\n", - "2025-03-21 02:00:22,904 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview-02-05 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,906 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview-02-05\n", - "2025-03-21 02:00:22,907 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,908 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview\n", - "2025-03-21 02:00:22,909 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,909 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp\n", - "2025-03-21 02:00:22,910 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp-02-05 discovered via 
API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,910 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp-02-05\n", - "2025-03-21 02:00:22,911 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-exp-1206 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,914 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-exp-1206\n", - "2025-03-21 02:00:22,915 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-01-21 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,916 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-01-21\n", - "2025-03-21 02:00:22,916 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,917 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp\n", - "2025-03-21 02:00:22,917 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-1219 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,918 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-1219\n", - "2025-03-21 02:00:22,918 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/learnlm-1.5-pro-experimental discovered via API but not in local config; 
using defaults with environment API key.\n", - "2025-03-21 02:00:22,919 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/learnlm-1.5-pro-experimental\n", - "2025-03-21 02:00:22,919 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemma-3-27b-it discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 02:00:22,920 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemma-3-27b-it\n", - "2025-03-21 02:00:22,920 [DEBUG] ember.core.registry.model.initialization: Merged discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4o-mini', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 
'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 02:00:22,921 [INFO] ember.core.registry.model.initialization: Registering 80 models from discovery\n", - "2025-03-21 02:00:22,921 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-transcribe (provider: Openai)\n", - "2025-03-21 02:00:22,922 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider 
Openai\n", - "2025-03-21 02:00:22,923 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", - "2025-03-21 02:00:22,923 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 02:00:22,925 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 02:00:22,925 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 02:00:22,926 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-3 (provider: Openai)\n", - "2025-03-21 02:00:22,927 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", - "2025-03-21 02:00:22,927 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", - "2025-03-21 02:00:22,928 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-2 (provider: Openai)\n", - "2025-03-21 02:00:22,929 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", - "2025-03-21 02:00:22,930 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", - "2025-03-21 02:00:22,930 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-10-01 (provider: Openai)\n", - "2025-03-21 02:00:22,931 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", - "2025-03-21 02:00:22,932 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", - "2025-03-21 02:00:22,933 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-10-01 (provider: Openai)\n", - "2025-03-21 02:00:22,933 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", - "2025-03-21 02:00:22,934 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", - "2025-03-21 02:00:22,935 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview (provider: Openai)\n", - "2025-03-21 02:00:22,936 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", - "2025-03-21 02:00:22,937 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", - "2025-03-21 02:00:22,938 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-large (provider: Openai)\n", - "2025-03-21 02:00:22,939 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", - "2025-03-21 02:00:22,939 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", - "2025-03-21 02:00:22,940 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4 (provider: Openai)\n", - "2025-03-21 02:00:22,940 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", - "2025-03-21 02:00:22,941 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", - "2025-03-21 02:00:22,942 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-05-13 (provider: Openai)\n", - "2025-03-21 02:00:22,942 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", - "2025-03-21 02:00:22,944 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", - "2025-03-21 02:00:22,944 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview (provider: Openai)\n", - "2025-03-21 02:00:22,945 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", - "2025-03-21 02:00:22,945 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", - "2025-03-21 02:00:22,946 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview (provider: Openai)\n", - "2025-03-21 02:00:22,946 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", - "2025-03-21 02:00:22,948 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", - "2025-03-21 02:00:22,948 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct-0914 (provider: Openai)\n", - "2025-03-21 02:00:22,949 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", - "2025-03-21 02:00:22,950 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", - "2025-03-21 02:00:22,950 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview (provider: Openai)\n", - "2025-03-21 02:00:22,951 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", - "2025-03-21 02:00:22,951 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", - "2025-03-21 02:00:22,952 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-1106 (provider: Openai)\n", - "2025-03-21 02:00:22,953 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", - "2025-03-21 02:00:22,954 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", - "2025-03-21 02:00:22,954 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview (provider: Openai)\n", - "2025-03-21 02:00:22,955 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", - "2025-03-21 02:00:22,955 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", - "2025-03-21 02:00:22,956 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo (provider: Openai)\n", - "2025-03-21 02:00:22,956 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", - "2025-03-21 02:00:22,957 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", - "2025-03-21 02:00:22,958 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 02:00:22,959 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 02:00:22,960 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 02:00:22,961 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct (provider: Openai)\n", - "2025-03-21 02:00:22,961 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", - "2025-03-21 02:00:22,962 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", - "2025-03-21 02:00:22,962 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo (provider: Openai)\n", - "2025-03-21 02:00:22,962 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", - "2025-03-21 02:00:22,963 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", - "2025-03-21 02:00:22,963 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-preview (provider: Openai)\n", - "2025-03-21 02:00:22,964 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", - "2025-03-21 02:00:22,964 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", - "2025-03-21 02:00:22,965 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview-2025-03-11 (provider: Openai)\n", - "2025-03-21 02:00:22,965 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 02:00:22,965 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 02:00:22,966 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview (provider: Openai)\n", - "2025-03-21 02:00:22,966 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", - "2025-03-21 02:00:22,967 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", - "2025-03-21 02:00:22,967 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-2024-07-18 (provider: Openai)\n", - "2025-03-21 02:00:22,968 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", - "2025-03-21 02:00:22,968 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", - "2025-03-21 02:00:22,969 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-0125 (provider: Openai)\n", - "2025-03-21 02:00:22,972 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", - 
"2025-03-21 02:00:22,972 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", - "2025-03-21 02:00:22,973 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-08-06 (provider: Openai)\n", - "2025-03-21 02:00:22,974 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", - "2025-03-21 02:00:22,974 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", - "2025-03-21 02:00:22,975 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-2024-04-09 (provider: Openai)\n", - "2025-03-21 02:00:22,975 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", - "2025-03-21 02:00:22,976 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", - "2025-03-21 02:00:22,976 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-16k (provider: Openai)\n", - "2025-03-21 02:00:22,977 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", - "2025-03-21 02:00:22,977 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", - "2025-03-21 02:00:22,978 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o (provider: Openai)\n", - "2025-03-21 02:00:22,978 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", - "2025-03-21 02:00:22,979 [INFO] ember.core.registry.model.initialization: 
Successfully registered model: openai:gpt-4o with provider Openai\n", - "2025-03-21 02:00:22,979 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 02:00:22,980 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 02:00:22,981 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 02:00:22,984 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini (provider: Openai)\n", - "2025-03-21 02:00:22,985 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", - "2025-03-21 02:00:22,986 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", - "2025-03-21 02:00:22,987 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-1106-preview (provider: Openai)\n", - "2025-03-21 02:00:22,988 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", - "2025-03-21 02:00:22,988 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", - "2025-03-21 02:00:22,989 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-ada-002 (provider: Openai)\n", - "2025-03-21 02:00:22,989 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-ada-002 with provider Openai\n", - "2025-03-21 02:00:22,990 [INFO] ember.core.registry.model.initialization: Successfully registered 
model: openai:text-embedding-ada-002 with provider Openai\n", - "2025-03-21 02:00:22,991 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0613 (provider: Openai)\n", - "2025-03-21 02:00:22,992 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", - "2025-03-21 02:00:22,993 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", - "2025-03-21 02:00:22,996 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview (provider: Openai)\n", - "2025-03-21 02:00:22,997 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", - "2025-03-21 02:00:22,997 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", - "2025-03-21 02:00:22,998 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview-2025-02-27 (provider: Openai)\n", - "2025-03-21 02:00:22,999 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", - "2025-03-21 02:00:23,002 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", - "2025-03-21 02:00:23,002 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview-2025-03-11 (provider: Openai)\n", - "2025-03-21 02:00:23,003 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 02:00:23,004 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 02:00:23,004 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-11-20 (provider: Openai)\n", - "2025-03-21 02:00:23,005 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", - "2025-03-21 02:00:23,005 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", - "2025-03-21 02:00:23,006 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-tts (provider: Openai)\n", - "2025-03-21 02:00:23,007 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", - "2025-03-21 02:00:23,007 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", - "2025-03-21 02:00:23,008 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0125-preview (provider: Openai)\n", - "2025-03-21 02:00:23,008 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", - "2025-03-21 02:00:23,009 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", - "2025-03-21 02:00:23,009 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-transcribe (provider: Openai)\n", - "2025-03-21 02:00:23,010 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", - "2025-03-21 02:00:23,010 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", - "2025-03-21 
02:00:23,014 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-small (provider: Openai)\n", - "2025-03-21 02:00:23,014 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", - "2025-03-21 02:00:23,015 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", - "2025-03-21 02:00:23,015 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 02:00:23,016 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 02:00:23,016 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 02:00:23,017 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-sonnet (provider: Anthropic)\n", - "2025-03-21 02:00:23,017 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", - "2025-03-21 02:00:23,018 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", - "2025-03-21 02:00:23,019 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-opus (provider: Anthropic)\n", - "2025-03-21 02:00:23,019 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", - "2025-03-21 02:00:23,020 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", - 
"2025-03-21 02:00:23,020 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-haiku (provider: Anthropic)\n", - "2025-03-21 02:00:23,021 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", - "2025-03-21 02:00:23,021 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", - "2025-03-21 02:00:23,022 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.5-sonnet (provider: Anthropic)\n", - "2025-03-21 02:00:23,022 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", - "2025-03-21 02:00:23,023 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", - "2025-03-21 02:00:23,023 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.7-sonnet (provider: Anthropic)\n", - "2025-03-21 02:00:23,023 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", - "2025-03-21 02:00:23,025 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", - "2025-03-21 02:00:23,025 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.0-pro-vision-latest (provider: Google)\n", - "2025-03-21 02:00:23,026 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", - "2025-03-21 02:00:23,026 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with 
provider Google\n", - "2025-03-21 02:00:23,027 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-pro-vision (provider: Google)\n", - "2025-03-21 02:00:23,027 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", - "2025-03-21 02:00:23,028 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", - "2025-03-21 02:00:23,029 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-latest (provider: Google)\n", - "2025-03-21 02:00:23,031 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", - "2025-03-21 02:00:23,034 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", - "2025-03-21 02:00:23,034 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-001 (provider: Google)\n", - "2025-03-21 02:00:23,035 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", - "2025-03-21 02:00:23,036 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", - "2025-03-21 02:00:23,037 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-002 (provider: Google)\n", - "2025-03-21 02:00:23,037 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", - "2025-03-21 02:00:23,038 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
google:models/gemini-1.5-pro-002 with provider Google\n", - "2025-03-21 02:00:23,039 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro (provider: Google)\n", - "2025-03-21 02:00:23,039 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", - "2025-03-21 02:00:23,047 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", - "2025-03-21 02:00:23,048 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-latest (provider: Google)\n", - "2025-03-21 02:00:23,049 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", - "2025-03-21 02:00:23,050 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", - "2025-03-21 02:00:23,051 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001 (provider: Google)\n", - "2025-03-21 02:00:23,051 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", - "2025-03-21 02:00:23,052 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", - "2025-03-21 02:00:23,053 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001-tuning (provider: Google)\n", - "2025-03-21 02:00:23,054 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", - "2025-03-21 02:00:23,054 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", - "2025-03-21 02:00:23,055 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash (provider: Google)\n", - "2025-03-21 02:00:23,056 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", - "2025-03-21 02:00:23,057 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", - "2025-03-21 02:00:23,057 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-002 (provider: Google)\n", - "2025-03-21 02:00:23,058 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", - "2025-03-21 02:00:23,059 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", - "2025-03-21 02:00:23,060 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b (provider: Google)\n", - "2025-03-21 02:00:23,060 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", - "2025-03-21 02:00:23,061 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", - "2025-03-21 02:00:23,070 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-001 (provider: Google)\n", - "2025-03-21 02:00:23,071 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider 
Google\n", - "2025-03-21 02:00:23,072 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", - "2025-03-21 02:00:23,073 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-latest (provider: Google)\n", - "2025-03-21 02:00:23,073 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", - "2025-03-21 02:00:23,074 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", - "2025-03-21 02:00:23,075 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0827 (provider: Google)\n", - "2025-03-21 02:00:23,076 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", - "2025-03-21 02:00:23,077 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", - "2025-03-21 02:00:23,078 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0924 (provider: Google)\n", - "2025-03-21 02:00:23,086 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", - "2025-03-21 02:00:23,087 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", - "2025-03-21 02:00:23,088 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp (provider: Google)\n", - "2025-03-21 02:00:23,089 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", - "2025-03-21 02:00:23,090 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", - "2025-03-21 02:00:23,091 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash (provider: Google)\n", - "2025-03-21 02:00:23,092 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", - "2025-03-21 02:00:23,092 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", - "2025-03-21 02:00:23,093 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-001 (provider: Google)\n", - "2025-03-21 02:00:23,096 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", - "2025-03-21 02:00:23,097 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", - "2025-03-21 02:00:23,098 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp-image-generation (provider: Google)\n", - "2025-03-21 02:00:23,099 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", - "2025-03-21 02:00:23,100 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", - "2025-03-21 02:00:23,100 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: 
google:models/gemini-2.0-flash-lite-001 (provider: Google)\n", - "2025-03-21 02:00:23,101 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", - "2025-03-21 02:00:23,102 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", - "2025-03-21 02:00:23,103 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite (provider: Google)\n", - "2025-03-21 02:00:23,103 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", - "2025-03-21 02:00:23,104 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", - "2025-03-21 02:00:23,104 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview-02-05 (provider: Google)\n", - "2025-03-21 02:00:23,104 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", - "2025-03-21 02:00:23,104 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", - "2025-03-21 02:00:23,105 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview (provider: Google)\n", - "2025-03-21 02:00:23,105 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", - "2025-03-21 02:00:23,106 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with 
provider Google\n", - "2025-03-21 02:00:23,106 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp (provider: Google)\n", - "2025-03-21 02:00:23,107 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", - "2025-03-21 02:00:23,107 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", - "2025-03-21 02:00:23,108 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp-02-05 (provider: Google)\n", - "2025-03-21 02:00:23,108 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", - "2025-03-21 02:00:23,108 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", - "2025-03-21 02:00:23,109 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-exp-1206 (provider: Google)\n", - "2025-03-21 02:00:23,109 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", - "2025-03-21 02:00:23,109 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", - "2025-03-21 02:00:23,110 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-01-21 (provider: Google)\n", - "2025-03-21 02:00:23,113 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", - "2025-03-21 02:00:23,114 [INFO] ember.core.registry.model.initialization: 
Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", - "2025-03-21 02:00:23,114 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp (provider: Google)\n", - "2025-03-21 02:00:23,115 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", - "2025-03-21 02:00:23,116 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", - "2025-03-21 02:00:23,116 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-1219 (provider: Google)\n", - "2025-03-21 02:00:23,118 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", - "2025-03-21 02:00:23,118 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", - "2025-03-21 02:00:23,119 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/learnlm-1.5-pro-experimental (provider: Google)\n", - "2025-03-21 02:00:23,119 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", - "2025-03-21 02:00:23,120 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", - "2025-03-21 02:00:23,120 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemma-3-27b-it (provider: Google)\n", - "2025-03-21 02:00:23,121 [INFO] ember.core.registry.model.initialization: Successfully registered 
model: google:models/gemma-3-27b-it with provider Google\n", - "2025-03-21 02:00:23,121 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", - "2025-03-21 02:00:23,122 [INFO] ember.core.registry.model.initialization: Registration summary: 80 new, 0 skipped, 0 failed\n", - "2025-03-21 02:00:23,124 [INFO] ember.core.registry.model.initialization: Successfully discovered and registered 80 new models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-4o-mini-2024-07-18', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4o-mini', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 
'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 02:00:23,124 [INFO] ember.core.registry.model.initialization: Discovered 80 new models in 1.74s: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13'] and 70 more\n", - "2025-03-21 02:00:23,821 [DEBUG] httpcore.connection: close.started\n", - "2025-03-21 02:00:23,823 [DEBUG] httpcore.connection: close.complete\n" - ] - } - ], - "source": [ - "# from 
ember.core.registry.model.model_module.lm import LMModule, LMModuleConfig\n", - "from ember.core.registry.model.config.settings import initialize_ember\n", - "from ember.core.registry.model.base.services.model_service import ModelService\n", - "from ember.core.registry.model.base.schemas.model_info import ModelInfo\n", - "from ember.core.registry.model.base.schemas.cost import ModelCost, RateLimit\n", - "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", - "\n", - "from ember.core.registry.model import load_model, ChatResponse\n", - "from ember.core.registry.model.base.services.model_service import ModelService\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": { "scrolled": true }, @@ -607,11 +169,22 @@ } ], "source": [ - "model_registry = initialize_ember(config_path=)\n", - "print(model_registry.list_models())\n", + "model_registry = initialize_registry()\n", + "# model_registry = initialize_ember()\n", "llm = ModelService(registry=model_registry)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "\n", + "### Model Registry checks \n", + "\n", + "From the code above, it should auto add models from your config files (which can displayed from printing below), but you can also add your own models as shown below!" 
+ ] + }, { "cell_type": "code", "execution_count": 8, @@ -715,11 +288,38 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register an OpenAI text-embedding model\n", + "openai_info = ModelInfo(\n", + " id=\"openai:text-embedding-3-large\",\n", + " name=\"text-embedding-3-large\",\n", + " cost=ModelCost(input_cost_per_thousand=0.03, output_cost_per_thousand=0.06),\n", + " rate_limit=RateLimit(tokens_per_minute=80000, requests_per_minute=5000),\n", + " provider=ProviderInfo(name=\"OpenAI\", default_api_key=openai_key),\n", + " api_key=openai_key,\n", + ")\n", + "model_registry.register_model(openai_info)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Try model registry\n", + "taken from `src/ember/core/registry/model/examples/example.py`" + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "model_ids: List[str] = [\n", + " \"openai:o1\",\n", " \"openai:gpt-4o\",\n", " \"openai:gpt-4o-mini\",\n", " # \"anthropic:claude-3.5-sonnet\", # API key not working\n", @@ -730,166 +330,9 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 01:47:31,411 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. 
Using the registered provider.\n", - "2025-03-21 01:47:31,411 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o' using provider class 'OpenAIModel'.\n", - "2025-03-21 01:47:31,412 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o\n", - "2025-03-21 01:47:31,413 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 01:47:31,414 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - "2025-03-21 01:47:31,415 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 01:47:31,427 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 01:47:31,428 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 01:47:31,429 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", - "2025-03-21 01:47:31,451 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 01:47:31,452 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", - "2025-03-21 01:47:31,461 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 01:47:31,462 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 01:47:31,463 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 01:47:31,464 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 
01:47:31,465 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 01:47:31,465 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "➡️ Testing model: openai:gpt-4o\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 01:47:32,975 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:47:33 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1377'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_5f9891031dd6ee412bfe5be821dc436d'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=SofhfG4xIJOYKjL3oEQAMSMqtlMVqPmaNBBHje9FrXs-1742546853-1.0.1.1-CxDgL8jHzq.vx_fL0XJTsFydnKINH8wgc_lUngYj7SOLZAUJEeQr8hac4cNva8aHIH_qU8y85200abtGfzFpehSQ_FneYO5O976vWZQXK_8; path=/; expires=Fri, 21-Mar-25 09:17:33 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=BVSC_XlemJmZHqusJyDYGgHj6.ak4SFKLmWjPK6lGwg-1742546853265-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c2f5f896df96b-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 01:47:32,978 [INFO] httpx: HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 01:47:32,980 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 01:47:32,988 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 01:47:32,989 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 01:47:32,990 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 01:47:32,991 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 08:47:33 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('access-control-expose-headers', 'X-Request-ID'), ('openai-organization', 'user-iqhmndueuqg2ljzblqkr2tgh'), ('openai-processing-ms', '1377'), ('openai-version', '2020-10-01'), ('x-ratelimit-limit-requests', '50000'), ('x-ratelimit-limit-tokens', '150000000'), ('x-ratelimit-remaining-requests', '49999'), ('x-ratelimit-remaining-tokens', '149999987'), ('x-ratelimit-reset-requests', '1ms'), ('x-ratelimit-reset-tokens', '0s'), ('x-request-id', 'req_5f9891031dd6ee412bfe5be821dc436d'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=SofhfG4xIJOYKjL3oEQAMSMqtlMVqPmaNBBHje9FrXs-1742546853-1.0.1.1-CxDgL8jHzq.vx_fL0XJTsFydnKINH8wgc_lUngYj7SOLZAUJEeQr8hac4cNva8aHIH_qU8y85200abtGfzFpehSQ_FneYO5O976vWZQXK_8; path=/; expires=Fri, 21-Mar-25 09:17:33 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=BVSC_XlemJmZHqusJyDYGgHj6.ak4SFKLmWjPK6lGwg-1742546853265-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923c2f5f896df96b-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", - "2025-03-21 01:47:32,991 [DEBUG] openai._base_client: request_id: 
req_5f9891031dd6ee412bfe5be821dc436d\n", - "2025-03-21 01:47:32,996 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 01:47:33,000 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 01:47:33,002 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 01:47:33,003 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 01:47:33,005 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 01:47:33,005 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 01:47:33,007 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 01:47:33,007 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "🛎️ Service response from openai:gpt-4o:\n", - "Quantum computing leverages quantum bits (qubits), exploiting superposition and entanglement principles, enabling simultaneous processing of vast possibilities. 
Unlike classical bits, qubits can represent 0 and 1 simultaneously, potentially solving complex problems exponentially faster than classical computers, revolutionizing fields like cryptography, optimization, and materials science.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 01:47:33,434 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:47:33 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'384'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_c47455e6d10ded7a049fedfde39ace97'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c2f6928acf96b-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 01:47:33,437 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 01:47:33,439 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 01:47:33,443 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 01:47:33,446 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 01:47:33,448 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 01:47:33,450 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" 
Headers({'date': 'Fri, 21 Mar 2025 08:47:33 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '384', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_c47455e6d10ded7a049fedfde39ace97', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923c2f6928acf96b-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 01:47:33,452 [DEBUG] openai._base_client: request_id: req_c47455e6d10ded7a049fedfde39ace97\n", - "2025-03-21 01:47:33,456 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. 
Using the registered provider.\n", - "2025-03-21 01:47:33,458 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o-mini' using provider class 'OpenAIModel'.\n", - "2025-03-21 01:47:33,460 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o-mini\n", - "2025-03-21 01:47:33,461 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 01:47:33,470 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 01:47:33,473 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 01:47:33,475 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 01:47:33,479 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 01:47:33,481 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 01:47:33,485 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 01:47:33,487 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "🎯 Direct response from openai:gpt-4o:\n", - "The capital of France is Paris.\n", - "\n", - "➡️ Testing model: openai:gpt-4o-mini\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 01:47:34,916 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:47:35 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', 
b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1363'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_41c953699a64c8f5e49420f537cf7fe6'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c2f6c1af8f96b-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 01:47:34,917 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 01:47:34,918 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 01:47:34,923 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 01:47:34,925 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 01:47:34,926 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 01:47:34,927 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 08:47:35 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '1363', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_41c953699a64c8f5e49420f537cf7fe6', 'strict-transport-security': 'max-age=31536000; 
includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923c2f6c1af8f96b-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 01:47:34,928 [DEBUG] openai._base_client: request_id: req_41c953699a64c8f5e49420f537cf7fe6\n", - "2025-03-21 01:47:34,930 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 01:47:34,934 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 01:47:34,935 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 01:47:34,936 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 01:47:34,938 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 01:47:34,939 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 01:47:34,939 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 01:47:34,940 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "🛎️ Service response from openai:gpt-4o-mini:\n", - "Quantum computing harnesses the principles of quantum mechanics to process information using qubits, which can represent multiple states simultaneously. 
This enables vastly superior computational power for certain tasks, such as factoring large numbers or simulating molecular interactions, potentially solving problems that are currently intractable for classical computers.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 01:47:35,574 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:47:35 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'581'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_99f2a454c9dd8454feccdaa4c1581cfd'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c2f7539aef96b-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 01:47:35,576 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 01:47:35,577 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 01:47:35,579 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 01:47:35,579 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 01:47:35,580 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 01:47:35,581 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" 
Headers({'date': 'Fri, 21 Mar 2025 08:47:35 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '581', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_99f2a454c9dd8454feccdaa4c1581cfd', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923c2f7539aef96b-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 01:47:35,581 [DEBUG] openai._base_client: request_id: req_99f2a454c9dd8454feccdaa4c1581cfd\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "🎯 Direct response from openai:gpt-4o-mini:\n", - "The capital of France is Paris.\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "for model_id in model_ids:\n", " try:\n", @@ -917,45 +360,9 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 01:49:49,992 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 01:49:50,000 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello!'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 01:49:50,004 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 
01:49:50,005 [DEBUG] httpcore.connection: close.started\n", - "2025-03-21 01:49:50,007 [DEBUG] httpcore.connection: close.complete\n", - "2025-03-21 01:49:50,009 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", - "2025-03-21 01:49:50,035 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 01:49:50,036 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", - "2025-03-21 01:49:50,065 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 01:49:50,067 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 01:49:50,070 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 01:49:50,071 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 01:49:50,074 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 01:49:50,075 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 01:49:50,577 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 08:49:50 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'438'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999995'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_afeab9892d79b1ffddd93afd92847985'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', 
b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923c32c1cf45156c-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 01:49:50,579 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 01:49:50,580 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 01:49:50,583 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 01:49:50,584 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 01:49:50,585 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 01:49:50,587 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 08:49:50 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '438', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999995', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_afeab9892d79b1ffddd93afd92847985', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923c32c1cf45156c-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 01:49:50,588 [DEBUG] openai._base_client: request_id: req_afeab9892d79b1ffddd93afd92847985\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hi there! 
How can I assist you today?\n" - ] - } - ], + "outputs": [], "source": [ "response = llm(prompt=\"Hello!\", model_id=\"openai:gpt-4o\")\n", "print(response.data)" @@ -970,45 +377,15 @@ "\n", "## Neural Similarity Scoring - Cosine Similarity (WIP)\n", "\n", - "- from `src/ember/core/utils/embedding_utils.py`\n", - "- from jason\n", - "- need to merge" + "- from `src/ember/core/utils/embedding_utils.py`" ] }, { "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", - "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" - ] - } - ], - "source": [ - "%pip install -q openai" - ] - }, - { - "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from __future__ import annotations\n", - "\n", - "from abc import ABC, abstractmethod\n", - "from typing import List, Protocol\n", - "import math\n", - "\n", - "import openai\n", - "import os\n", - "\n", - "\n", "################################################################\n", "# 1) Embedding Model Interfaces & Implementations\n", "################################################################\n", @@ -1036,6 +413,29 @@ " \"\"\"\n", " ...\n", "\n", + "class Text_Embedding_Ada_002_Model:\n", + " \"\"\"Interface for embedding models.\n", + "\n", + " This protocol defines the minimal interface required to compute a text\n", + " embedding. 
Implementations may use local models, external APIs, or custom\n", + " neural networks.\n", + "\n", + " Methods:\n", + " embed_text: Compute the embedding for a given text.\n", + " \"\"\"\n", + "\n", + " def embed_text(self, text: str) -> List[float]:\n", + " \"\"\"Computes the embedding vector for the provided text.\n", + "\n", + " Args:\n", + " text (str): The text to be embedded.\n", + "\n", + " Returns:\n", + " List[float]: A list of floats representing the embedding vector.\n", + " \"\"\"\n", + " response = llm(model_id=\"openai:text-embedding-ada-002\", prompt=text)\n", + " return response.embedding\n", + "\n", "class Text_Embedding_3_EmbeddingModel(Protocol):\n", " \"\"\"Interface for embedding models.\n", "\n", @@ -1185,7 +585,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": { "scrolled": true }, @@ -1199,68 +599,45 @@ } ], "source": [ - "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", + "embedding_model: Text_Embedding_Ada_002_Model = Text_Embedding_Ada_002_Model()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", "\n", "text_a: str = \"Hello world!\"\n", "text_b: str = \"Hello, world??\"\n", "\n", - "score: float = calculate_text_similarity(\n", - " text1=text_a, text2=text_b, model=mock_model, metric=cosine\n", - ")\n", - "print(f\"Similarity between '{text_a}' and '{text_b}': {score}\")" + "diverse_text = [\"Bananas don't belong in briefcases\", \"Abraham Lincoln\", \"ERROR 404: Index Not Found\"]\n", + "\n", + "different_words_not_diverse_strs = [\"peanut butter and jelly\", \"bacon lettuce tomato\"]\n", + "\n", + "repetition_strs = [\"This is a sample text with lots of repetition.\", \n", + " \"This is a sample text with lots of repetition.\"]\n", + "\n", + "test_strings = [diverse_text, different_words_not_diverse_strs, repetition_strs]\n", + "\n", + "for test in test_strings:\n", + " score: float = calculate_text_similarity(\n", + " text1=test[0], text2=test[1], model=embedding_model, 
metric=cosine\n", + " )\n", + "\n", + " print(f\"Cosine similarity Score: {score:.4f}\")\n", + " print(\"\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "---\n", - "---\n", - "\n", - "## Compression Ratio (WIP)\n", + "## Compression Ratio\n", "\n", "from `src/ember/core/utils/eval/evaluators.py`" ] }, { "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", - "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n", - "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. 
Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", - "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" - ] - } - ], - "source": [ - "%pip install -q diversity==0.2.0\n", - "%pip install -q spacy==3.8.4" - ] - }, - { - "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from __future__ import annotations\n", - "\n", - "import re\n", - "import subprocess\n", - "from typing import Any, Dict, TypeVar, Optional, List, Generic, Callable, Union\n", - "\n", - "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", - "from ember.core.utils.eval.extractors import RegexExtractor\n", - "\n", - "from diversity import compression_ratio\n", - "\n", "T_out = TypeVar(\"T_out\")\n", "T_truth = TypeVar(\"T_truth\")\n", "\n", @@ -1355,7 +732,7 @@ " score = 1.0 if is_correct else 0.0\n", " return EvaluationResult(is_correct=is_correct, score=score)\n", "\n", - "class DiversityScoringEvaluator(IEvaluator[List[str], None]):\n", + "class DiversityCompressionEvaluator(IEvaluator[List[str], None]):\n", " \"\"\"\n", " Evaluator to test ensemble outputs -> score them (float)\n", " \"\"\"\n", @@ -1372,55 +749,60 @@ "\n", " # example I was thinking about:\n", " letter_sum = sum(len(response) for response in system_output)\n", - " ratio = compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", + " ratio = 1/compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", " # ratio = compression_ratio(system_output, algorithm='gzip',verbose=True)\n", " return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output})\n" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "## Edit Distance (WIP)\n", - "- need to merge" + 
"compression_evaluator = DiversityCompressionEvaluator()\n", + "\n", + "# input_strs = [\n", + "# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", + "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", + "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. 
Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", + "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", + "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. 
Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", + "# ]\n", + "\n", + "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", + "\n", + "# input_strs = [\"This is a sample text with lots of repetition.\", \n", + "# \"This is a sample text with lots of repetition.\",\n", + "# \"This is a sample text with lots of repetition.\"]\n", + "\n", + "edit_distance = compression_evaluator.evaluate(input_strs)\n", + "\n", + "print(f\"Compression Score: {edit_distance.score:.4f}\")\n", + "print(f\"Is Correct: {edit_distance.is_correct}\")\n", + "print(f\"Metadata: {edit_distance.metadata}\")" ] }, { - "cell_type": "code", - "execution_count": 74, - "metadata": { - "collapsed": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], + "cell_type": "markdown", + "metadata": {}, "source": [ - "%pip install -q python-Levenshtein" + "## Edit Distance" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "import Levenshtein\n", - "from typing import List\n", - "from dataclasses import dataclass\n", - "\n", "@dataclass\n", "class EvaluationResult:\n", " is_correct: bool\n", " score: float\n", " metadata: dict\n", "\n", - "class EditDistanceScoringEvaluator:\n", + "class DiversityEditDistanceEvaluator:\n", "\n", " def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult:\n", " if system_output is None or len(system_output) == 0:\n", @@ -1455,7 +837,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1469,7 +851,7 @@ } ], "source": [ - "distance_evaluator = EditDistanceScoringEvaluator()\n", + "distance_evaluator = DiversityEditDistanceEvaluator()\n", "\n", "# input_strs = [\n", "# 
\";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", @@ -1487,7 +869,7 @@ "\n", "edit_distance = distance_evaluator.evaluate(input_strs)\n", "\n", - "print(f\"Diversity Score: {edit_distance.score:.4f}\")\n", + "print(f\"Edit Distance Score: {edit_distance.score:.4f}\")\n", "print(f\"Is Correct: {edit_distance.is_correct}\")\n", "print(f\"Metadata: {edit_distance.metadata}\")" ] @@ -1496,27 +878,22 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Novelty Score\n", - "- need to merge" + "## Novelty Score" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from typing import List\n", - "from dataclasses import dataclass\n", - "import numpy as np\n", - "\n", "@dataclass\n", "class EvaluationResult:\n", " is_correct: bool\n", " score: float\n", " metadata: dict\n", "\n", - "class NoveltyScoringEvaluator:\n", + "class DiversityNoveltyEvaluator:\n", " \n", " def evaluate(self, model: EmbeddingModel, system_output: List[str], **kwargs) -> EvaluationResult:\n", " if not system_output or len(system_output) == 0:\n", @@ -1550,7 +927,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1562,14 +939,14 @@ } ], "source": [ - "novelty_evaluator = NoveltyScoringEvaluator()\n", + "novelty_evaluator = 
DiversityNoveltyEvaluator()\n", "\n", "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", "\n", "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", "novelty = novelty_evaluator.evaluate(mock_model, input_strs)\n", "\n", - "print(f\"Diversity Score: {novelty.score:.4f}\")\n", + "print(f\"Novelty Score: {novelty.score:.4f}\")\n", "print(f\"Is Correct: {novelty.is_correct}\")\n", "print(f\"Metadata: {novelty.metadata}\")" ] @@ -1584,188 +961,122 @@ "## Putting it all together" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Function to combine all the scores (cosine similarity, compression ratio, edit distance)" + ] + }, { "cell_type": "code", - "execution_count": 82, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", "exact_evaluator = ExactMatchEvaluator()\n", - "diversity_evaluator = DiversityScoringEvaluator()\n", - "edit_dist_evaluator = EditDistanceScoringEvaluator()\n", + "compression_evaluator = DiversityCompressionEvaluator()\n", + "edit_dist_evaluator = DiversityEditDistanceEvaluator()\n", "\n", "def ensemble_diversity(strings):\n", - " compression = diversity_evaluator.evaluate(strings)\n", - " print(\"DiversityScoringEvaluator result:\", compression)\n", - " scores = list()\n", + " compression = compression_evaluator.evaluate(strings)\n", + " # print(\"compression (1/compression == compression/original) result:\", compression)\n", + " cosine_scores = list()\n", " for ind1 in range(len(strings)):\n", " ind2 = ind1+1 if ind1+1 != len(strings) else 0\n", " curr_score = calculate_text_similarity(text1=strings[ind1], text2=strings[ind2], model=mock_model, metric=cosine)\n", - " print(f\"SimilarityScore between ind1={ind1} and ind2={ind2}: {curr_score}\")\n", - " scores.append(curr_score)\n", - " avg_score = np.average(scores)\n", - " print(f\"Avg cosine similarity: 
{avg_score}\")\n", - " print(f\"diversity cosine-sim inverse: {1-avg_score}\")\n", + " # print(f\"SimilarityScore between ind1={ind1} and ind2={ind2}: {curr_score}\")\n", + " cosine_scores.append(curr_score)\n", + " avg_cosine_score = np.average(cosine_scores)\n", + " # print(f\"Avg cosine similarity: {avg_score}\")\n", + " # print(f\"diversity cosine-sim inverse: {1-avg_score}\")\n", " edit_distance = edit_dist_evaluator.evaluate(strings)\n", - " print(f\"edit-dist score: {edit_distance.score:.4f}\")\n", - " print(\"-------------------------------\")\n", - " print(f\"possible diversity score: {(1-avg_score) * compression.score * edit_distance.score}\")" + " # print(f\"edit-dist score: {edit_distance.score:.4f}\")\n", + " # print(\"-------------------------------\")\n", + " diversity_score = ((1 - avg_cosine_score) + min(compression.score, 1) + edit_distance.score)/3\n", + " # print(f\"possible diversity score (higher is better): {diversity_score}\")\n", + " return diversity_score" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Creating a list of strings" ] }, { "cell_type": "code", - "execution_count": 86, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=0.063936, metadata={'responses': ['hi there', 'hi', 'hello', 'yo whatup']})\n", - "SimilarityScore between ind1=0 and ind2=1: 0.5207675658482732\n", - "SimilarityScore between ind1=1 and ind2=2: 0.6088947130341378\n", - "SimilarityScore between ind1=2 and ind2=3: 0.67913155770349\n", - "SimilarityScore between ind1=3 and ind2=0: 0.9344774636399475\n", - "Avg cosine similarity: 0.6858178250564622\n", - "diversity cosine-sim inverse: 0.31418217494353784\n", - "edit-dist score: 0.8301\n", - "-------------------------------\n", - "possible diversity score: 0.0166745277343434\n" - ] - } - ], + "outputs": [], "source": [ - "# input_strs = [\n", - 
"# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", - "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", + "input_strs = []\n", + "scores = []\n", + "input_strs.append([\"This is a sample text with lots of repetition.\", \n", + " \"This is a sample text with lots of repetition.\",\n", + " \"This is a sample text with lots of repetition.\"])\n", + "\n", + "responses = []\n", + "for i in range(10):\n", + " res = llm(prompt=\"Tell me a funny joke. Keep it concise.\", model_id=\"openai:gpt-4o\").data.replace(\"\\n\", \"\")\n", + " responses.append(res)\n", + "input_strs.append(responses)\n", + "\n", + "responses = []\n", + "res = llm(prompt=\"Tell me 10 different jokes. make them split with \\'||\\'. 
Don't say anything else besides the joke. \", model_id=\"openai:gpt-4o\").data.replace(\"\\n\", \"\").split('||')\n", + "responses += res\n", + "input_strs.append(responses)\n", + "\n", + "# input_strs.append([\n", + "# \"Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", + "# \"In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. 
Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", - "# ]\n", + "# ])\n", "\n", - "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", - "\n", - "# input_strs = [\"This is a sample text with lots of repetition.\", \n", - "# \"This is a sample text with lots of repetition.\",\n", - "# \"This is a sample text with lots of repetition.\"]\n", - "\n", - "ensemble_diversity(input_strs)" + "responses = []\n", + "res1 = llm(prompt=\"Tell me a story about how quantum computers work. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", + "responses.append(res1)\n", + "res2 = llm(prompt=\"Tell me a story about bunnies frolicking in the grass. 
Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", + "responses.append(res2)\n", + "res3 = llm(prompt=\"Tell me a story about the pokemon pikachu and it's adventures. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", + "responses.append(res3)\n", + "res4 = llm(prompt=\"Tell me a story about a ramen shop. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", + "responses.append(res4)\n", + "input_strs.append(responses)" ] }, { "cell_type": "code", - "execution_count": 97, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 0: [Why don't scientists trust atoms?\n", - "\n", - "Because they make up everything!]\n", - "Joke 1: [Why don't skeletons fight each other? They don't have the guts!]\n", - "Joke 2: [Why don't skeletons fight each other?\n", - "\n", - "They don't have the guts.]\n", - "Joke 3: [Why don't scientists trust atoms?\n", - "\n", - "Because they make up everything!]\n", - "Joke 4: [Why don't skeletons fight each other? They don't have the guts.]\n", - "Joke 5: [Why don't skeletons fight each other? They don't have the guts.]\n", - "Joke 6: [Why don't skeletons fight each other? They don't have the guts!]\n", - "Joke 7: [Why don’t skeletons fight each other? They don’t have the guts.]\n", - "Joke 8: [Why don't skeletons fight each other? They don't have the guts.]\n", - "Joke 9: [Why don't skeletons fight each other? They don't have the guts.]\n", - "-----\n", - "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=3.45, metadata={'responses': [\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", \"Why don't skeletons fight each other? 
They don't have the guts!\", \"Why don't skeletons fight each other?\\n\\nThey don't have the guts.\", \"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", \"Why don't skeletons fight each other? They don't have the guts.\", \"Why don't skeletons fight each other? They don't have the guts.\", \"Why don't skeletons fight each other? They don't have the guts!\", 'Why don’t skeletons fight each other? They don’t have the guts.', \"Why don't skeletons fight each other? They don't have the guts.\", \"Why don't skeletons fight each other? They don't have the guts.\"]})\n", - "SimilarityScore between ind1=0 and ind2=1: 0.8843819811752456\n", - "SimilarityScore between ind1=1 and ind2=2: 0.9364324737968321\n", - "SimilarityScore between ind1=2 and ind2=3: 0.8847772710380415\n", - "SimilarityScore between ind1=3 and ind2=4: 0.8857931367895382\n", - "SimilarityScore between ind1=4 and ind2=5: 1.0\n", - "SimilarityScore between ind1=5 and ind2=6: 0.9998557731781514\n", - "SimilarityScore between ind1=6 and ind2=7: 0.1373464430793195\n", - "SimilarityScore between ind1=7 and ind2=8: 0.13729294236742365\n", - "SimilarityScore between ind1=8 and ind2=9: 1.0\n", - "SimilarityScore between ind1=9 and ind2=0: 0.8857931367895382\n", - "Avg cosine similarity: 0.775167315821409\n", - "diversity cosine-sim inverse: 0.22483268417859104\n", - "edit-dist score: 0.2422\n", - "-------------------------------\n", - "possible diversity score: 0.18786989150445282\n" - ] - } - ], + "outputs": [], "source": [ - "num_jokes = 10\n", - "responses = []\n", - "\n", - "for i in range(num_jokes):\n", - " res = llm(prompt=\"Tell me a funny joke. 
Keep it concise.\", model_id=\"openai:gpt-4o\").data\n", - " responses.append(res)\n", - " print(f\"Joke {i}: [{res}]\")\n", - "\n", - "print(\"-----\")\n", - "ensemble_diversity(responses)" + "for i in range(len(input_strs)):\n", + " print(f\"{i} -------------------\")\n", + " for s in input_strs[i]:\n", + " print(\"\\t- \" + s)" ] }, { "cell_type": "code", - "execution_count": 98, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 0: [Why don’t scientists trust atoms? Because they make up everything! ]\n", - "Joke 1: [ Parallel lines have so much in common. It’s a shame they’ll never meet. ]\n", - "Joke 2: [ Why did the scarecrow win an award? Because he was outstanding in his field! ]\n", - "Joke 3: [ I told my wife she was drawing her eyebrows too high. She looked surprised. ]\n", - "Joke 4: [ Why don’t skeletons fight each other? They don’t have the guts. ]\n", - "Joke 5: [ What do you call fake spaghetti? An impasta! ]\n", - "Joke 6: [ What’s brown and sticky? A stick! ]\n", - "Joke 7: [ Why was the math book sad? It had too many problems. ]\n", - "Joke 8: [ Can February March? No, but April May! ]\n", - "Joke 9: [ Why was the musician arrested? She got in treble.]\n", - "-----\n", - "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.467, metadata={'responses': ['Why don’t scientists trust atoms? Because they make up everything! ', ' Parallel lines have so much in common. It’s a shame they’ll never meet. ', ' Why did the scarecrow win an award? Because he was outstanding in his field! ', ' I told my wife she was drawing her eyebrows too high. She looked surprised. ', ' Why don’t skeletons fight each other? They don’t have the guts. ', ' What do you call fake spaghetti? An impasta! ', ' What’s brown and sticky? A stick! ', ' Why was the math book sad? It had too many problems. ', ' Can February March? No, but April May! ', ' Why was the musician arrested? 
She got in treble.']})\n", - "SimilarityScore between ind1=0 and ind2=1: 0.03290772299379834\n", - "SimilarityScore between ind1=1 and ind2=2: 0.2362752728324614\n", - "SimilarityScore between ind1=2 and ind2=3: 0.8911484218391627\n", - "SimilarityScore between ind1=3 and ind2=4: 0.23763559138432722\n", - "SimilarityScore between ind1=4 and ind2=5: 0.08483849065288684\n", - "SimilarityScore between ind1=5 and ind2=6: 0.10357981441477468\n", - "SimilarityScore between ind1=6 and ind2=7: 0.21758838510790685\n", - "SimilarityScore between ind1=7 and ind2=8: 0.740438790870045\n", - "SimilarityScore between ind1=8 and ind2=9: 0.7932308818518455\n", - "SimilarityScore between ind1=9 and ind2=0: 0.24304717086492292\n", - "Avg cosine similarity: 0.35806905428121316\n", - "diversity cosine-sim inverse: 0.6419309457187868\n", - "edit-dist score: 0.7335\n", - "-------------------------------\n", - "possible diversity score: 0.6907191750125549\n" - ] - } - ], + "outputs": [], "source": [ - "prompts = 1\n", - "responses = []\n", - "\n", - "for i in range(prompts):\n", - " res = llm(prompt=\"Tell me 10 jokes. make them split with \\'||\\'. Don't say anything else besides the joke. 
\", model_id=\"openai:gpt-4o\").data.split('||')\n", - " responses += res\n", - "\n", - "if prompts == 1 and len(responses) > 1:\n", - " for i in range(len(responses)):\n", - " print(f\"Joke {i}: [{responses[i]}]\")\n", - "\n", - "print(\"-----\")\n", - "ensemble_diversity(responses)" + "scores = [ensemble_diversity(s_arr) for s_arr in input_strs]\n", + "labels = [str(number) for number in range(1, len(input_strs) + 1)]\n", + "\n", + "plt.figure(figsize=(8, 5))\n", + "plt.bar(labels, scores)\n", + "plt.xlabel(\"Response\")\n", + "plt.ylabel(\"Diversity Score\")\n", + "plt.tight_layout()\n", + "plt.show()" ] }, { @@ -1774,9 +1085,6 @@ "source": [ "---\n", "---\n", - "## Improvements TODO\n", - "- Merge all functions\n", - "- fix ensembling\n", "## Potential other cases to explore\n", "- work ensembling all \"diversity\" related metrics \n", " - add more metrics\n", From 531e0a9f13b7e242c717e4abedf0ce3970958d87 Mon Sep 17 00:00:00 2001 From: connorchow Date: Sun, 30 Mar 2025 23:47:35 -0700 Subject: [PATCH 04/14] adding previous diversity integration (note: need to fix embedding models) --- .gitignore | 5 + .../model/base/schemas/chat_schemas.py | 2 +- .../model/examples/diversity_testbench.ipynb | 1864 ----------------- .../model/providers/openai/openai_provider.py | 1 + src/ember/core/utils/embedding_utils.py | 26 + src/ember/core/utils/eval/evaluators.py | 63 +- src/ember/examples/diversity_testbench.ipynb | 565 +++-- 7 files changed, 435 insertions(+), 2091 deletions(-) create mode 100644 .gitignore delete mode 100644 src/ember/core/registry/model/examples/diversity_testbench.ipynb diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..846280fe --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +# Ignore all __pycache__ directories +**/__pycache__/ + +# Ignore all .egg-info directories or files +*.egg-info/ \ No newline at end of file diff --git a/src/ember/core/registry/model/base/schemas/chat_schemas.py 
b/src/ember/core/registry/model/base/schemas/chat_schemas.py index fb4649b3..9dfa2dcc 100644 --- a/src/ember/core/registry/model/base/schemas/chat_schemas.py +++ b/src/ember/core/registry/model/base/schemas/chat_schemas.py @@ -115,6 +115,6 @@ class ChatResponse(BaseModel): """ data: str - embedding: list[float] = None + embedding: list[float] = None # TODO: Fix embedding model structure raw_output: Any = None usage: Optional[UsageStats] = None diff --git a/src/ember/core/registry/model/examples/diversity_testbench.ipynb b/src/ember/core/registry/model/examples/diversity_testbench.ipynb deleted file mode 100644 index e2f93d6f..00000000 --- a/src/ember/core/registry/model/examples/diversity_testbench.ipynb +++ /dev/null @@ -1,1864 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Diversity Testbench (duplicate?)\n", - "\n", - "---\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Ember Package Testing (WIP)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Setup Dependencies" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "import logging, sys, os\n", - "from typing import Dict, Any, List\n", - "\n", - "logging.basicConfig(level=logging.ERROR)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "openai_key = os.getenv(\"OPENAI_API_KEY\")" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/root/ember/jared/ember/src/ember/core/registry/model/examples\n" - ] - } - ], - "source": [ - "# fixing dependencies if current path is /src/ember/examples/diversity_testbench.ipynb\n", - "target_dir = 'src/ember/examples'\n", - "if os.getcwd()[-18:] == target_dir:\n", - " os.chdir('../../..')\n", - "print(os.getcwd())\n", - "\n", - 
"project_root = os.path.abspath(os.path.join(os.getcwd(), \"../../..\"))\n", - "if project_root not in sys.path:\n", - " sys.path.insert(0, project_root)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/root/ember/jared/ember/src/ember/core/registry/model/examples\n" - ] - } - ], - "source": [ - "!echo $PWD" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "NOTE: things below this are to install required dependencies (only do this the venv)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "# %pip install -q -e .\n", - "# %pip install -q google-generativeai==0.7.2" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Ember Repo Loads (WIP)" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "# from ember.core.registry.model.model_module.lm import LMModule, LMModuleConfig\n", - "from ember.core.registry.model.config.settings import initialize_registry\n", - "from ember.core.registry.model.base.services.model_service import ModelService\n", - "from ember.core.registry.model.base.schemas.model_info import ModelInfo\n", - "from ember.core.registry.model.base.schemas.cost import ModelCost, RateLimit\n", - "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", - "\n", - "from ember.core.registry.model import load_model, ChatResponse\n", - "from ember.core.registry.model.base.services.model_service import ModelService\n" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:55:38,516 [DEBUG] ConfigManager: Loading configuration...\n", - "2025-03-21 00:55:38,523 [DEBUG] ConfigManager: Configuration loaded successfully\n", - 
"2025-03-21 00:55:38,524 [INFO] ember.core.registry.model.initialization: Execute model discovery (timeout: 30 seconds per provider, running in parallel)\n", - "2025-03-21 00:55:38,527 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - "2025-03-21 00:55:38,542 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", - "2025-03-21 00:55:38,572 [DEBUG] ember.core.registry.model.base.registry.discovery: OPENAI_API_KEY found, initialized OpenAIDiscovery successfully\n", - "2025-03-21 00:55:38,575 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - "2025-03-21 00:55:38,577 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", - "2025-03-21 00:55:38,588 [DEBUG] ember.core.registry.model.base.registry.discovery: ANTHROPIC_API_KEY found, initialized AnthropicDiscovery successfully\n", - "2025-03-21 00:55:38,590 [DEBUG] ember.core.registry.model.base.registry.discovery: GOOGLE_API_KEY found, initialized DeepmindDiscovery successfully\n", - "2025-03-21 00:55:38,593 [INFO] ember.core.registry.model.initialization: Initiating model discovery via ModelDiscoveryService\n", - "2025-03-21 00:55:38,608 [DEBUG] openai._base_client: Request options: {'method': 'get', 'url': '/models', 'post_parser': ._parser at 0x7fbfe4e29940>, 'json_data': None}\n", - "2025-03-21 00:55:38,615 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Starting Anthropic model fetch via REST API...\n", - "2025-03-21 00:55:38,621 [DEBUG] openai._base_client: Sending HTTP Request: GET https://api.openai.com/v1/models\n", - "2025-03-21 00:55:38,674 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Calling Anthropic REST API: https://api.anthropic.com/v1/models with timeout=(2,5)\n", - "2025-03-21 00:55:38,676 [DEBUG] httpcore.connection: 
connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=5.0 socket_options=None\n", - "2025-03-21 00:55:38,702 [DEBUG] urllib3.connectionpool: Starting new HTTPS connection (1): api.anthropic.com:443\n", - "2025-03-21 00:55:38,726 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 00:55:38,727 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=5.0\n", - "2025-03-21 00:55:38,779 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 00:55:38,782 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:55:38,784 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:55:38,786 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:55:38,789 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:55:38,792 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:55:38,866 [INFO] ember.core.registry.model.base.registry.discovery: Provider DeepmindDiscovery completed in 0.25s\n", - "2025-03-21 00:55:38,893 [DEBUG] urllib3.connectionpool: https://api.anthropic.com:443 \"GET /v1/models HTTP/1.1\" 401 86\n", - "2025-03-21 00:55:38,895 [ERROR] ember.core.registry.model.providers.anthropic.anthropic_discovery: Error fetching Anthropic models via REST API: 401 Client Error: Unauthorized for url: https://api.anthropic.com/v1/models\n", - "2025-03-21 00:55:38,897 [INFO] ember.core.registry.model.providers.anthropic.anthropic_discovery: Using fallback models due to API request error\n", - "2025-03-21 00:55:38,899 [INFO] ember.core.registry.model.base.registry.discovery: Provider AnthropicDiscovery completed in 0.28s\n", - "2025-03-21 00:55:39,163 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:55:39 GMT'), (b'Content-Type', b'application/json'), 
(b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'openai-version', b'2020-10-01'), (b'x-request-id', b'1e6dc2bf4213f80b492956c74129745f'), (b'openai-processing-ms', b'353'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=lGJr7UVxgaDN4HBlLITX3hjGQeXqf8MmO7ENb75MuR4-1742543739-1.0.1.1-eD8tCUx18oWut6fWHu02UdcjYqC46qA6wHnAqfb.G1O88xdXLLSnptXbqkEBF3NSfZxvpT4_vB1RRzNOL6jZ_VJ08qnkyVlZtfQePLLlvgg; path=/; expires=Fri, 21-Mar-25 08:25:39 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=jISO5ZxiQaGeAfqOUQOdrcmVaCqM3RDIsOf_LKzHB5A-1742543739496-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be3618b64eb35-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:55:39,168 [INFO] httpx: HTTP Request: GET https://api.openai.com/v1/models \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:55:39,170 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:55:39,176 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:55:39,179 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:55:39,181 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:55:39,184 [DEBUG] openai._base_client: HTTP Response: GET https://api.openai.com/v1/models \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 07:55:39 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('openai-version', '2020-10-01'), ('x-request-id', '1e6dc2bf4213f80b492956c74129745f'), ('openai-processing-ms', '353'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', 
'__cf_bm=lGJr7UVxgaDN4HBlLITX3hjGQeXqf8MmO7ENb75MuR4-1742543739-1.0.1.1-eD8tCUx18oWut6fWHu02UdcjYqC46qA6wHnAqfb.G1O88xdXLLSnptXbqkEBF3NSfZxvpT4_vB1RRzNOL6jZ_VJ08qnkyVlZtfQePLLlvgg; path=/; expires=Fri, 21-Mar-25 08:25:39 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=jISO5ZxiQaGeAfqOUQOdrcmVaCqM3RDIsOf_LKzHB5A-1742543739496-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923be3618b64eb35-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:55:39,190 [DEBUG] openai._base_client: request_id: 1e6dc2bf4213f80b492956c74129745f\n", - "2025-03-21 00:55:39,209 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Fetched 65 models from OpenAI API\n", - "2025-03-21 00:55:39,213 [DEBUG] ember.core.registry.model.providers.openai.openai_discovery: Filtered to 43 relevant models\n", - "2025-03-21 00:55:39,218 [INFO] ember.core.registry.model.base.registry.discovery: Provider OpenAIDiscovery completed in 0.62s\n", - "2025-03-21 00:55:39,222 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 43 models from OpenAIDiscovery\n", - "2025-03-21 00:55:39,232 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 5 models from AnthropicDiscovery\n", - "2025-03-21 00:55:39,235 [INFO] ember.core.registry.model.base.registry.discovery: Successfully received 32 models from DeepmindDiscovery\n", - "2025-03-21 00:55:39,239 [INFO] ember.core.registry.model.base.registry.discovery: Discovered 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 
'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 
'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 00:55:39,245 [DEBUG] ember.core.registry.model.initialization: Raw discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 
'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 00:55:39,254 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-transcribe discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,262 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-transcribe\n", - "2025-03-21 00:55:39,268 [WARNING] ember.core.registry.model.base.registry.discovery: 
Model openai:gpt-4o-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,272 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-12-17\n", - "2025-03-21 00:55:39,275 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-3 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,279 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-3\n", - "2025-03-21 00:55:39,281 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:dall-e-2 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,283 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:dall-e-2\n", - "2025-03-21 00:55:39,287 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,288 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview-2024-10-01\n", - "2025-03-21 00:55:39,290 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-10-01 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,292 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-10-01\n", - "2025-03-21 00:55:39,293 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,294 
[DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-audio-preview\n", - "2025-03-21 00:55:39,298 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-large discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,299 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-large\n", - "2025-03-21 00:55:39,301 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,302 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4\n", - "2025-03-21 00:55:39,305 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-05-13 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,308 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-05-13\n", - "2025-03-21 00:55:39,310 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,311 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview\n", - "2025-03-21 00:55:39,312 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,314 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview\n", - "2025-03-21 00:55:39,316 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct-0914 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,318 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct-0914\n", - "2025-03-21 00:55:39,324 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,327 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview\n", - "2025-03-21 00:55:39,328 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-1106 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,330 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-1106\n", - "2025-03-21 00:55:39,331 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,332 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview\n", - "2025-03-21 00:55:39,334 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,335 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo\n", - "2025-03-21 00:55:39,337 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-realtime-preview-2024-12-17 discovered via API but not in local config; using defaults with 
environment API key.\n", - "2025-03-21 00:55:39,340 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-realtime-preview-2024-12-17\n", - "2025-03-21 00:55:39,342 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-instruct discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,346 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-instruct\n", - "2025-03-21 00:55:39,347 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,349 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo\n", - "2025-03-21 00:55:39,350 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,352 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-preview\n", - "2025-03-21 00:55:39,354 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,355 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-search-preview-2025-03-11\n", - "2025-03-21 00:55:39,357 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,358 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged 
model info for openai:gpt-4o-mini-realtime-preview\n", - "2025-03-21 00:55:39,360 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-0125 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,362 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-0125\n", - "2025-03-21 00:55:39,363 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-08-06 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,365 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-08-06\n", - "2025-03-21 00:55:39,366 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-turbo-2024-04-09 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,369 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-turbo-2024-04-09\n", - "2025-03-21 00:55:39,370 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-3.5-turbo-16k discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,372 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-3.5-turbo-16k\n", - "2025-03-21 00:55:39,373 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,375 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o\n", - "2025-03-21 00:55:39,377 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-realtime-preview-2024-12-17 discovered via API but 
not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,380 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-realtime-preview-2024-12-17\n", - "2025-03-21 00:55:39,381 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-1106-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,383 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-1106-preview\n", - "2025-03-21 00:55:39,385 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-ada-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,385 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-ada-002\n", - "2025-03-21 00:55:39,387 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0613 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,392 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0613\n", - "2025-03-21 00:55:39,393 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,395 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4.5-preview\n", - "2025-03-21 00:55:39,396 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4.5-preview-2025-02-27 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,398 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info 
for openai:gpt-4.5-preview-2025-02-27\n", - "2025-03-21 00:55:39,400 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-search-preview-2025-03-11 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,402 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-search-preview-2025-03-11\n", - "2025-03-21 00:55:39,404 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-2024-11-20 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,406 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-2024-11-20\n", - "2025-03-21 00:55:39,407 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-tts discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,409 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-tts\n", - "2025-03-21 00:55:39,411 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4-0125-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,412 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4-0125-preview\n", - "2025-03-21 00:55:39,415 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-transcribe discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,418 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-transcribe\n", - "2025-03-21 00:55:39,420 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-2024-07-18 discovered via 
API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,421 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-2024-07-18\n", - "2025-03-21 00:55:39,423 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:text-embedding-3-small discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,425 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:text-embedding-3-small\n", - "2025-03-21 00:55:39,426 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,428 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini\n", - "2025-03-21 00:55:39,429 [WARNING] ember.core.registry.model.base.registry.discovery: Model openai:gpt-4o-mini-audio-preview-2024-12-17 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,430 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for openai:gpt-4o-mini-audio-preview-2024-12-17\n", - "2025-03-21 00:55:39,432 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-sonnet discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,434 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-sonnet\n", - "2025-03-21 00:55:39,435 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-opus discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,437 [DEBUG] ember.core.registry.model.base.registry.discovery: 
Successfully merged model info for anthropic:claude-3-opus\n", - "2025-03-21 00:55:39,438 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3-haiku discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,440 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3-haiku\n", - "2025-03-21 00:55:39,441 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.5-sonnet discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,444 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.5-sonnet\n", - "2025-03-21 00:55:39,445 [WARNING] ember.core.registry.model.base.registry.discovery: Model anthropic:claude-3.7-sonnet discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,446 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for anthropic:claude-3.7-sonnet\n", - "2025-03-21 00:55:39,448 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.0-pro-vision-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,449 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.0-pro-vision-latest\n", - "2025-03-21 00:55:39,451 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-pro-vision discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,452 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-pro-vision\n", - "2025-03-21 00:55:39,454 [WARNING] 
ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,457 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-latest\n", - "2025-03-21 00:55:39,459 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,461 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-001\n", - "2025-03-21 00:55:39,462 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,465 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro-002\n", - "2025-03-21 00:55:39,469 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-pro discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,470 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-pro\n", - "2025-03-21 00:55:39,471 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,473 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-latest\n", - "2025-03-21 00:55:39,475 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001 discovered via API but not in 
local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,476 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001\n", - "2025-03-21 00:55:39,477 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-001-tuning discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,479 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-001-tuning\n", - "2025-03-21 00:55:39,480 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,482 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash\n", - "2025-03-21 00:55:39,485 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-002 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,486 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-002\n", - "2025-03-21 00:55:39,487 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,490 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b\n", - "2025-03-21 00:55:39,491 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,494 [DEBUG] 
ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-001\n", - "2025-03-21 00:55:39,496 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-latest discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,498 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-latest\n", - "2025-03-21 00:55:39,502 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0827 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,505 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0827\n", - "2025-03-21 00:55:39,506 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-1.5-flash-8b-exp-0924 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,508 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-1.5-flash-8b-exp-0924\n", - "2025-03-21 00:55:39,509 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,512 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp\n", - "2025-03-21 00:55:39,514 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,516 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully 
merged model info for google:models/gemini-2.0-flash\n", - "2025-03-21 00:55:39,521 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,532 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-001\n", - "2025-03-21 00:55:39,536 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-exp-image-generation discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,539 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-exp-image-generation\n", - "2025-03-21 00:55:39,540 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-001 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,541 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-001\n", - "2025-03-21 00:55:39,543 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,547 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite\n", - "2025-03-21 00:55:39,549 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview-02-05 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,550 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for 
google:models/gemini-2.0-flash-lite-preview-02-05\n", - "2025-03-21 00:55:39,551 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-lite-preview discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,552 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-lite-preview\n", - "2025-03-21 00:55:39,554 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,557 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp\n", - "2025-03-21 00:55:39,559 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-pro-exp-02-05 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,560 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-pro-exp-02-05\n", - "2025-03-21 00:55:39,563 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-exp-1206 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,565 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-exp-1206\n", - "2025-03-21 00:55:39,567 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-01-21 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,569 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-01-21\n", - "2025-03-21 
00:55:39,572 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,574 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp\n", - "2025-03-21 00:55:39,576 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemini-2.0-flash-thinking-exp-1219 discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,578 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemini-2.0-flash-thinking-exp-1219\n", - "2025-03-21 00:55:39,581 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/learnlm-1.5-pro-experimental discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,583 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/learnlm-1.5-pro-experimental\n", - "2025-03-21 00:55:39,585 [WARNING] ember.core.registry.model.base.registry.discovery: Model google:models/gemma-3-27b-it discovered via API but not in local config; using defaults with environment API key.\n", - "2025-03-21 00:55:39,587 [DEBUG] ember.core.registry.model.base.registry.discovery: Successfully merged model info for google:models/gemma-3-27b-it\n", - "2025-03-21 00:55:39,589 [DEBUG] ember.core.registry.model.initialization: Merged discovery found 80 models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 
'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 
'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 00:55:39,590 [INFO] ember.core.registry.model.initialization: Registering 80 models from discovery\n", - "2025-03-21 00:55:39,592 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-transcribe (provider: Openai)\n", - "2025-03-21 00:55:39,597 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", - "2025-03-21 00:55:39,599 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-transcribe with provider Openai\n", - "2025-03-21 00:55:39,602 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 00:55:39,604 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:55:39,605 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:55:39,611 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-3 (provider: Openai)\n", - "2025-03-21 00:55:39,612 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 
with provider Openai\n", - "2025-03-21 00:55:39,613 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-3 with provider Openai\n", - "2025-03-21 00:55:39,614 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:dall-e-2 (provider: Openai)\n", - "2025-03-21 00:55:39,615 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", - "2025-03-21 00:55:39,616 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:dall-e-2 with provider Openai\n", - "2025-03-21 00:55:39,617 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview-2024-10-01 (provider: Openai)\n", - "2025-03-21 00:55:39,618 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", - "2025-03-21 00:55:39,618 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview-2024-10-01 with provider Openai\n", - "2025-03-21 00:55:39,619 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-10-01 (provider: Openai)\n", - "2025-03-21 00:55:39,623 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", - "2025-03-21 00:55:39,624 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-10-01 with provider Openai\n", - "2025-03-21 00:55:39,632 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-audio-preview (provider: Openai)\n", - "2025-03-21 00:55:39,634 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider 
Openai\n", - "2025-03-21 00:55:39,635 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-audio-preview with provider Openai\n", - "2025-03-21 00:55:39,636 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-large (provider: Openai)\n", - "2025-03-21 00:55:39,637 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", - "2025-03-21 00:55:39,638 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-large with provider Openai\n", - "2025-03-21 00:55:39,639 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4 (provider: Openai)\n", - "2025-03-21 00:55:39,640 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", - "2025-03-21 00:55:39,640 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4 with provider Openai\n", - "2025-03-21 00:55:39,641 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-05-13 (provider: Openai)\n", - "2025-03-21 00:55:39,642 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", - "2025-03-21 00:55:39,643 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-05-13 with provider Openai\n", - "2025-03-21 00:55:39,644 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview (provider: Openai)\n", - "2025-03-21 00:55:39,646 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", - "2025-03-21 00:55:39,647 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview with provider Openai\n", - "2025-03-21 00:55:39,650 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview (provider: Openai)\n", - "2025-03-21 00:55:39,653 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", - "2025-03-21 00:55:39,655 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview with provider Openai\n", - "2025-03-21 00:55:39,656 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct-0914 (provider: Openai)\n", - "2025-03-21 00:55:39,657 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", - "2025-03-21 00:55:39,659 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct-0914 with provider Openai\n", - "2025-03-21 00:55:39,660 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview (provider: Openai)\n", - "2025-03-21 00:55:39,661 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", - "2025-03-21 00:55:39,662 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview with provider Openai\n", - "2025-03-21 00:55:39,663 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-1106 (provider: Openai)\n", - "2025-03-21 00:55:39,664 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", - "2025-03-21 
00:55:39,666 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-1106 with provider Openai\n", - "2025-03-21 00:55:39,668 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview (provider: Openai)\n", - "2025-03-21 00:55:39,669 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", - "2025-03-21 00:55:39,671 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview with provider Openai\n", - "2025-03-21 00:55:39,673 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo (provider: Openai)\n", - "2025-03-21 00:55:39,674 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", - "2025-03-21 00:55:39,676 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo with provider Openai\n", - "2025-03-21 00:55:39,682 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-realtime-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 00:55:39,683 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:55:39,685 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:55:39,686 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-instruct (provider: Openai)\n", - "2025-03-21 00:55:39,687 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", - "2025-03-21 00:55:39,689 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-instruct with provider Openai\n", - "2025-03-21 00:55:39,695 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo (provider: Openai)\n", - "2025-03-21 00:55:39,697 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", - "2025-03-21 00:55:39,698 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo with provider Openai\n", - "2025-03-21 00:55:39,701 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-preview (provider: Openai)\n", - "2025-03-21 00:55:39,704 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", - "2025-03-21 00:55:39,705 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-preview with provider Openai\n", - "2025-03-21 00:55:39,707 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-search-preview-2025-03-11 (provider: Openai)\n", - "2025-03-21 00:55:39,708 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 00:55:39,709 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 00:55:39,711 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview (provider: Openai)\n", - "2025-03-21 00:55:39,712 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", - "2025-03-21 00:55:39,713 
[INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview with provider Openai\n", - "2025-03-21 00:55:39,714 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-0125 (provider: Openai)\n", - "2025-03-21 00:55:39,723 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", - "2025-03-21 00:55:39,725 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-0125 with provider Openai\n", - "2025-03-21 00:55:39,726 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-08-06 (provider: Openai)\n", - "2025-03-21 00:55:39,728 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", - "2025-03-21 00:55:39,730 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-08-06 with provider Openai\n", - "2025-03-21 00:55:39,731 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-turbo-2024-04-09 (provider: Openai)\n", - "2025-03-21 00:55:39,733 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", - "2025-03-21 00:55:39,734 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-turbo-2024-04-09 with provider Openai\n", - "2025-03-21 00:55:39,737 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-3.5-turbo-16k (provider: Openai)\n", - "2025-03-21 00:55:39,738 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", - "2025-03-21 00:55:39,744 [INFO] ember.core.registry.model.initialization: 
Successfully registered model: openai:gpt-3.5-turbo-16k with provider Openai\n", - "2025-03-21 00:55:39,747 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o (provider: Openai)\n", - "2025-03-21 00:55:39,749 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", - "2025-03-21 00:55:39,750 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o with provider Openai\n", - "2025-03-21 00:55:39,756 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 00:55:39,758 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:55:39,759 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-realtime-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:55:39,762 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-1106-preview (provider: Openai)\n", - "2025-03-21 00:55:39,764 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", - "2025-03-21 00:55:39,765 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-1106-preview with provider Openai\n", - "2025-03-21 00:55:39,773 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-ada-002 (provider: Openai)\n", - "2025-03-21 00:55:39,777 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-ada-002 with provider Openai\n", - "2025-03-21 00:55:39,782 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
openai:text-embedding-ada-002 with provider Openai\n", - "2025-03-21 00:55:39,783 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0613 (provider: Openai)\n", - "2025-03-21 00:55:39,788 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", - "2025-03-21 00:55:39,791 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0613 with provider Openai\n", - "2025-03-21 00:55:39,792 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview (provider: Openai)\n", - "2025-03-21 00:55:39,793 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", - "2025-03-21 00:55:39,794 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview with provider Openai\n", - "2025-03-21 00:55:39,796 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4.5-preview-2025-02-27 (provider: Openai)\n", - "2025-03-21 00:55:39,797 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", - "2025-03-21 00:55:39,798 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4.5-preview-2025-02-27 with provider Openai\n", - "2025-03-21 00:55:39,799 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-search-preview-2025-03-11 (provider: Openai)\n", - "2025-03-21 00:55:39,801 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with provider Openai\n", - "2025-03-21 00:55:39,803 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-search-preview-2025-03-11 with 
provider Openai\n", - "2025-03-21 00:55:39,804 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-2024-11-20 (provider: Openai)\n", - "2025-03-21 00:55:39,805 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", - "2025-03-21 00:55:39,806 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-2024-11-20 with provider Openai\n", - "2025-03-21 00:55:39,807 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-tts (provider: Openai)\n", - "2025-03-21 00:55:39,810 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", - "2025-03-21 00:55:39,813 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-tts with provider Openai\n", - "2025-03-21 00:55:39,814 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4-0125-preview (provider: Openai)\n", - "2025-03-21 00:55:39,816 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", - "2025-03-21 00:55:39,818 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4-0125-preview with provider Openai\n", - "2025-03-21 00:55:39,819 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-transcribe (provider: Openai)\n", - "2025-03-21 00:55:39,820 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", - "2025-03-21 00:55:39,821 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-transcribe with provider Openai\n", - "2025-03-21 00:55:39,823 [DEBUG] 
ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-2024-07-18 (provider: Openai)\n", - "2025-03-21 00:55:39,824 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", - "2025-03-21 00:55:39,828 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-2024-07-18 with provider Openai\n", - "2025-03-21 00:55:39,829 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:text-embedding-3-small (provider: Openai)\n", - "2025-03-21 00:55:39,831 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", - "2025-03-21 00:55:39,833 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:text-embedding-3-small with provider Openai\n", - "2025-03-21 00:55:39,835 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini (provider: Openai)\n", - "2025-03-21 00:55:39,837 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", - "2025-03-21 00:55:39,838 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini with provider Openai\n", - "2025-03-21 00:55:39,838 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: openai:gpt-4o-mini-audio-preview-2024-12-17 (provider: Openai)\n", - "2025-03-21 00:55:39,840 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:55:39,842 [INFO] ember.core.registry.model.initialization: Successfully registered model: openai:gpt-4o-mini-audio-preview-2024-12-17 with provider Openai\n", - "2025-03-21 00:55:39,848 [DEBUG] 
ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-sonnet (provider: Anthropic)\n", - "2025-03-21 00:55:39,850 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", - "2025-03-21 00:55:39,851 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-sonnet with provider Anthropic\n", - "2025-03-21 00:55:39,853 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-opus (provider: Anthropic)\n", - "2025-03-21 00:55:39,854 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", - "2025-03-21 00:55:39,856 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-opus with provider Anthropic\n", - "2025-03-21 00:55:39,858 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3-haiku (provider: Anthropic)\n", - "2025-03-21 00:55:39,860 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", - "2025-03-21 00:55:39,861 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3-haiku with provider Anthropic\n", - "2025-03-21 00:55:39,862 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.5-sonnet (provider: Anthropic)\n", - "2025-03-21 00:55:39,862 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", - "2025-03-21 00:55:39,863 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.5-sonnet with provider Anthropic\n", - "2025-03-21 00:55:39,864 [DEBUG] 
ember.core.registry.model.initialization: Attempting to register discovered model: anthropic:claude-3.7-sonnet (provider: Anthropic)\n", - "2025-03-21 00:55:39,865 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", - "2025-03-21 00:55:39,866 [INFO] ember.core.registry.model.initialization: Successfully registered model: anthropic:claude-3.7-sonnet with provider Anthropic\n", - "2025-03-21 00:55:39,867 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.0-pro-vision-latest (provider: Google)\n", - "2025-03-21 00:55:39,868 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", - "2025-03-21 00:55:39,869 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.0-pro-vision-latest with provider Google\n", - "2025-03-21 00:55:39,869 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-pro-vision (provider: Google)\n", - "2025-03-21 00:55:39,870 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", - "2025-03-21 00:55:39,871 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-pro-vision with provider Google\n", - "2025-03-21 00:55:39,871 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-latest (provider: Google)\n", - "2025-03-21 00:55:39,873 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider Google\n", - "2025-03-21 00:55:39,875 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-latest with provider 
Google\n", - "2025-03-21 00:55:39,878 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-001 (provider: Google)\n", - "2025-03-21 00:55:39,878 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", - "2025-03-21 00:55:39,879 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-001 with provider Google\n", - "2025-03-21 00:55:39,882 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro-002 (provider: Google)\n", - "2025-03-21 00:55:39,883 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", - "2025-03-21 00:55:39,884 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro-002 with provider Google\n", - "2025-03-21 00:55:39,885 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-pro (provider: Google)\n", - "2025-03-21 00:55:39,886 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", - "2025-03-21 00:55:39,886 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-pro with provider Google\n", - "2025-03-21 00:55:39,887 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-latest (provider: Google)\n", - "2025-03-21 00:55:39,888 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-latest with provider Google\n", - "2025-03-21 00:55:39,889 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
google:models/gemini-1.5-flash-latest with provider Google\n", - "2025-03-21 00:55:39,889 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001 (provider: Google)\n", - "2025-03-21 00:55:39,890 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", - "2025-03-21 00:55:39,892 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001 with provider Google\n", - "2025-03-21 00:55:39,893 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-001-tuning (provider: Google)\n", - "2025-03-21 00:55:39,894 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", - "2025-03-21 00:55:39,895 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-001-tuning with provider Google\n", - "2025-03-21 00:55:39,896 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash (provider: Google)\n", - "2025-03-21 00:55:39,897 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", - "2025-03-21 00:55:39,902 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash with provider Google\n", - "2025-03-21 00:55:39,903 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-002 (provider: Google)\n", - "2025-03-21 00:55:39,904 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", - "2025-03-21 00:55:39,905 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-002 with provider Google\n", - "2025-03-21 00:55:39,906 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b (provider: Google)\n", - "2025-03-21 00:55:39,907 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", - "2025-03-21 00:55:39,908 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b with provider Google\n", - "2025-03-21 00:55:39,909 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-001 (provider: Google)\n", - "2025-03-21 00:55:39,912 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", - "2025-03-21 00:55:39,913 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-001 with provider Google\n", - "2025-03-21 00:55:39,914 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-latest (provider: Google)\n", - "2025-03-21 00:55:39,915 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", - "2025-03-21 00:55:39,916 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-latest with provider Google\n", - "2025-03-21 00:55:39,917 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0827 (provider: Google)\n", - "2025-03-21 00:55:39,918 [INFO] ember.core.registry.model.initialization: Successfully registered model: 
google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", - "2025-03-21 00:55:39,919 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0827 with provider Google\n", - "2025-03-21 00:55:39,920 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-1.5-flash-8b-exp-0924 (provider: Google)\n", - "2025-03-21 00:55:39,921 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", - "2025-03-21 00:55:39,922 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-1.5-flash-8b-exp-0924 with provider Google\n", - "2025-03-21 00:55:39,923 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp (provider: Google)\n", - "2025-03-21 00:55:39,925 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", - "2025-03-21 00:55:39,929 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp with provider Google\n", - "2025-03-21 00:55:39,930 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash (provider: Google)\n", - "2025-03-21 00:55:39,934 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", - "2025-03-21 00:55:39,935 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash with provider Google\n", - "2025-03-21 00:55:39,936 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-001 (provider: Google)\n", - "2025-03-21 00:55:39,937 [INFO] 
ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", - "2025-03-21 00:55:39,938 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-001 with provider Google\n", - "2025-03-21 00:55:39,939 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-exp-image-generation (provider: Google)\n", - "2025-03-21 00:55:39,940 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", - "2025-03-21 00:55:39,940 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-exp-image-generation with provider Google\n", - "2025-03-21 00:55:39,941 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-001 (provider: Google)\n", - "2025-03-21 00:55:39,942 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", - "2025-03-21 00:55:39,943 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-001 with provider Google\n", - "2025-03-21 00:55:39,944 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite (provider: Google)\n", - "2025-03-21 00:55:39,945 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", - "2025-03-21 00:55:39,946 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite with provider Google\n", - "2025-03-21 00:55:39,947 [DEBUG] ember.core.registry.model.initialization: Attempting to register 
discovered model: google:models/gemini-2.0-flash-lite-preview-02-05 (provider: Google)\n", - "2025-03-21 00:55:39,948 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", - "2025-03-21 00:55:39,952 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview-02-05 with provider Google\n", - "2025-03-21 00:55:39,953 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-lite-preview (provider: Google)\n", - "2025-03-21 00:55:39,954 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", - "2025-03-21 00:55:39,955 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-lite-preview with provider Google\n", - "2025-03-21 00:55:39,956 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp (provider: Google)\n", - "2025-03-21 00:55:39,958 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", - "2025-03-21 00:55:39,959 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp with provider Google\n", - "2025-03-21 00:55:39,960 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-pro-exp-02-05 (provider: Google)\n", - "2025-03-21 00:55:39,960 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with provider Google\n", - "2025-03-21 00:55:39,961 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-pro-exp-02-05 with 
provider Google\n", - "2025-03-21 00:55:39,963 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-exp-1206 (provider: Google)\n", - "2025-03-21 00:55:39,963 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", - "2025-03-21 00:55:39,965 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-exp-1206 with provider Google\n", - "2025-03-21 00:55:39,966 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-01-21 (provider: Google)\n", - "2025-03-21 00:55:39,968 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", - "2025-03-21 00:55:39,969 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-01-21 with provider Google\n", - "2025-03-21 00:55:39,970 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp (provider: Google)\n", - "2025-03-21 00:55:39,970 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", - "2025-03-21 00:55:39,971 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp with provider Google\n", - "2025-03-21 00:55:39,972 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemini-2.0-flash-thinking-exp-1219 (provider: Google)\n", - "2025-03-21 00:55:39,972 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", - "2025-03-21 
00:55:39,973 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemini-2.0-flash-thinking-exp-1219 with provider Google\n", - "2025-03-21 00:55:39,973 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/learnlm-1.5-pro-experimental (provider: Google)\n", - "2025-03-21 00:55:39,974 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", - "2025-03-21 00:55:39,976 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/learnlm-1.5-pro-experimental with provider Google\n", - "2025-03-21 00:55:39,977 [DEBUG] ember.core.registry.model.initialization: Attempting to register discovered model: google:models/gemma-3-27b-it (provider: Google)\n", - "2025-03-21 00:55:39,977 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", - "2025-03-21 00:55:39,980 [INFO] ember.core.registry.model.initialization: Successfully registered model: google:models/gemma-3-27b-it with provider Google\n", - "2025-03-21 00:55:39,981 [INFO] ember.core.registry.model.initialization: Registration summary: 80 new, 0 skipped, 0 failed\n", - "2025-03-21 00:55:39,983 [INFO] ember.core.registry.model.initialization: Successfully discovered and registered 80 new models: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 
'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 
'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n", - "2025-03-21 00:55:39,985 [INFO] ember.core.registry.model.initialization: Discovered 80 new models in 1.46s: ['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13'] and 70 more\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['openai:gpt-4o-mini-transcribe', 'openai:gpt-4o-audio-preview-2024-12-17', 'openai:dall-e-3', 'openai:dall-e-2', 'openai:gpt-4o-audio-preview-2024-10-01', 'openai:gpt-4o-realtime-preview-2024-10-01', 'openai:gpt-4o-audio-preview', 'openai:text-embedding-3-large', 'openai:gpt-4', 'openai:gpt-4o-2024-05-13', 'openai:gpt-4o-realtime-preview', 'openai:gpt-4o-mini-audio-preview', 'openai:gpt-3.5-turbo-instruct-0914', 'openai:gpt-4o-mini-search-preview', 'openai:gpt-3.5-turbo-1106', 'openai:gpt-4o-search-preview', 'openai:gpt-4-turbo', 'openai:gpt-4o-realtime-preview-2024-12-17', 'openai:gpt-3.5-turbo-instruct', 'openai:gpt-3.5-turbo', 'openai:gpt-4-turbo-preview', 'openai:gpt-4o-mini-search-preview-2025-03-11', 'openai:gpt-4o-mini-realtime-preview', 'openai:gpt-3.5-turbo-0125', 'openai:gpt-4o-2024-08-06', 'openai:gpt-4-turbo-2024-04-09', 'openai:gpt-3.5-turbo-16k', 'openai:gpt-4o', 'openai:gpt-4o-mini-realtime-preview-2024-12-17', 'openai:gpt-4-1106-preview', 'openai:text-embedding-ada-002', 'openai:gpt-4-0613', 'openai:gpt-4.5-preview', 'openai:gpt-4.5-preview-2025-02-27', 'openai:gpt-4o-search-preview-2025-03-11', 
'openai:gpt-4o-2024-11-20', 'openai:gpt-4o-mini-tts', 'openai:gpt-4-0125-preview', 'openai:gpt-4o-transcribe', 'openai:gpt-4o-mini-2024-07-18', 'openai:text-embedding-3-small', 'openai:gpt-4o-mini', 'openai:gpt-4o-mini-audio-preview-2024-12-17', 'anthropic:claude-3-sonnet', 'anthropic:claude-3-opus', 'anthropic:claude-3-haiku', 'anthropic:claude-3.5-sonnet', 'anthropic:claude-3.7-sonnet', 'google:models/gemini-1.0-pro-vision-latest', 'google:models/gemini-pro-vision', 'google:models/gemini-1.5-pro-latest', 'google:models/gemini-1.5-pro-001', 'google:models/gemini-1.5-pro-002', 'google:models/gemini-1.5-pro', 'google:models/gemini-1.5-flash-latest', 'google:models/gemini-1.5-flash-001', 'google:models/gemini-1.5-flash-001-tuning', 'google:models/gemini-1.5-flash', 'google:models/gemini-1.5-flash-002', 'google:models/gemini-1.5-flash-8b', 'google:models/gemini-1.5-flash-8b-001', 'google:models/gemini-1.5-flash-8b-latest', 'google:models/gemini-1.5-flash-8b-exp-0827', 'google:models/gemini-1.5-flash-8b-exp-0924', 'google:models/gemini-2.0-flash-exp', 'google:models/gemini-2.0-flash', 'google:models/gemini-2.0-flash-001', 'google:models/gemini-2.0-flash-exp-image-generation', 'google:models/gemini-2.0-flash-lite-001', 'google:models/gemini-2.0-flash-lite', 'google:models/gemini-2.0-flash-lite-preview-02-05', 'google:models/gemini-2.0-flash-lite-preview', 'google:models/gemini-2.0-pro-exp', 'google:models/gemini-2.0-pro-exp-02-05', 'google:models/gemini-exp-1206', 'google:models/gemini-2.0-flash-thinking-exp-01-21', 'google:models/gemini-2.0-flash-thinking-exp', 'google:models/gemini-2.0-flash-thinking-exp-1219', 'google:models/learnlm-1.5-pro-experimental', 'google:models/gemma-3-27b-it']\n" - ] - } - ], - "source": [ - "model_registry = initialize_registry()\n", - "print(model_registry.list_models())\n", - "llm = ModelService(registry=model_registry)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": { - "scrolled": true - }, - "outputs": [ - 
{ - "data": { - "text/plain": [ - "['openai:gpt-4o-mini-transcribe',\n", - " 'openai:gpt-4o-audio-preview-2024-12-17',\n", - " 'openai:dall-e-3',\n", - " 'openai:dall-e-2',\n", - " 'openai:gpt-4o-audio-preview-2024-10-01',\n", - " 'openai:gpt-4o-realtime-preview-2024-10-01',\n", - " 'openai:gpt-4o-audio-preview',\n", - " 'openai:text-embedding-3-large',\n", - " 'openai:gpt-4',\n", - " 'openai:gpt-4o-2024-05-13',\n", - " 'openai:gpt-4o-realtime-preview',\n", - " 'openai:gpt-4o-mini-audio-preview',\n", - " 'openai:gpt-3.5-turbo-instruct-0914',\n", - " 'openai:gpt-4o-mini-search-preview',\n", - " 'openai:gpt-3.5-turbo-1106',\n", - " 'openai:gpt-4o-search-preview',\n", - " 'openai:gpt-4-turbo',\n", - " 'openai:gpt-4o-realtime-preview-2024-12-17',\n", - " 'openai:gpt-3.5-turbo-instruct',\n", - " 'openai:gpt-3.5-turbo',\n", - " 'openai:gpt-4-turbo-preview',\n", - " 'openai:gpt-4o-mini-search-preview-2025-03-11',\n", - " 'openai:gpt-4o-mini-realtime-preview',\n", - " 'openai:gpt-3.5-turbo-0125',\n", - " 'openai:gpt-4o-2024-08-06',\n", - " 'openai:gpt-4-turbo-2024-04-09',\n", - " 'openai:gpt-3.5-turbo-16k',\n", - " 'openai:gpt-4o',\n", - " 'openai:gpt-4o-mini-realtime-preview-2024-12-17',\n", - " 'openai:gpt-4-1106-preview',\n", - " 'openai:text-embedding-ada-002',\n", - " 'openai:gpt-4-0613',\n", - " 'openai:gpt-4.5-preview',\n", - " 'openai:gpt-4.5-preview-2025-02-27',\n", - " 'openai:gpt-4o-search-preview-2025-03-11',\n", - " 'openai:gpt-4o-2024-11-20',\n", - " 'openai:gpt-4o-mini-tts',\n", - " 'openai:gpt-4-0125-preview',\n", - " 'openai:gpt-4o-transcribe',\n", - " 'openai:gpt-4o-mini-2024-07-18',\n", - " 'openai:text-embedding-3-small',\n", - " 'openai:gpt-4o-mini',\n", - " 'openai:gpt-4o-mini-audio-preview-2024-12-17',\n", - " 'anthropic:claude-3-sonnet',\n", - " 'anthropic:claude-3-opus',\n", - " 'anthropic:claude-3-haiku',\n", - " 'anthropic:claude-3.5-sonnet',\n", - " 'anthropic:claude-3.7-sonnet',\n", - " 'google:models/gemini-1.0-pro-vision-latest',\n", - " 
'google:models/gemini-pro-vision',\n", - " 'google:models/gemini-1.5-pro-latest',\n", - " 'google:models/gemini-1.5-pro-001',\n", - " 'google:models/gemini-1.5-pro-002',\n", - " 'google:models/gemini-1.5-pro',\n", - " 'google:models/gemini-1.5-flash-latest',\n", - " 'google:models/gemini-1.5-flash-001',\n", - " 'google:models/gemini-1.5-flash-001-tuning',\n", - " 'google:models/gemini-1.5-flash',\n", - " 'google:models/gemini-1.5-flash-002',\n", - " 'google:models/gemini-1.5-flash-8b',\n", - " 'google:models/gemini-1.5-flash-8b-001',\n", - " 'google:models/gemini-1.5-flash-8b-latest',\n", - " 'google:models/gemini-1.5-flash-8b-exp-0827',\n", - " 'google:models/gemini-1.5-flash-8b-exp-0924',\n", - " 'google:models/gemini-2.0-flash-exp',\n", - " 'google:models/gemini-2.0-flash',\n", - " 'google:models/gemini-2.0-flash-001',\n", - " 'google:models/gemini-2.0-flash-exp-image-generation',\n", - " 'google:models/gemini-2.0-flash-lite-001',\n", - " 'google:models/gemini-2.0-flash-lite',\n", - " 'google:models/gemini-2.0-flash-lite-preview-02-05',\n", - " 'google:models/gemini-2.0-flash-lite-preview',\n", - " 'google:models/gemini-2.0-pro-exp',\n", - " 'google:models/gemini-2.0-pro-exp-02-05',\n", - " 'google:models/gemini-exp-1206',\n", - " 'google:models/gemini-2.0-flash-thinking-exp-01-21',\n", - " 'google:models/gemini-2.0-flash-thinking-exp',\n", - " 'google:models/gemini-2.0-flash-thinking-exp-1219',\n", - " 'google:models/learnlm-1.5-pro-experimental',\n", - " 'google:models/gemma-3-27b-it']" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "model_registry.list_models()" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [], - "source": [ - "model_ids: List[str] = [\n", - " \"openai:o1\",\n", - " \"openai:gpt-4o\",\n", - " \"openai:gpt-4o-mini\",\n", - " # \"anthropic:claude-3.5-sonnet\", # API key not working\n", - " # \"invalid:model\", # Expected to trigger an 
error.\n", - " # \"google:model/gemini-1.5-pro\", # need to fix model alignment\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:56:34,200 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. Using the registered provider.\n", - "2025-03-21 00:56:34,201 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o' using provider class 'OpenAIModel'.\n", - "2025-03-21 00:56:34,202 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o\n", - "2025-03-21 00:56:34,203 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:56:34,205 [DEBUG] httpx: load_ssl_context verify=True cert=None trust_env=True http2=False\n", - "2025-03-21 00:56:34,208 [DEBUG] httpx: load_verify_locations cafile='/root/anaconda3/envs/ember_upgrade/lib/python3.11/site-packages/certifi/cacert.pem'\n", - "2025-03-21 00:56:34,228 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:56:34,230 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:56:34,232 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", - "2025-03-21 00:56:34,244 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 00:56:34,246 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n" - ] 
- }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "➡️ Testing model: openai:o1\n", - "❌ Error with model openai:o1: Model 'openai:o1' not found. Available models:\n", - "- openai:gpt-4o-mini-transcribe\n", - "- openai:gpt-4o-audio-preview-2024-12-17\n", - "- openai:dall-e-3\n", - "- openai:dall-e-2\n", - "- openai:gpt-4o-audio-preview-2024-10-01\n", - "- openai:gpt-4o-realtime-preview-2024-10-01\n", - "- openai:gpt-4o-audio-preview\n", - "- openai:text-embedding-3-large\n", - "- openai:gpt-4\n", - "- openai:gpt-4o-2024-05-13\n", - "- openai:gpt-4o-realtime-preview\n", - "- openai:gpt-4o-mini-audio-preview\n", - "- openai:gpt-3.5-turbo-instruct-0914\n", - "- openai:gpt-4o-mini-search-preview\n", - "- openai:gpt-3.5-turbo-1106\n", - "- openai:gpt-4o-search-preview\n", - "- openai:gpt-4-turbo\n", - "- openai:gpt-4o-realtime-preview-2024-12-17\n", - "- openai:gpt-3.5-turbo-instruct\n", - "- openai:gpt-3.5-turbo\n", - "- openai:gpt-4-turbo-preview\n", - "- openai:gpt-4o-mini-search-preview-2025-03-11\n", - "- openai:gpt-4o-mini-realtime-preview\n", - "- openai:gpt-3.5-turbo-0125\n", - "- openai:gpt-4o-2024-08-06\n", - "- openai:gpt-4-turbo-2024-04-09\n", - "- openai:gpt-3.5-turbo-16k\n", - "- openai:gpt-4o\n", - "- openai:gpt-4o-mini-realtime-preview-2024-12-17\n", - "- openai:gpt-4-1106-preview\n", - "- openai:text-embedding-ada-002\n", - "- openai:gpt-4-0613\n", - "- openai:gpt-4.5-preview\n", - "- openai:gpt-4.5-preview-2025-02-27\n", - "- openai:gpt-4o-search-preview-2025-03-11\n", - "- openai:gpt-4o-2024-11-20\n", - "- openai:gpt-4o-mini-tts\n", - "- openai:gpt-4-0125-preview\n", - "- openai:gpt-4o-transcribe\n", - "- openai:gpt-4o-mini-2024-07-18\n", - "- openai:text-embedding-3-small\n", - "- openai:gpt-4o-mini\n", - "- openai:gpt-4o-mini-audio-preview-2024-12-17\n", - "- anthropic:claude-3-sonnet\n", - "- anthropic:claude-3-opus\n", - "- anthropic:claude-3-haiku\n", - "- anthropic:claude-3.5-sonnet\n", - "- anthropic:claude-3.7-sonnet\n", - 
"- google:models/gemini-1.0-pro-vision-latest\n", - "- google:models/gemini-pro-vision\n", - "- google:models/gemini-1.5-pro-latest\n", - "- google:models/gemini-1.5-pro-001\n", - "- google:models/gemini-1.5-pro-002\n", - "- google:models/gemini-1.5-pro\n", - "- google:models/gemini-1.5-flash-latest\n", - "- google:models/gemini-1.5-flash-001\n", - "- google:models/gemini-1.5-flash-001-tuning\n", - "- google:models/gemini-1.5-flash\n", - "- google:models/gemini-1.5-flash-002\n", - "- google:models/gemini-1.5-flash-8b\n", - "- google:models/gemini-1.5-flash-8b-001\n", - "- google:models/gemini-1.5-flash-8b-latest\n", - "- google:models/gemini-1.5-flash-8b-exp-0827\n", - "- google:models/gemini-1.5-flash-8b-exp-0924\n", - "- google:models/gemini-2.0-flash-exp\n", - "- google:models/gemini-2.0-flash\n", - "- google:models/gemini-2.0-flash-001\n", - "- google:models/gemini-2.0-flash-exp-image-generation\n", - "- google:models/gemini-2.0-flash-lite-001\n", - "- google:models/gemini-2.0-flash-lite\n", - "- google:models/gemini-2.0-flash-lite-preview-02-05\n", - "- google:models/gemini-2.0-flash-lite-preview\n", - "- google:models/gemini-2.0-pro-exp\n", - "- google:models/gemini-2.0-pro-exp-02-05\n", - "- google:models/gemini-exp-1206\n", - "- google:models/gemini-2.0-flash-thinking-exp-01-21\n", - "- google:models/gemini-2.0-flash-thinking-exp\n", - "- google:models/gemini-2.0-flash-thinking-exp-1219\n", - "- google:models/learnlm-1.5-pro-experimental\n", - "- google:models/gemma-3-27b-it\n", - "➡️ Testing model: openai:gpt-4o\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:56:34,480 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 00:56:34,482 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:56:34,483 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:56:34,484 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 
00:56:34,486 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:56:34,486 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:56:36,126 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:56:36 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1233'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_f8552ee7d3cda74089c40b09a667a11c'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=K4JnKFgR4hjVklhiYpYOBJWq_aNM03jL.3b6rr6i.mI-1742543796-1.0.1.1-M9iB0xiqdmYVjTNsePN_lcGUzDMaDs3HHtxqi3P9SRCyRCxwewtZvhwXe8FHpQtpTUYayHQJCuirL1j0CbPyHmViUXtbAht3Pk3Tq0dZjVQ; path=/; expires=Fri, 21-Mar-25 08:26:36 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'X-Content-Type-Options', b'nosniff'), (b'Set-Cookie', b'_cfuvid=TpdB9iIHIQ7IEdkiMOoQVOJlXDWtypVFJQW_pjMp3no-1742543796441-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be4bdafb57e21-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:56:36,131 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:56:36,135 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:56:36,145 [DEBUG] httpcore.http11: 
receive_response_body.complete\n", - "2025-03-21 00:56:36,147 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:56:36,149 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:56:36,151 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers([('date', 'Fri, 21 Mar 2025 07:56:36 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('access-control-expose-headers', 'X-Request-ID'), ('openai-organization', 'user-iqhmndueuqg2ljzblqkr2tgh'), ('openai-processing-ms', '1233'), ('openai-version', '2020-10-01'), ('x-ratelimit-limit-requests', '50000'), ('x-ratelimit-limit-tokens', '150000000'), ('x-ratelimit-remaining-requests', '49999'), ('x-ratelimit-remaining-tokens', '149999987'), ('x-ratelimit-reset-requests', '1ms'), ('x-ratelimit-reset-tokens', '0s'), ('x-request-id', 'req_f8552ee7d3cda74089c40b09a667a11c'), ('strict-transport-security', 'max-age=31536000; includeSubDomains; preload'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=K4JnKFgR4hjVklhiYpYOBJWq_aNM03jL.3b6rr6i.mI-1742543796-1.0.1.1-M9iB0xiqdmYVjTNsePN_lcGUzDMaDs3HHtxqi3P9SRCyRCxwewtZvhwXe8FHpQtpTUYayHQJCuirL1j0CbPyHmViUXtbAht3Pk3Tq0dZjVQ; path=/; expires=Fri, 21-Mar-25 08:26:36 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('x-content-type-options', 'nosniff'), ('set-cookie', '_cfuvid=TpdB9iIHIQ7IEdkiMOoQVOJlXDWtypVFJQW_pjMp3no-1742543796441-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '923be4bdafb57e21-SJC'), ('content-encoding', 'br'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:56:36,153 [DEBUG] openai._base_client: request_id: req_f8552ee7d3cda74089c40b09a667a11c\n", - "2025-03-21 00:56:36,172 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:56:36,178 [DEBUG] 
openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:56:36,181 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:56:36,185 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:56:36,192 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:56:36,194 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:56:36,197 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:56:36,199 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "🛎️ Service response from openai:gpt-4o:\n", - "Quantum computing leverages the principles of quantum mechanics to process information. Unlike classical bits, quantum bits (qubits) can exist in multiple states simultaneously (superposition). 
This enables quantum computers to perform complex calculations at unprecedented speeds, addressing problems in cryptography, optimization, and simulations beyond the capabilities of classical computers.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:56:36,509 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:56:36 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'248'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_6bcbc261c6340def00989716c3350a3c'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be4c85ba77e21-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:56:36,511 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:56:36,512 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:56:36,514 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:56:36,515 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:56:36,516 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:56:36,518 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 
2025 07:56:36 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '248', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_6bcbc261c6340def00989716c3350a3c', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923be4c85ba77e21-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:56:36,519 [DEBUG] openai._base_client: request_id: req_6bcbc261c6340def00989716c3350a3c\n", - "2025-03-21 00:56:36,520 [WARNING] ember.core.registry.model.base.registry.factory: Provider name case mismatch: 'Openai' vs 'OpenAI'. 
Using the registered provider.\n", - "2025-03-21 00:56:36,521 [DEBUG] ember.core.registry.model.base.registry.factory: Creating model 'openai:gpt-4o-mini' using provider class 'OpenAIModel'.\n", - "2025-03-21 00:56:36,522 [INFO] ember.core.registry.model.initialization: Instantiated model: openai:gpt-4o-mini\n", - "2025-03-21 00:56:36,524 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:56:36,531 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Explain quantum computing in 50 words'}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:56:36,533 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:56:36,535 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:56:36,536 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:56:36,537 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:56:36,541 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:56:36,542 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "🎯 Direct response from openai:gpt-4o:\n", - "The capital of France is Paris.\n", - "\n", - "➡️ Testing model: openai:gpt-4o-mini\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:56:38,149 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:56:38 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', 
b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'1525'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999987'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_86c2355bc4104f135311da7c5d4abdfa'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be4ca7e3e7e21-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:56:38,152 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:56:38,155 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:56:38,159 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:56:38,161 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:56:38,163 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:56:38,164 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:56:38 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '1525', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999987', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_86c2355bc4104f135311da7c5d4abdfa', 'strict-transport-security': 'max-age=31536000; 
includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923be4ca7e3e7e21-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:56:38,166 [DEBUG] openai._base_client: request_id: req_86c2355bc4104f135311da7c5d4abdfa\n", - "2025-03-21 00:56:38,168 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:56:38,177 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': \"What's the capital of France?\"}], 'model': 'gpt-4o-mini', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:56:38,180 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:56:38,182 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:56:38,184 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 00:56:38,186 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:56:38,188 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:56:38,190 [DEBUG] httpcore.http11: receive_response_headers.started request=\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "🛎️ Service response from openai:gpt-4o-mini:\n", - "Quantum computing leverages quantum bits (qubits) to perform calculations at unprecedented speeds. Unlike classical bits, qubits can exist in multiple states simultaneously due to superposition and can be entangled, allowing for complex problem-solving. 
This technology has the potential to revolutionize fields like cryptography, optimization, and drug discovery.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:56:38,657 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:56:38 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'412'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'30000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'29999'), (b'x-ratelimit-remaining-tokens', b'149999990'), (b'x-ratelimit-reset-requests', b'2ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_58b051d94e4dc3bd6a99637af1dbae60'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be4d4cac57e21-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:56:38,659 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:56:38,660 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:56:38,672 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:56:38,673 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:56:38,674 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:56:38,676 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:56:38 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 
'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '412', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '30000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '29999', 'x-ratelimit-remaining-tokens': '149999990', 'x-ratelimit-reset-requests': '2ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_58b051d94e4dc3bd6a99637af1dbae60', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923be4d4cac57e21-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:56:38,677 [DEBUG] openai._base_client: request_id: req_58b051d94e4dc3bd6a99637af1dbae60\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "🎯 Direct response from openai:gpt-4o-mini:\n", - "The capital of France is Paris.\n", - "\n" - ] - } - ], - "source": [ - "for model_id in model_ids:\n", - " try:\n", - " print(f\"➡️ Testing model: {model_id}\")\n", - "\n", - " # Two usage styles are demonstrated below:\n", - " # 1. Service-based invocation: Recommended for automatic usage tracking.\n", - " service_response: ChatResponse = llm.invoke_model(\n", - " model_id=model_id,\n", - " prompt=\"Explain quantum computing in 50 words\",\n", - " )\n", - " print(f\"🛎️ Service response from {model_id}:\\n{service_response.data}\\n\")\n", - "\n", - " # 2. 
Direct model instance usage: Useful for more granular or PyTorch-like workflows.\n", - " model = load_model(model_id=model_id, registry=model_registry)\n", - " direct_response: ChatResponse = model(\n", - " prompt=\"What's the capital of France?\"\n", - " )\n", - " print(f\"🎯 Direct response from {model_id}:\\n{direct_response.data}\\n\")\n", - "\n", - " except Exception as error:\n", - " print(f\"❌ Error with model {model_id}: {str(error)}\")\n", - " continue\n" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-03-21 00:57:15,048 [INFO] ember.core.registry.model.providers.openai.openai_provider: OpenAI forward invoked\n", - "2025-03-21 00:57:15,059 [DEBUG] openai._base_client: Request options: {'method': 'post', 'url': '/chat/completions', 'timeout': 30, 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'Hello!'}], 'model': 'gpt-4o', 'max_completion_tokens': 512, 'temperature': None}}\n", - "2025-03-21 00:57:15,062 [DEBUG] openai._base_client: Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n", - "2025-03-21 00:57:15,065 [DEBUG] httpcore.connection: close.started\n", - "2025-03-21 00:57:15,067 [DEBUG] httpcore.connection: close.complete\n", - "2025-03-21 00:57:15,069 [DEBUG] httpcore.connection: connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=30 socket_options=None\n", - "2025-03-21 00:57:15,095 [DEBUG] httpcore.connection: connect_tcp.complete return_value=\n", - "2025-03-21 00:57:15,098 [DEBUG] httpcore.connection: start_tls.started ssl_context= server_hostname='api.openai.com' timeout=30\n", - "2025-03-21 00:57:15,117 [DEBUG] httpcore.connection: start_tls.complete return_value=\n", - "2025-03-21 00:57:15,120 [DEBUG] httpcore.http11: send_request_headers.started request=\n", - "2025-03-21 00:57:15,124 [DEBUG] httpcore.http11: send_request_headers.complete\n", - "2025-03-21 
00:57:15,126 [DEBUG] httpcore.http11: send_request_body.started request=\n", - "2025-03-21 00:57:15,129 [DEBUG] httpcore.http11: send_request_body.complete\n", - "2025-03-21 00:57:15,131 [DEBUG] httpcore.http11: receive_response_headers.started request=\n", - "2025-03-21 00:57:15,874 [DEBUG] httpcore.http11: receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Fri, 21 Mar 2025 07:57:16 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-expose-headers', b'X-Request-ID'), (b'openai-organization', b'user-iqhmndueuqg2ljzblqkr2tgh'), (b'openai-processing-ms', b'700'), (b'openai-version', b'2020-10-01'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'150000000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'149999996'), (b'x-ratelimit-reset-requests', b'1ms'), (b'x-ratelimit-reset-tokens', b'0s'), (b'x-request-id', b'req_e5f04f92ab9b2a181e6951edb196397d'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains; preload'), (b'cf-cache-status', b'DYNAMIC'), (b'X-Content-Type-Options', b'nosniff'), (b'Server', b'cloudflare'), (b'CF-RAY', b'923be5bbacd9cfbc-SJC'), (b'Content-Encoding', b'br'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n", - "2025-03-21 00:57:15,875 [INFO] httpx: HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-03-21 00:57:15,876 [DEBUG] httpcore.http11: receive_response_body.started request=\n", - "2025-03-21 00:57:15,878 [DEBUG] httpcore.http11: receive_response_body.complete\n", - "2025-03-21 00:57:15,879 [DEBUG] httpcore.http11: response_closed.started\n", - "2025-03-21 00:57:15,881 [DEBUG] httpcore.http11: response_closed.complete\n", - "2025-03-21 00:57:15,882 [DEBUG] openai._base_client: HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers({'date': 'Fri, 21 Mar 2025 07:57:16 GMT', 
'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'access-control-expose-headers': 'X-Request-ID', 'openai-organization': 'user-iqhmndueuqg2ljzblqkr2tgh', 'openai-processing-ms': '700', 'openai-version': '2020-10-01', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '150000000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '149999996', 'x-ratelimit-reset-requests': '1ms', 'x-ratelimit-reset-tokens': '0s', 'x-request-id': 'req_e5f04f92ab9b2a181e6951edb196397d', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'cf-cache-status': 'DYNAMIC', 'x-content-type-options': 'nosniff', 'server': 'cloudflare', 'cf-ray': '923be5bbacd9cfbc-SJC', 'content-encoding': 'br', 'alt-svc': 'h3=\":443\"; ma=86400'})\n", - "2025-03-21 00:57:15,886 [DEBUG] openai._base_client: request_id: req_e5f04f92ab9b2a181e6951edb196397d\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hello! How can I assist you today?\n" - ] - } - ], - "source": [ - "response = llm(prompt=\"Hello!\", model_id=\"openai:gpt-4o\")\n", - "print(response.data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "---\n", - "\n", - "## Neural Similarity Scoring - Cosine Similarity (WIP)\n", - "\n", - "- from `src/ember/core/utils/embedding_utils.py`\n", - "- from jason" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. 
Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", - "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" - ] - } - ], - "source": [ - "%pip install -q openai" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "from __future__ import annotations\n", - "\n", - "from abc import ABC, abstractmethod\n", - "from typing import List, Protocol\n", - "import math\n", - "\n", - "import openai\n", - "import os\n", - "\n", - "\n", - "################################################################\n", - "# 1) Embedding Model Interfaces & Implementations\n", - "################################################################\n", - "\n", - "\n", - "class EmbeddingModel(Protocol):\n", - " \"\"\"Interface for embedding models.\n", - "\n", - " This protocol defines the minimal interface required to compute a text\n", - " embedding. Implementations may use local models, external APIs, or custom\n", - " neural networks.\n", - "\n", - " Methods:\n", - " embed_text: Compute the embedding for a given text.\n", - " \"\"\"\n", - "\n", - " def embed_text(self, text: str) -> List[float]:\n", - " \"\"\"Computes the embedding vector for the provided text.\n", - "\n", - " Args:\n", - " text (str): The text to be embedded.\n", - "\n", - " Returns:\n", - " List[float]: A list of floats representing the embedding vector.\n", - " \"\"\"\n", - " ...\n", - "\n", - "class Text_Embedding_3_EmbeddingModel(EmbeddingModel):\n", - " \"\"\"Interface for embedding models.\n", - "\n", - " This protocol defines the minimal interface required to compute a text\n", - " embedding. 
Implementations may use local models, external APIs, or custom\n", - " neural networks.\n", - "\n", - " Methods:\n", - " embed_text: Compute the embedding for a given text.\n", - " \"\"\"\n", - "\n", - " def embed_text(self, text: str) -> List[float]:\n", - " \"\"\"Computes the embedding vector for the provided text.\n", - "\n", - " Args:\n", - " text (str): The text to be embedded.\n", - "\n", - " Returns:\n", - " List[float]: A list of floats representing the embedding vector.\n", - " \"\"\"\n", - " response = llm(model_id=\"openai:text-embedding-3-large\", prompt=text)\n", - " return response.data\n", - "\n", - "\n", - "class MockEmbeddingModel:\n", - " \"\"\"Mock implementation of an embedding model using naive ASCII encoding.\n", - "\n", - " This simple model converts each character in the text to a normalized ASCII\n", - " value. It is intended solely for demonstration and testing purposes.\n", - "\n", - " Methods:\n", - " embed_text: Converts text to a sequence of normalized ASCII values.\n", - " \"\"\"\n", - "\n", - " def embed_text(self, text: str) -> List[float]:\n", - " \"\"\"Embeds text by converting each character to its normalized ASCII code.\n", - "\n", - " Args:\n", - " text (str): The input text to be embedded.\n", - "\n", - " Returns:\n", - " List[float]: A list of floats representing the embedding. 
Returns an\n", - " empty list if the text is empty.\n", - " \"\"\"\n", - " if not text:\n", - " return []\n", - " return [ord(ch) / 256.0 for ch in text]\n", - "\n", - "\n", - "################################################################\n", - "# 2) Similarity Metric Interface & Implementations\n", - "################################################################\n", - "\n", - "\n", - "class SimilarityMetric(ABC):\n", - " \"\"\"Abstract base class for computing similarity between embedding vectors.\n", - "\n", - " Subclasses must implement the similarity method to calculate a similarity\n", - " score between two vectors.\n", - " \"\"\"\n", - "\n", - " @abstractmethod\n", - " def similarity(self, vec_a: List[float], vec_b: List[float]) -> float:\n", - " \"\"\"Calculates the similarity between two embedding vectors.\n", - "\n", - " Args:\n", - " vec_a (List[float]): The first embedding vector.\n", - " vec_b (List[float]): The second embedding vector.\n", - "\n", - " Returns:\n", - " float: The similarity score, typically in the range [0, 1] or [-1, 1].\n", - " \"\"\"\n", - " ...\n", - "\n", - "\n", - "class CosineSimilarity(SimilarityMetric):\n", - " \"\"\"Implementation of cosine similarity for embedding vectors.\n", - "\n", - " The cosine similarity is defined as:\n", - " similarity(a, b) = (a · b) / (||a|| * ||b||)\n", - "\n", - " Returns 0.0 if either vector is empty or if any vector's norm is zero.\n", - " \"\"\"\n", - "\n", - " def similarity(self, vec_a: List[float], vec_b: List[float]) -> float:\n", - " \"\"\"Computes cosine similarity between two embedding vectors.\n", - "\n", - " Args:\n", - " vec_a (List[float]): The first embedding vector.\n", - " vec_b (List[float]): The second embedding vector.\n", - "\n", - " Returns:\n", - " float: The cosine similarity score.\n", - " \"\"\"\n", - " if not vec_a or not vec_b:\n", - " return 0.0\n", - "\n", - " dot_product: float = sum(a * b for a, b in zip(vec_a, vec_b))\n", - " norm_a: float = math.sqrt(sum(a * 
a for a in vec_a))\n", - " norm_b: float = math.sqrt(sum(b * b for b in vec_b))\n", - " if norm_a == 0 or norm_b == 0:\n", - " return 0.0\n", - "\n", - " return dot_product / (norm_a * norm_b)\n", - "\n", - "\n", - "################################################################\n", - "# 3) High-Level Utility Function\n", - "################################################################\n", - "\n", - "\n", - "def calculate_text_similarity(\n", - " text1: str, text2: str, model: EmbeddingModel, metric: SimilarityMetric\n", - ") -> float:\n", - " \"\"\"Calculates text similarity using an embedding model and a similarity metric.\n", - "\n", - " This function generates embeddings for the provided texts and then computes a\n", - " similarity score using the given similarity metric.\n", - "\n", - " Args:\n", - " text1 (str): The first text string.\n", - " text2 (str): The second text string.\n", - " model (EmbeddingModel): An instance conforming to the embedding model interface.\n", - " metric (SimilarityMetric): An instance implementing a similarity metric.\n", - "\n", - " Returns:\n", - " float: The computed similarity score.\n", - " \"\"\"\n", - " embedding1: List[float] = model.embed_text(text=text1)\n", - " embedding2: List[float] = model.embed_text(text=text2)\n", - " return metric.similarity(vec_a=embedding1, vec_b=embedding2)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'mock_model' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[8], line 8\u001b[0m\n\u001b[1;32m 4\u001b[0m text_a: \u001b[38;5;28mstr\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHello 
world!\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 5\u001b[0m text_b: \u001b[38;5;28mstr\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHello, world??\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 7\u001b[0m score: \u001b[38;5;28mfloat\u001b[39m \u001b[38;5;241m=\u001b[39m calculate_text_similarity(\n\u001b[0;32m----> 8\u001b[0m text1\u001b[38;5;241m=\u001b[39mtext_a, text2\u001b[38;5;241m=\u001b[39mtext_b, model\u001b[38;5;241m=\u001b[39mmock_model, metric\u001b[38;5;241m=\u001b[39mcosine\n\u001b[1;32m 9\u001b[0m )\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSimilarity between \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_a\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m and \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext_b\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mscore\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n", - "\u001b[0;31mNameError\u001b[0m: name 'mock_model' is not defined" - ] - } - ], - "source": [ - "mock_model: Text_Embedding_3_EmbeddingModel = Text_Embedding_3_EmbeddingModel()\n", - "cosine: CosineSimilarity = CosineSimilarity()\n", - "\n", - "text_a: str = \"Hello world!\"\n", - "text_b: str = \"Hello, world??\"\n", - "\n", - "score: float = calculate_text_similarity(\n", - " text1=text_a, text2=text_b, model=mock_model, metric=cosine\n", - ")\n", - "print(f\"Similarity between '{text_a}' and '{text_b}': {score}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "---\n", - "\n", - "## Compression Ratio (WIP)\n", - "\n", - "- from `src/ember/core/utils/eval/evaluators.py`\n", - "- from connor" - ] - }, - { - "cell_type": "code", - "execution_count": 71, - "metadata": {}, - "outputs": [ - { - "name": 
"stdout", - "output_type": "stream", - "text": [ - "Note: you may need to restart the kernel to use updated packages.\n", - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], - "source": [ - "%pip install -q diversity==0.2.0\n", - "%pip install -q spacy==3.8.4" - ] - }, - { - "cell_type": "code", - "execution_count": 110, - "metadata": {}, - "outputs": [], - "source": [ - "from __future__ import annotations\n", - "\n", - "import re\n", - "import subprocess\n", - "from typing import Any, Dict, TypeVar, Optional, List, Generic, Callable, Union\n", - "\n", - "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", - "from ember.core.utils.eval.extractors import RegexExtractor\n", - "\n", - "from diversity import compression_ratio\n", - "\n", - "T_out = TypeVar(\"T_out\")\n", - "T_truth = TypeVar(\"T_truth\")\n", - "\n", - "\n", - "class ComposedEvaluator(IEvaluator[T_out, T_truth], Generic[T_out, T_truth]):\n", - " \"\"\"Combines an output extractor with an evaluator for the extracted data.\n", - "\n", - " This evaluator first transforms the system output using the provided extractor,\n", - " then evaluates the extracted value using the specified base evaluator.\n", - "\n", - " Args:\n", - " extractor: An object with an `extract` method to process the system output.\n", - " base_evaluator (IEvaluator): An evaluator that processes the extracted output.\n", - "\n", - " Returns:\n", - " EvaluationResult: The result of the evaluation.\n", - " \"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " extractor: Any, # Expecting an extractor with an `extract` method.\n", - " base_evaluator: IEvaluator[Any, Any],\n", - " ) -> None:\n", - " self.extractor = extractor\n", - " self.base_evaluator = base_evaluator\n", - "\n", - " def evaluate(\n", - " self, system_output: T_out, correct_answer: Any, **kwargs: Any\n", - " ) -> EvaluationResult:\n", - " \"\"\"Evaluates the provided system output against the correct 
answer.\n", - "\n", - " Args:\n", - " system_output (T_out): The raw output generated by the system.\n", - " correct_answer (Any): The expected correct answer.\n", - " **kwargs: Additional keyword arguments for extraction or evaluation.\n", - "\n", - " Returns:\n", - " EvaluationResult: The result of evaluating the extracted value.\n", - " \"\"\"\n", - " extracted_value = self.extractor.extract(system_output, **kwargs)\n", - " return self.base_evaluator.evaluate(extracted_value, correct_answer, **kwargs)\n", - "\n", - "\n", - "# Basic Evaluators\n", - "\n", - "\n", - "class ExactMatchEvaluator(IEvaluator[str, str]):\n", - " \"\"\"Evaluator to check for an exact match between two strings,\n", - " ignoring differences in whitespace and case.\n", - "\n", - " Example:\n", - " evaluator = ExactMatchEvaluator()\n", - " result = evaluator.evaluate(\"Hello World\", \"hello world\")\n", - "\n", - " Args:\n", - " compare_fn (Optional[Callable[[str, str], bool]]): Optional custom comparison function.\n", - " If not provided, strings are normalized (whitespace removed, lowercase) before comparison.\n", - "\n", - " Returns:\n", - " EvaluationResult: The result containing a correctness flag and a score.\n", - " \"\"\"\n", - "\n", - " def __init__(self, compare_fn: Optional[Callable[[str, str], bool]] = None) -> None:\n", - " self.compare_fn = compare_fn or self._default_compare\n", - "\n", - " def _default_compare(self, str1: str, str2: str) -> bool:\n", - " \"\"\"Default string comparison function that ignores case and whitespace.\n", - "\n", - " Args:\n", - " str1 (str): First string to compare\n", - " str2 (str): Second string to compare\n", - "\n", - " Returns:\n", - " bool: True if strings match after normalization\n", - " \"\"\"\n", - " return str1.strip().lower() == str2.strip().lower()\n", - "\n", - " def evaluate(\n", - " self, system_output: str, correct_answer: str, **kwargs: Any\n", - " ) -> EvaluationResult:\n", - " \"\"\"Evaluates whether a system output exactly 
matches the correct answer.\n", - "\n", - " Args:\n", - " system_output (str): The system-generated string.\n", - " correct_answer (str): The expected answer string.\n", - " **kwargs: Additional keyword arguments (unused).\n", - "\n", - " Returns:\n", - " EvaluationResult: An object with `is_correct` set to True if the normalized strings match,\n", - " along with a corresponding score.\n", - " \"\"\"\n", - " is_correct = self.compare_fn(system_output, correct_answer)\n", - " score = 1.0 if is_correct else 0.0\n", - " return EvaluationResult(is_correct=is_correct, score=score)\n", - "\n", - "class DiversityScoringEvaluator(IEvaluator[List[str], None]):\n", - " \"\"\"\n", - " Evaluator to test ensemble outputs -> score them (float)\n", - " \"\"\"\n", - " def evaluate(\n", - " self, \n", - " system_output: List[str], \n", - " **kwargs) -> EvaluationResult:\n", - " if system_output is None or len(system_output) == 0:\n", - " return EvaluationResult(is_correct=False, score=-1)\n", - "\n", - " # current compression ratio formula\n", - " # TODO: update scoring function to make it better\n", - " # -> like use token count\n", - "\n", - " # example I was thinking about:\n", - " letter_sum = sum(len(response) for response in system_output)\n", - " # ratio = compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", - " ratio = compression_ratio(system_output, algorithm='gzip',verbose=True)\n", - " return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output})\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Edit Distance (WIP)\n", - "- from kathleen" - ] - }, - { - "cell_type": "code", - "execution_count": 74, - "metadata": { - "collapsed": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], - "source": [ - "%pip install -q python-Levenshtein" - ] - }, - { - 
"cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import Levenshtein\n", - "from typing import List\n", - "from dataclasses import dataclass\n", - "\n", - "@dataclass\n", - "class EvaluationResult:\n", - " is_correct: bool\n", - " score: float\n", - " metadata: dict\n", - "\n", - "class EditDistanceScoringEvaluator:\n", - "\n", - " def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult:\n", - " if system_output is None or len(system_output) == 0:\n", - " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", - "\n", - " diversity_score = self.compute_distance(system_output)\n", - "\n", - " return EvaluationResult(\n", - " is_correct=True, \n", - " score=diversity_score,\n", - " metadata={'responses': system_output}\n", - " )\n", - "\n", - " def compute_distance(self, outputs: List[str]) -> float:\n", - " n = len(outputs)\n", - " if n < 2:\n", - " return 0.0\n", - "\n", - " total_distance = 0\n", - " pairs = 0\n", - "\n", - " for i in range(n):\n", - " for j in range(i + 1, n):\n", - " dist = Levenshtein.distance(outputs[i], outputs[j])\n", - " max_len = max(len(outputs[i]), len(outputs[j]))\n", - " normalized_dist = dist / max_len if max_len > 0 else 0 \n", - " total_distance += normalized_dist\n", - " pairs += 1\n", - " \n", - " return total_distance / pairs if pairs > 0 else 0.0\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Diversity Score: 0.8301\n", - "Is Correct: True\n", - "Metadata: {'responses': ['hi there', 'hi', 'hello', 'yo whatup']}\n" - ] - } - ], - "source": [ - "distance_evaluator = EditDistanceScoringEvaluator()\n", - "\n", - "# input_strs = [\n", - "# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. 
Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", - "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", - "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", - "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. 
When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", - "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", - "# ]\n", - "\n", - "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", - "\n", - "# input_strs = [\"This is a sample text with lots of repetition.\", \n", - "# \"This is a sample text with lots of repetition.\",\n", - "# \"This is a sample text with lots of repetition.\"]\n", - "\n", - "edit_distance = distance_evaluator.evaluate(input_strs)\n", - "\n", - "print(f\"Diversity Score: {edit_distance.score:.4f}\")\n", - "print(f\"Is Correct: {edit_distance.is_correct}\")\n", - "print(f\"Metadata: {edit_distance.metadata}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Novelty Score\n", - "- need to merge" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import List\n", - "from dataclasses import dataclass\n", - "import numpy as np\n", - "\n", - "@dataclass\n", - "class EvaluationResult:\n", - " is_correct: bool\n", - " 
score: float\n", - " metadata: dict\n", - "\n", - "class NoveltyScoringEvaluator:\n", - " \n", - " def evaluate(self, model: EmbeddingModel, system_output: List[str], **kwargs) -> EvaluationResult:\n", - " if not system_output or len(system_output) == 0:\n", - " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", - "\n", - " novelty_scores = [self.compute_novelty(r, system_output[:i]) for i, r in enumerate(system_output)]\n", - "\n", - " avg_novelty = sum(novelty_scores) / len(novelty_scores) if novelty_scores else 0.0\n", - "\n", - " return EvaluationResult(\n", - " is_correct=True,\n", - " score=avg_novelty,\n", - " metadata={'responses': system_output, 'novelty_scores': novelty_scores}\n", - " )\n", - "\n", - " def compute_novelty(self, response: str, prior_responses: List[str]) -> float:\n", - " if not prior_responses:\n", - " return 1.0\n", - "\n", - " new_embedding = self.model.embed_text(response)\n", - " prior_embeddings = [self.model.embed_text(r) for r in prior_responses]\n", - "\n", - " similarities = [\n", - " np.dot(new_embedding, prior_embedding) /\n", - " (np.linalg.norm(new_embedding) * np.linalg.norm(prior_embedding))\n", - " for prior_embedding in prior_embeddings\n", - " ]\n", - "\n", - " return 1 - max(similarities)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "EvaluationResult(is_correct=True, score=0.08368770360509659, metadata={'responses': ['Hello world!', 'Hi there!', 'Goodbye!']})\n" - ] - } - ], - "source": [ - "novelty_evaluator = NoveltyScoringEvaluator()\n", - "\n", - "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", - "\n", - "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", - "novelty = novelty_evaluator.evaluate(mock_model, input_strs)\n", - "\n", - "print(f\"Diversity Score: {novelty.score:.4f}\")\n", - "print(f\"Is Correct: {novelty.is_correct}\")\n", - "print(f\"Metadata: 
{novelty.metadata}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "---\n", - "\n", - "## Putting it all together" - ] - }, - { - "cell_type": "code", - "execution_count": 171, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", - "cosine: CosineSimilarity = CosineSimilarity()\n", - "exact_evaluator = ExactMatchEvaluator()\n", - "diversity_evaluator = DiversityScoringEvaluator()\n", - "edit_dist_evaluator = EditDistanceScoringEvaluator()\n", - "\n", - "def ensemble_diversity(strings):\n", - " compression = diversity_evaluator.evaluate(strings)\n", - " print(\"DiversityScoringEvaluator result:\", compression)\n", - " print(\"1/compression: \", 1/compression.score)\n", - " scores = list()\n", - " for ind1 in range(len(strings)):\n", - " ind2 = ind1+1 if ind1+1 != len(strings) else 0\n", - " curr_score = calculate_text_similarity(text1=strings[ind1], text2=strings[ind2], model=mock_model, metric=cosine)\n", - " # print(f\"SimilarityScore between ind1={ind1} and ind2={ind2}: {curr_score}\")\n", - " scores.append(curr_score)\n", - " avg_score = np.average(scores)\n", - " print(f\"Avg cosine similarity: {avg_score}\")\n", - " print(f\"diversity cosine-sim inverse: {1-avg_score}\")\n", - " edit_distance = edit_dist_evaluator.evaluate(strings)\n", - " print(f\"edit-dist score: {edit_distance.score:.4f}\")\n", - " print(\"-------------------------------\")\n", - " print(f\"possible diversity score (higher is better): {((1 - avg_score) + (min(1/compression.score, 1)) + edit_distance.score)/3}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original Size: 140\n", - "Compressed Size: 103\n", - "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.359, metadata={'responses': ['This is a sample text with lots of 
repetition.', 'This is a sample text with lots of repetition.', 'This is a sample text with lots of repetition.']})\n", - "1/compression: 0.7358351729212657\n", - "Avg cosine similarity: 1.0\n", - "diversity cosine-sim inverse: 0.0\n", - "edit-dist score: 0.0000\n", - "-------------------------------\n", - "possible diversity score (higher is better): 0.24527839097375523\n" - ] - } - ], - "source": [ - "input_strs = []\n", - "scores = []\n", - "input_strs.append([\n", - " \"Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", - " \"In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", - " \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. 
Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", - " \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", - " \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", - "])\n", - "# input_strs.append([\"hi there\", \"hi\", \"hello\", \"yo whatup\"])\n", - "\n", - "input_strs.append([\"This is a sample text with lots of repetition.\", \n", - " \"This is a sample text with lots of repetition.\",\n", - " \"This is a sample text with lots of repetition.\"])\n", - "\n", - "responses = []\n", - "for i in range(10):\n", - " res = llm(prompt=\"Tell me a funny joke. 
Keep it concise.\", model_id=\"openai:gpt-4o\").data.replace(\"\\n\", \"\")\n", - " responses.append(res)\n", - " print(f\"Joke {i}: [{res}]\")\n", - "\n", - "responses = []\n", - "res = llm(prompt=\"Tell me 10 different jokes. make them split with \\'||\\'. Don't say anything else besides the joke. \", model_id=\"openai:gpt-4o\").data.replace(\"\\n\", \"\").split('||')\n", - "responses += res\n", - "\n", - "\n", - "ensemble_diversity(input_strs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Joke 0: [Why don’t skeletons fight each other? They don’t have the guts. ]\n", - "Joke 1: [ Why did the scarecrow win an award? Because he was outstanding in his field. ]\n", - "Joke 2: [ Parallel lines have so much in common. It’s a shame they’ll never meet. ]\n", - "Joke 3: [ I told my wife she was drawing her eyebrows too high. She looked surprised. ]\n", - "Joke 4: [ I threw a boomerang a few years ago. I now live in constant fear. ]\n", - "Joke 5: [ Why don’t scientists trust atoms? Because they make up everything. ]\n", - "Joke 6: [ I told my computer I needed a break, and it gave me a Kit-Kat. ]\n", - "Joke 7: [ Why did the tomato turn red? Because it saw the salad dressing! ]\n", - "Joke 8: [ Did you hear about the cheese factory that exploded in France? There was nothing left but de-brie. ]\n", - "Joke 9: [ What’s orange and sounds like a parrot? A carrot.]\n", - "-----\n", - "Original Size: 727\n", - "Compressed Size: 470\n", - "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.547, metadata={'responses': ['Why don’t skeletons fight each other? They don’t have the guts. ', ' Why did the scarecrow win an award? Because he was outstanding in his field. ', ' Parallel lines have so much in common. It’s a shame they’ll never meet. ', ' I told my wife she was drawing her eyebrows too high. She looked surprised. 
', ' I threw a boomerang a few years ago. I now live in constant fear. ', ' Why don’t scientists trust atoms? Because they make up everything. ', ' I told my computer I needed a break, and it gave me a Kit-Kat. ', ' Why did the tomato turn red? Because it saw the salad dressing! ', ' Did you hear about the cheese factory that exploded in France? There was nothing left but de-brie. ', ' What’s orange and sounds like a parrot? A carrot.']})\n", - "1/compression: 0.6464124111182935\n", - "Avg cosine similarity: 0.3749999878695394\n", - "diversity cosine-sim inverse: 0.6250000121304606\n", - "edit-dist score: 0.7371\n", - "-------------------------------\n", - "possible diversity score (higher is better): 0.6695073326061808\n" - ] - } - ], - "source": [ - "responses = []\n", - "res = llm(prompt=\"Tell me 10 different jokes. make them split with \\'||\\'. Don't say anything else besides the joke. \", model_id=\"openai:gpt-4o\").data.replace(\"\\n\", \"\").split('||')\n", - "responses += res\n", - "\n", - "if prompts == 1 and len(responses) > 1:\n", - " for i in range(len(responses)):\n", - " print(f\"Joke {i}: [{responses[i]}]\")\n", - "\n", - "print(\"-----\")\n", - "ensemble_diversity(responses)" - ] - }, - { - "cell_type": "code", - "execution_count": 179, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Story 0: [In a realm where atoms dance on quantum stages, quantum computers emerge. Unlike classical bits that stand as ones or zeroes, their quantum siblings, qubits, perform an intricate ballet of superposition. Entanglement binds qubits in ghostly embrace, whispering answers across realms. A programmer conjures an algorithm, setting the qubits into mesmerizing motion. As quantum gates usher them through a tapestry of parallel universes, qubits explore myriad solutions simultaneously. Amidst the quantum haze, decoherence threatens their delicate existence. 
Yet, as the dance concludes, a singular truth crystallizes, offering solutions with speed and power beyond classical comprehension.]\n", - "Story 1: [Under the golden sun, a family of bunnies frolicked in the meadow, their soft fur kissed by the gentle breeze. Snowball, with her long ears twitching, led the way through a maze of daisies. Hopscotch, her brother, leaped high, disappearing momentarily among the swaying grass. Little Thumper giggled, his tiny feet thumping in delight. The meadow was alive with laughter as the bunnies chased butterflies, their joy infectious. A curious robin joined, fluttering above, adding a chorus to their playful romp. As the sun dipped below the horizon, a serene hush settled, with the bunnies cuddled close, dreaming of tomorrow's adventures.]\n", - "Story 2: [Once upon a time, Pikachu embarked on a journey through the Enchanted Forest, known for its mystical glow. Eager to explore, Pikachu bounded over sparkling streams and under vibrant canopies. Along the path, it encountered a lost Charmander, its flame dim. Pikachu, determined to help, sparked tiny bursts of electricity to guide Charmander. Together, they made it to the Healing Spring, rejuvenating Charmander and enabling its flame to blaze bright again. Grateful, Charmander joined Pikachu, and the duo continued their adventure, forging a bond of friendship that lit up even the darkest trails ahead. Thus, their adventure began anew.]\n", - "Story 3: [In the heart of Tokyo, nestled between bustling streets, stood Haruto's ramen shop. The little shop was famed for its rich miso ramen, a recipe passed down through generations. Each morning, Haruto would rise before dawn, crafting the broth with meticulous care. Locals and tourists alike lined up eagerly, savoring the aroma wafting through the air. Young Akira, a regular since childhood, adored the warmth of each bowl. One chilly winter, Haruto, now gray-haired, smiled at Akira, handing him the ladle. 
With a nod, Haruto retired, knowing the flavors and memories had found a new guardian in Akira.]\n", - "-----\n", - "Original Size: 2565\n", - "Compressed Size: 1374\n", - "DiversityScoringEvaluator result: EvaluationResult(is_correct=True, score=1.867, metadata={'responses': ['In a realm where atoms dance on quantum stages, quantum computers emerge. Unlike classical bits that stand as ones or zeroes, their quantum siblings, qubits, perform an intricate ballet of superposition. Entanglement binds qubits in ghostly embrace, whispering answers across realms. A programmer conjures an algorithm, setting the qubits into mesmerizing motion. As quantum gates usher them through a tapestry of parallel universes, qubits explore myriad solutions simultaneously. Amidst the quantum haze, decoherence threatens their delicate existence. Yet, as the dance concludes, a singular truth crystallizes, offering solutions with speed and power beyond classical comprehension.', \"Under the golden sun, a family of bunnies frolicked in the meadow, their soft fur kissed by the gentle breeze. Snowball, with her long ears twitching, led the way through a maze of daisies. Hopscotch, her brother, leaped high, disappearing momentarily among the swaying grass. Little Thumper giggled, his tiny feet thumping in delight. The meadow was alive with laughter as the bunnies chased butterflies, their joy infectious. A curious robin joined, fluttering above, adding a chorus to their playful romp. As the sun dipped below the horizon, a serene hush settled, with the bunnies cuddled close, dreaming of tomorrow's adventures.\", 'Once upon a time, Pikachu embarked on a journey through the Enchanted Forest, known for its mystical glow. Eager to explore, Pikachu bounded over sparkling streams and under vibrant canopies. Along the path, it encountered a lost Charmander, its flame dim. Pikachu, determined to help, sparked tiny bursts of electricity to guide Charmander. 
Together, they made it to the Healing Spring, rejuvenating Charmander and enabling its flame to blaze bright again. Grateful, Charmander joined Pikachu, and the duo continued their adventure, forging a bond of friendship that lit up even the darkest trails ahead. Thus, their adventure began anew.', \"In the heart of Tokyo, nestled between bustling streets, stood Haruto's ramen shop. The little shop was famed for its rich miso ramen, a recipe passed down through generations. Each morning, Haruto would rise before dawn, crafting the broth with meticulous care. Locals and tourists alike lined up eagerly, savoring the aroma wafting through the air. Young Akira, a regular since childhood, adored the warmth of each bowl. One chilly winter, Haruto, now gray-haired, smiled at Akira, handing him the ladle. With a nod, Haruto retired, knowing the flavors and memories had found a new guardian in Akira.\"]})\n", - "1/compression: 0.5356186395286556\n", - "Avg cosine similarity: 0.8809454989311085\n", - "diversity cosine-sim inverse: 0.11905450106889148\n", - "edit-dist score: 0.7669\n", - "-------------------------------\n", - "possible diversity score (higher is better): 0.47384919069532816\n" - ] - } - ], - "source": [ - "responses = []\n", - "res1 = llm(prompt=\"Tell me a story about how quantum computers work. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", - "responses.append(res1)\n", - "res2 = llm(prompt=\"Tell me a story about bunnies frolicking in the grass. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", - "responses.append(res2)\n", - "res3 = llm(prompt=\"Tell me a story about the pokemon pikachu and it's adventures. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", - "responses.append(res3)\n", - "res4 = llm(prompt=\"Tell me a story about a ramen shop. Make it 100 words. 
Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", - "responses.append(res4)\n", - "\n", - "if prompts == 1 and len(responses) > 1:\n", - " for i in range(len(responses)):\n", - " print(f\"Story {i}: [{responses[i]}]\")\n", - "\n", - "print(\"-----\")\n", - "ensemble_diversity(responses)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "---\n", - "## Improvements TODO\n", - "- Merge all functions\n", - "- fix ensembling\n", - "## Potential other cases to explore\n", - "- work ensembling all \"diversity\" related metrics \n", - " - add more metrics\n", - " - tune added metrics\n", - "- combination of validation/hallucination metric + ensembled diversity metric -> score" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "ember_upgrade", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/src/ember/core/registry/model/providers/openai/openai_provider.py b/src/ember/core/registry/model/providers/openai/openai_provider.py index b4896732..f0d85eac 100644 --- a/src/ember/core/registry/model/providers/openai/openai_provider.py +++ b/src/ember/core/registry/model/providers/openai/openai_provider.py @@ -333,6 +333,7 @@ def _prune_unsupported_params( kwargs.pop("temperature") return kwargs + # TODO: Fix embedding model structure def _is_embedding_model(self, model_name: str) -> bool: return model_name.startswith("text-embedding-") diff --git a/src/ember/core/utils/embedding_utils.py b/src/ember/core/utils/embedding_utils.py index 365035b7..c5eba46f 100644 --- a/src/ember/core/utils/embedding_utils.py +++ b/src/ember/core/utils/embedding_utils.py @@ -4,6 +4,9 @@ from abc import ABC, 
abstractmethod from typing import List, Protocol +# TODO: Fix embedding model structure +from ember.core.registry.model.base.services.model_service import ModelService + ################################################################ # 1) Embedding Model Interfaces & Implementations ################################################################ @@ -56,6 +59,29 @@ def embed_text(self, text: str) -> List[float]: return [] return [ord(ch) / 256.0 for ch in text] +# TODO: Fix embedding model structure +class Text_Embedding_Ada_002_Model: + """Interface for embedding models. + + This protocol defines the minimal interface required to compute a text + embedding. Implementations may use local models, external APIs, or custom + neural networks. + + Methods: + embed_text: Compute the embedding for a given text. + """ + + def embed_text(self, llm: ModelService, text: str) -> List[float]: + """Computes the embedding vector for the provided text. + + Args: + text (str): The text to be embedded. + + Returns: + List[float]: A list of floats representing the embedding vector. + """ + response = llm(model_id="openai:text-embedding-ada-002", prompt=text) + return response.embedding class OpenAITextEmbedding3(Protocol): """Interface for embedding models. 
diff --git a/src/ember/core/utils/eval/evaluators.py b/src/ember/core/utils/eval/evaluators.py index 140786a9..4f35c0c8 100644 --- a/src/ember/core/utils/eval/evaluators.py +++ b/src/ember/core/utils/eval/evaluators.py @@ -1,13 +1,15 @@ from __future__ import annotations import subprocess -from typing import Any, Callable, Generic, Optional, TypeVar +from typing import Any, List, Callable, Generic, Optional, TypeVar from .base_evaluator import EvaluationResult, IEvaluator from .extractors import RegexExtractor from diversity import compression_ratio import Levenshtein +import numpy as np +from ember.core.utils.embedding_utils import EmbeddingModel T_out = TypeVar("T_out") T_truth = TypeVar("T_truth") @@ -217,14 +219,14 @@ def evaluate( # example I was thinking about: letter_sum = sum(len(response) for response in system_output) - ratio = compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100) + ratio = 1/compression_ratio(system_output, algorithm="gzip") * min(1, len(system_output)/5) * min(1, letter_sum/100) return EvaluationResult(is_correct=True, score=ratio, metadata = {'responses': system_output}) -class CompressionRatioDiversityEvaluator(IEvaluator[List[str], None]): +class DiversityCompressionEvaluator(IEvaluator[List[str], None]): """ Evaluator to test ensemble outputs -> score them (float) """ @@ -235,29 +237,25 @@ def evaluate( if system_output is None or len(system_output) == 0: return EvaluationResult(is_correct=False, score=-1) - # example I was thinking about: + # current compression ratio formula - scaled by min num of words (5 words) + min num of chars (min 100) letter_sum = sum(len(response) for response in system_output) - ratio = compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100) + ratio = 1/compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100) + return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output}) - 
return EvaluationResult(is_correct=True, - score=ratio, - metadata = {'responses': system_output}) - -class EditDistanceScoringEvaluator: +class DiversityEditDistanceEvaluator: - def evaluate( - self, - system_output: List[str], - **kwargs) -> EvaluationResult: + def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult: if system_output is None or len(system_output) == 0: return EvaluationResult(is_correct=False, score=-1, metadata={}) diversity_score = self.compute_distance(system_output) - return EvaluationResult(is_correct=True, - score=diversity_score, - metadata = {'responses': system_output}) + return EvaluationResult( + is_correct=True, + score=diversity_score, + metadata={'responses': system_output} + ) def compute_distance(self, outputs: List[str]) -> float: n = len(outputs) @@ -277,6 +275,37 @@ def compute_distance(self, outputs: List[str]) -> float: return total_distance / pairs if pairs > 0 else 0.0 +class DiversityNoveltyEvaluator: + + def evaluate(self, model: EmbeddingModel, system_output: List[str], **kwargs) -> EvaluationResult: + if not system_output or len(system_output) == 0: + return EvaluationResult(is_correct=False, score=-1, metadata={}) + + novelty_scores = [self.compute_novelty(model, r, system_output[:i]) for i, r in enumerate(system_output)] + + avg_novelty = sum(novelty_scores) / len(novelty_scores) if novelty_scores else 0.0 + + return EvaluationResult( + is_correct=True, + score=avg_novelty, + metadata={'responses': system_output, 'novelty_scores': novelty_scores} + ) + + def compute_novelty(self, model: EmbeddingModel, response: str, prior_responses: List[str]) -> float: + if not prior_responses: + return 1.0 + + new_embedding = model.embed_text(response) + prior_embeddings = [model.embed_text(r) for r in prior_responses] + + similarities = [ + np.dot(new_embedding, prior_embedding) / + (np.linalg.norm(new_embedding) * np.linalg.norm(prior_embedding)) + for prior_embedding in prior_embeddings + ] + + return 1 - 
max(similarities) + class MultipleChoiceEvaluator(IEvaluator[str, str]): """Evaluator to check if a system output contains the correct multiple-choice answer. diff --git a/src/ember/examples/diversity_testbench.ipynb b/src/ember/examples/diversity_testbench.ipynb index 9cfbfc03..d184d1fa 100644 --- a/src/ember/examples/diversity_testbench.ipynb +++ b/src/ember/examples/diversity_testbench.ipynb @@ -28,30 +28,27 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "NOTE: things below this are to install required dependencies (do this in the virtual env)" + "NOTE: things below this are to install required dependencies (do this in the virtual env)\n", + "\n", + "This may require running these commaned to setup your venv first:\n", + "- https://github.com/jaredquincy/ember/blob/main/INSTALLATION_GUIDE.md\n", + "- `uv venv`\n", + "- `uv pip install ember-ai`\n", + "- `uv pip install pip`\n", + "- `source .vemv/bin/activate`\n", + "\n", + "Plus, add your OpenAI API key -> environ\n", + "- `export OPENAI_API_KEY=` in the terminal\n", + "- `os.environ[\"OPENAI_API_KEY\"] = ` in ipynb cell" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ - "# %pip install -q -e .\n", - "# %pip install -q google-generativeai==0.7.2\n", - "\n", - "# embedding model dependencies\n", - "# %pip install -q openai\n", - "\n", - "# compression ratio dependencies\n", - "%pip install -q diversity==0.2.0\n", - "%pip install -q spacy==3.8.4\n", - "\n", - "# edit distance\n", - "%pip install -q python-Levenshtein\n", - "\n", - "# ensemble example\n", - "%pip install -q matplotlib" + "import sys, os, logging" ] }, { @@ -60,38 +57,12 @@ "metadata": {}, "outputs": [], "source": [ - "# basic imports & dependencies\n", - "from __future__ import annotations\n", - "\n", - "import logging, sys, os, math, re, subprocess\n", - "from typing import Dict, Any, List, Protocol, TypeVar, Optional, Generic, Callable, Union\n", - "from abc 
import ABC, abstractmethod\n", - "\n", - "from diversity import compression_ratio\n", - "import Levenshtein\n", - "from dataclasses import dataclass\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "\n", - "\n", - "\n", - "# ember repo loads\n", - "from ember.core.registry.model.config.settings import initialize_registry\n", - "from ember.core.registry.model.base.services.model_service import ModelService\n", - "from ember.core.registry.model.base.schemas.model_info import ModelInfo\n", - "from ember.core.registry.model.base.schemas.cost import ModelCost, RateLimit\n", - "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", - "\n", - "from ember.core.registry.model import load_model, ChatResponse\n", - "from ember.core.registry.model.base.services.model_service import ModelService\n", - "\n", - "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", - "from ember.core.utils.eval.extractors import RegexExtractor" + "# < enter api keys here! 
>" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -107,70 +78,93 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "mounting to root directory of ember" + "Install dependencies" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "/root/ember/jared/ember\n" + "\u001b[2mUsing Python 3.11.9 environment at: /Users/concon/research/ember-branch/ember/.venv\u001b[0m\n", + "\u001b[2mAudited \u001b[1m1 package\u001b[0m \u001b[2min 10ms\u001b[0m\u001b[0m\n" ] } ], "source": [ - "# fixing dependencies if current path is /src/ember/examples/diversity_testbench.ipynb\n", - "target_dir = 'src/ember/examples'\n", - "if os.getcwd()[-18:] == target_dir:\n", - " os.chdir('../../..')\n", - "print(os.getcwd())\n", - "\n", - "project_root = os.path.abspath(os.path.join(os.getcwd(), \"../../..\"))\n", - "if project_root not in sys.path:\n", - " sys.path.insert(0, project_root)" + "# !uv pip install -q -e .\n", + "!uv pip install -q google-generativeai==0.7.2\n", + "\n", + "# embedding model dependencies\n", + "!uv pip install -q openai\n", + "\n", + "# compression ratio dependencies\n", + "!uv pip install diversity==0.2.0\n", + "!uv pip install -q spacy==3.8.4\n", + "\n", + "# edit distance\n", + "!uv pip install -q python-Levenshtein\n", + "\n", + "# ensemble example\n", + "!uv pip install -q matplotlib" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 59, + "metadata": {}, + "outputs": [], + "source": [ + "# basic imports & dependencies\n", + "from __future__ import annotations\n", + "\n", + "# class definitions used in ember repo\n", + "import math, re, subprocess\n", + "from typing import Dict, Any, List, Protocol, TypeVar, Optional, Generic, Callable, Union\n", + "from abc import ABC, abstractmethod\n", + "\n", + "# compression related items\n", + "from diversity 
import compression_ratio\n", + "import Levenshtein\n", + "from dataclasses import dataclass\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "\n", + "# ember repo loads\n", + "from ember.core.registry.model.config.settings import initialize_registry\n", + "from ember.core.registry.model.base.services.model_service import ModelService\n", + "from ember.core.registry.model.base.schemas.model_info import ModelInfo\n", + "from ember.core.registry.model.base.schemas.cost import ModelCost, RateLimit\n", + "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", + "\n", + "from ember.core.registry.model import load_model, ChatResponse\n", + "from ember.core.registry.model.base.services.model_service import ModelService\n", + "\n", + "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", + "from ember.core.utils.eval.extractors import RegexExtractor" + ] + }, + { + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/root/ember/jared/ember\n" - ] - } - ], "source": [ - "!echo $PWD" + "Setup model registry to run models" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": { "scrolled": true }, - "outputs": [ - { - "ename": "SyntaxError", - "evalue": "invalid syntax (1625361117.py, line 1)", - "output_type": "error", - "traceback": [ - " \u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[7]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[31m \u001b[39m\u001b[31mmodel_registry = initialize_ember(config_path=)\u001b[39m\n ^\n\u001b[31mSyntaxError\u001b[39m\u001b[31m:\u001b[39m invalid syntax\n" - ] - } - ], + "outputs": [], "source": [ "model_registry = initialize_registry()\n", - "# model_registry = initialize_ember()\n", "llm = ModelService(registry=model_registry)" ] }, @@ -180,14 +174,14 @@ "source": [ "---\n", "\n", - "### Model Registry checks \n", + "### 
Model Registry checks (**OPTIONAL**)\n", "\n", "From the code above, it should auto add models from your config files (which can displayed from printing below), but you can also add your own models as shown below!" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 20, "metadata": { "scrolled": true }, @@ -195,89 +189,52 @@ { "data": { "text/plain": [ - "['openai:gpt-4o-mini-transcribe',\n", - " 'openai:gpt-4o-audio-preview-2024-12-17',\n", + "['openai:gpt-4o-audio-preview-2024-12-17',\n", + " 'openai:gpt-4o-realtime-preview-2024-12-17',\n", " 'openai:dall-e-3',\n", " 'openai:dall-e-2',\n", " 'openai:gpt-4o-audio-preview-2024-10-01',\n", - " 'openai:gpt-4o-mini-2024-07-18',\n", + " 'openai:gpt-4o-mini-realtime-preview-2024-12-17',\n", + " 'openai:gpt-4o-mini-realtime-preview',\n", " 'openai:gpt-4o-realtime-preview-2024-10-01',\n", - " 'openai:gpt-4o-mini',\n", - " 'openai:gpt-4o-audio-preview',\n", + " 'openai:gpt-4o-transcribe',\n", + " 'openai:gpt-4o-mini-transcribe',\n", + " 'openai:gpt-4o-realtime-preview',\n", + " 'openai:gpt-4o-mini-tts',\n", " 'openai:text-embedding-3-large',\n", " 'openai:gpt-4',\n", - " 'openai:gpt-4o-2024-05-13',\n", - " 'openai:gpt-4o-realtime-preview',\n", + " 'openai:text-embedding-ada-002',\n", " 'openai:gpt-4o-mini-audio-preview',\n", + " 'openai:gpt-4o-audio-preview',\n", " 'openai:gpt-3.5-turbo-instruct-0914',\n", " 'openai:gpt-4o-mini-search-preview',\n", + " 'openai:gpt-4-0125-preview',\n", + " 'openai:gpt-4-turbo-preview',\n", " 'openai:gpt-3.5-turbo-1106',\n", " 'openai:gpt-4o-search-preview',\n", " 'openai:gpt-4-turbo',\n", - " 'openai:gpt-4o-realtime-preview-2024-12-17',\n", " 'openai:gpt-3.5-turbo-instruct',\n", " 'openai:gpt-3.5-turbo',\n", - " 'openai:gpt-4-turbo-preview',\n", " 'openai:gpt-4o-mini-search-preview-2025-03-11',\n", - " 'openai:gpt-4o-mini-realtime-preview',\n", + " 'openai:gpt-4o-2024-11-20',\n", " 'openai:gpt-3.5-turbo-0125',\n", - " 'openai:gpt-4o-2024-08-06',\n", + " 
'openai:gpt-4o-2024-05-13',\n", " 'openai:gpt-4-turbo-2024-04-09',\n", " 'openai:gpt-3.5-turbo-16k',\n", - " 'openai:gpt-4o',\n", - " 'openai:gpt-4o-mini-realtime-preview-2024-12-17',\n", " 'openai:gpt-4-1106-preview',\n", - " 'openai:text-embedding-ada-002',\n", " 'openai:gpt-4-0613',\n", " 'openai:gpt-4.5-preview',\n", " 'openai:gpt-4.5-preview-2025-02-27',\n", " 'openai:gpt-4o-search-preview-2025-03-11',\n", - " 'openai:gpt-4o-2024-11-20',\n", - " 'openai:gpt-4o-mini-tts',\n", - " 'openai:gpt-4-0125-preview',\n", - " 'openai:gpt-4o-transcribe',\n", " 'openai:text-embedding-3-small',\n", - " 'openai:gpt-4o-mini-audio-preview-2024-12-17',\n", - " 'anthropic:claude-3-sonnet',\n", - " 'anthropic:claude-3-opus',\n", - " 'anthropic:claude-3-haiku',\n", - " 'anthropic:claude-3.5-sonnet',\n", - " 'anthropic:claude-3.7-sonnet',\n", - " 'google:models/gemini-1.0-pro-vision-latest',\n", - " 'google:models/gemini-pro-vision',\n", - " 'google:models/gemini-1.5-pro-latest',\n", - " 'google:models/gemini-1.5-pro-001',\n", - " 'google:models/gemini-1.5-pro-002',\n", - " 'google:models/gemini-1.5-pro',\n", - " 'google:models/gemini-1.5-flash-latest',\n", - " 'google:models/gemini-1.5-flash-001',\n", - " 'google:models/gemini-1.5-flash-001-tuning',\n", - " 'google:models/gemini-1.5-flash',\n", - " 'google:models/gemini-1.5-flash-002',\n", - " 'google:models/gemini-1.5-flash-8b',\n", - " 'google:models/gemini-1.5-flash-8b-001',\n", - " 'google:models/gemini-1.5-flash-8b-latest',\n", - " 'google:models/gemini-1.5-flash-8b-exp-0827',\n", - " 'google:models/gemini-1.5-flash-8b-exp-0924',\n", - " 'google:models/gemini-2.0-flash-exp',\n", - " 'google:models/gemini-2.0-flash',\n", - " 'google:models/gemini-2.0-flash-001',\n", - " 'google:models/gemini-2.0-flash-exp-image-generation',\n", - " 'google:models/gemini-2.0-flash-lite-001',\n", - " 'google:models/gemini-2.0-flash-lite',\n", - " 'google:models/gemini-2.0-flash-lite-preview-02-05',\n", - " 
'google:models/gemini-2.0-flash-lite-preview',\n", - " 'google:models/gemini-2.0-pro-exp',\n", - " 'google:models/gemini-2.0-pro-exp-02-05',\n", - " 'google:models/gemini-exp-1206',\n", - " 'google:models/gemini-2.0-flash-thinking-exp-01-21',\n", - " 'google:models/gemini-2.0-flash-thinking-exp',\n", - " 'google:models/gemini-2.0-flash-thinking-exp-1219',\n", - " 'google:models/learnlm-1.5-pro-experimental',\n", - " 'google:models/gemma-3-27b-it']" + " 'openai:gpt-4o',\n", + " 'openai:gpt-4o-mini',\n", + " 'openai:gpt-4o-2024-08-06',\n", + " 'openai:gpt-4o-mini-2024-07-18',\n", + " 'openai:gpt-4o-mini-audio-preview-2024-12-17']" ] }, - "execution_count": 8, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -290,31 +247,42 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Error registering model: Model 'openai:text-embedding-3-large' is already registered.\n" + ] + } + ], "source": [ "# Register an OpenAI text-embedding model\n", - "openai_info = ModelInfo(\n", - " id=\"openai:text-embedding-3-large\",\n", - " name=\"text-embedding-3-large\",\n", - " cost=ModelCost(input_cost_per_thousand=0.03, output_cost_per_thousand=0.06),\n", - " rate_limit=RateLimit(tokens_per_minute=80000, requests_per_minute=5000),\n", - " provider=ProviderInfo(name=\"OpenAI\", default_api_key=openai_key),\n", - " api_key=openai_key,\n", - ")\n", - "model_registry.register_model(openai_info)" + "try:\n", + " openai_info = ModelInfo(\n", + " id=\"openai:text-embedding-3-large\",\n", + " name=\"text-embedding-3-large\",\n", + " cost=ModelCost(input_cost_per_thousand=0.03, output_cost_per_thousand=0.06),\n", + " rate_limit=RateLimit(tokens_per_minute=80000, requests_per_minute=5000),\n", + " provider=ProviderInfo(name=\"OpenAI\", default_api_key=openai_key),\n", + " api_key=openai_key,\n", + " )\n", + " model_registry.register_model(openai_info)\n", + 
"except ValueError as e:\n", + " print(\"Error registering model:\", e)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Try model registry\n", + "### Try model registry (**OPTIONAL**)\n", "taken from `src/ember/core/registry/model/examples/example.py`" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -330,9 +298,75 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 23, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "➡️ Testing model: openai:o1\n", + "❌ Error with model openai:o1: [Error 3002] Model 'openai:o1' not found. Available models:\n", + "- openai:gpt-4o-audio-preview-2024-12-17\n", + "- openai:gpt-4o-realtime-preview-2024-12-17\n", + "- openai:dall-e-3\n", + "- openai:dall-e-2\n", + "- openai:gpt-4o-audio-preview-2024-10-01\n", + "- openai:gpt-4o-mini-realtime-preview-2024-12-17\n", + "- openai:gpt-4o-mini-realtime-preview\n", + "- openai:gpt-4o-realtime-preview-2024-10-01\n", + "- openai:gpt-4o-transcribe\n", + "- openai:gpt-4o-mini-transcribe\n", + "- openai:gpt-4o-realtime-preview\n", + "- openai:gpt-4o-mini-tts\n", + "- openai:text-embedding-3-large\n", + "- openai:gpt-4\n", + "- openai:text-embedding-ada-002\n", + "- openai:gpt-4o-mini-audio-preview\n", + "- openai:gpt-4o-audio-preview\n", + "- openai:gpt-3.5-turbo-instruct-0914\n", + "- openai:gpt-4o-mini-search-preview\n", + "- openai:gpt-4-0125-preview\n", + "- openai:gpt-4-turbo-preview\n", + "- openai:gpt-3.5-turbo-1106\n", + "- openai:gpt-4o-search-preview\n", + "- openai:gpt-4-turbo\n", + "- openai:gpt-3.5-turbo-instruct\n", + "- openai:gpt-3.5-turbo\n", + "- openai:gpt-4o-mini-search-preview-2025-03-11\n", + "- openai:gpt-4o-2024-11-20\n", + "- openai:gpt-3.5-turbo-0125\n", + "- openai:gpt-4o-2024-05-13\n", + "- openai:gpt-4-turbo-2024-04-09\n", + "- openai:gpt-3.5-turbo-16k\n", + "- 
openai:gpt-4-1106-preview\n", + "- openai:gpt-4-0613\n", + "- openai:gpt-4.5-preview\n", + "- openai:gpt-4.5-preview-2025-02-27\n", + "- openai:gpt-4o-search-preview-2025-03-11\n", + "- openai:text-embedding-3-small\n", + "- openai:gpt-4o\n", + "- openai:gpt-4o-mini\n", + "- openai:gpt-4o-2024-08-06\n", + "- openai:gpt-4o-mini-2024-07-18\n", + "- openai:gpt-4o-mini-audio-preview-2024-12-17 [Recovery: Check the model name and ensure it's correctly registered] [Context: caller_file='/Users/concon/research/ember-branch/ember/src/ember/core/registry/model/base/registry/model_registry.py', caller_function='get_model', caller_lineno=144]\n", + "➡️ Testing model: openai:gpt-4o\n", + "🛎️ Service response from openai:gpt-4o:\n", + "Quantum computing utilizes quantum bits (qubits) to perform computations using principles of quantum mechanics, such as superposition and entanglement. This enables exponentially faster processing for specific tasks compared to classical computers, promising breakthroughs in fields like cryptography, optimization, and complex simulations. It remains largely experimental but rapidly advancing.\n", + "\n", + "🎯 Direct response from openai:gpt-4o:\n", + "The capital of France is Paris.\n", + "\n", + "➡️ Testing model: openai:gpt-4o-mini\n", + "🛎️ Service response from openai:gpt-4o-mini:\n", + "Quantum computing utilizes the principles of quantum mechanics to process information. Unlike classical computers, which use bits as 0s and 1s, quantum computers use qubits, allowing for superposition and entanglement. 
This enables them to perform complex calculations much faster, potentially revolutionizing fields like cryptography, optimization, and drug discovery.\n", + "\n", + "🎯 Direct response from openai:gpt-4o-mini:\n", + "The capital of France is Paris.\n", + "\n" + ] + } + ], "source": [ "for model_id in model_ids:\n", " try:\n", @@ -360,9 +394,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello! How can I assist you today?\n" + ] + } + ], "source": [ "response = llm(prompt=\"Hello!\", model_id=\"openai:gpt-4o\")\n", "print(response.data)" @@ -375,14 +417,14 @@ "---\n", "---\n", "\n", - "## Neural Similarity Scoring - Cosine Similarity (WIP)\n", + "## Neural Similarity Scoring - Cosine Similarity\n", "\n", "- from `src/ember/core/utils/embedding_utils.py`" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 75, "metadata": {}, "outputs": [], "source": [ @@ -585,7 +627,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "metadata": { "scrolled": true }, @@ -594,7 +636,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "Similarity between 'Hello world!' 
and 'Hello, world??': 0.9150491464734943\n" + "Cosine similarity Score: 0.7289\n", + "\n", + "Cosine similarity Score: 0.8205\n", + "\n", + "Cosine similarity Score: 1.0000\n", + "\n" ] } ], @@ -634,7 +681,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 80, "metadata": {}, "outputs": [], "source": [ @@ -743,22 +790,27 @@ " if system_output is None or len(system_output) == 0:\n", " return EvaluationResult(is_correct=False, score=-1)\n", "\n", - " # current compression ratio formula\n", - " # TODO: update scoring function to make it better\n", - " # -> like use token count\n", - "\n", - " # example I was thinking about:\n", + " # current compression ratio formula - scaled by min num of words (5 words) + min num of chars (min 100)\n", " letter_sum = sum(len(response) for response in system_output)\n", - " ratio = 1/compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", - " # ratio = compression_ratio(system_output, algorithm='gzip',verbose=True)\n", + " ratio = 1/compression_ratio(system_output, algorithm=\"gzip\") * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", " return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output})\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 78, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Compression Score: 0.5766\n", + "Is Correct: True\n", + "Metadata: {'responses': ['hi there', 'hi', 'hello', 'yo whatup']}\n" + ] + } + ], "source": [ "compression_evaluator = DiversityCompressionEvaluator()\n", "\n", @@ -792,7 +844,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -837,14 +889,14 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 30, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - 
"Diversity Score: 0.8301\n", + "Edit Distance Score: 0.8301\n", "Is Correct: True\n", "Metadata: {'responses': ['hi there', 'hi', 'hello', 'yo whatup']}\n" ] @@ -878,12 +930,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Novelty Score" + "## Novelty Score (WIP)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 44, "metadata": {}, "outputs": [], "source": [ @@ -899,7 +951,7 @@ " if not system_output or len(system_output) == 0:\n", " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", "\n", - " novelty_scores = [self.compute_novelty(r, system_output[:i]) for i, r in enumerate(system_output)]\n", + " novelty_scores = [self.compute_novelty(model, r, system_output[:i]) for i, r in enumerate(system_output)]\n", "\n", " avg_novelty = sum(novelty_scores) / len(novelty_scores) if novelty_scores else 0.0\n", "\n", @@ -909,12 +961,12 @@ " metadata={'responses': system_output, 'novelty_scores': novelty_scores}\n", " )\n", "\n", - " def compute_novelty(self, response: str, prior_responses: List[str]) -> float:\n", + " def compute_novelty(self, model: EmbeddingModel, response: str, prior_responses: List[str]) -> float:\n", " if not prior_responses:\n", " return 1.0\n", "\n", - " new_embedding = self.model.embed_text(response)\n", - " prior_embeddings = [self.model.embed_text(r) for r in prior_responses]\n", + " new_embedding = model.embed_text(response)\n", + " prior_embeddings = [model.embed_text(r) for r in prior_responses]\n", "\n", " similarities = [\n", " np.dot(new_embedding, prior_embedding) /\n", @@ -934,7 +986,9 @@ "name": "stdout", "output_type": "stream", "text": [ - "EvaluationResult(is_correct=True, score=0.08368770360509659, metadata={'responses': ['Hello world!', 'Hi there!', 'Goodbye!']})\n" + "Novelty Score: 0.3319\n", + "Is Correct: True\n", + "Metadata: {'responses': ['hi there', 'hi', 'hello', 'yo whatup'], 'novelty_scores': [1.0, 0.11153064719273964, 0.07012108639942194, 0.1458646243357714]}\n" ] 
} ], @@ -943,8 +997,8 @@ "\n", "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", "\n", - "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", - "novelty = novelty_evaluator.evaluate(mock_model, input_strs)\n", + "embedding_model: Text_Embedding_Ada_002_Model = Text_Embedding_Ada_002_Model()\n", + "novelty = novelty_evaluator.evaluate(embedding_model, input_strs)\n", "\n", "print(f\"Novelty Score: {novelty.score:.4f}\")\n", "print(f\"Is Correct: {novelty.is_correct}\")\n", @@ -970,11 +1024,11 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 82, "metadata": {}, "outputs": [], "source": [ - "mock_model: MockEmbeddingModel = MockEmbeddingModel()\n", + "embedding_model: Text_Embedding_Ada_002_Model = Text_Embedding_Ada_002_Model()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", "exact_evaluator = ExactMatchEvaluator()\n", "compression_evaluator = DiversityCompressionEvaluator()\n", @@ -982,22 +1036,26 @@ "\n", "def ensemble_diversity(strings):\n", " compression = compression_evaluator.evaluate(strings)\n", - " # print(\"compression (1/compression == compression/original) result:\", compression)\n", " cosine_scores = list()\n", " for ind1 in range(len(strings)):\n", " ind2 = ind1+1 if ind1+1 != len(strings) else 0\n", - " curr_score = calculate_text_similarity(text1=strings[ind1], text2=strings[ind2], model=mock_model, metric=cosine)\n", + " curr_score = calculate_text_similarity(text1=strings[ind1], text2=strings[ind2], model=embedding_model, metric=cosine)\n", " # print(f\"SimilarityScore between ind1={ind1} and ind2={ind2}: {curr_score}\")\n", " cosine_scores.append(curr_score)\n", " avg_cosine_score = np.average(cosine_scores)\n", - " # print(f\"Avg cosine similarity: {avg_score}\")\n", - " # print(f\"diversity cosine-sim inverse: {1-avg_score}\")\n", " edit_distance = edit_dist_evaluator.evaluate(strings)\n", - " # print(f\"edit-dist score: {edit_distance.score:.4f}\")\n", + "\n", + " div_cosine = 1 - 
avg_cosine_score\n", + " div_compression = min(compression.score, 1)\n", + " div_edit = edit_distance.score\n", + " div_ensemble_score = (div_cosine + div_compression + div_edit)/3\n", + "\n", + " # print(f\"diversity cosine-sim inverse: {div_cosine:.4f}\")\n", + " # print(f\"compression (1/compression == compression/original) result: {div_compression:.4f}\")\n", + " # print(f\"edit-dist score: {div_edit:.4f}\")\n", + " # print(f\"diversity score (higher is better): {div_ensemble_score:.4f}\")\n", " # print(\"-------------------------------\")\n", - " diversity_score = ((1 - avg_cosine_score) + min(compression.score, 1) + edit_distance.score)/3\n", - " # print(f\"possible diversity score (higher is better): {diversity_score}\")\n", - " return diversity_score" + " return div_cosine, div_compression, div_edit, div_ensemble_score" ] }, { @@ -1009,7 +1067,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 83, "metadata": {}, "outputs": [], "source": [ @@ -1030,14 +1088,6 @@ "responses += res\n", "input_strs.append(responses)\n", "\n", - "# input_strs.append([\n", - "# \"Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", - "# \"In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. 
Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", - "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", - "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", - "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. 
By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", - "# ])\n", - "\n", "responses = []\n", "res1 = llm(prompt=\"Tell me a story about how quantum computers work. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", "responses.append(res1)\n", @@ -1047,14 +1097,66 @@ "responses.append(res3)\n", "res4 = llm(prompt=\"Tell me a story about a ramen shop. Make it 100 words. Don't say anything else besides the story. \", model_id=\"openai:gpt-4o\").data\n", "responses.append(res4)\n", - "input_strs.append(responses)" + "input_strs.append(responses)\n", + "\n", + "input_strs.append([\n", + " \"Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", + " \"In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. 
Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", + " \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", + " \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", + " \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. 
Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", + "])" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 84, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 -------------------\n", + "\t- This is a sample text with lots of repetition.\n", + "\t- This is a sample text with lots of repetition.\n", + "\t- This is a sample text with lots of repetition.\n", + "1 -------------------\n", + "\t- Why don’t skeletons fight each other? They don’t have the guts.\n", + "\t- Why don't skeletons fight each other? They don't have the guts.\n", + "\t- Why did the scarecrow win an award? Because he was outstanding in his field!\n", + "\t- Why don't skeletons fight each other? They don't have the guts!\n", + "\t- Why did the scarecrow win an award?Because he was outstanding in his field!\n", + "\t- Why don't skeletons fight each other? They don't have the guts!\n", + "\t- Why don't skeletons fight each other? They don't have the guts.\n", + "\t- Why don't skeletons fight each other? They don't have the guts.\n", + "\t- Why don’t skeletons fight each other? They don’t have the guts.\n", + "\t- Why don't skeletons fight each other? They don't have the guts.\n", + "2 -------------------\n", + "\t- Why don't scientists trust atoms? Because they make up everything! \n", + "\t- I told my wife she was drawing her eyebrows too high. She looked surprised. \n", + "\t- Why did the scarecrow win an award? Because he was outstanding in his field! \n", + "\t- I told my computer I needed a break, and it said, \"No problem, I'll go to sleep.\" \n", + "\t- How does a penguin build its house? Igloos it together! \n", + "\t- Why don't skeletons fight each other? They don't have the guts. \n", + "\t- I used to play piano by ear, but now I use my hands. \n", + "\t- What do you call fake spaghetti? An impasta. 
\n", + "\t- Why did the bicycle collapse? It was two-tired. \n", + "\t- Want to hear a construction joke? Oh, sorry, I'm still working on it.\n", + "3 -------------------\n", + "\t- In a serene forest, nestled within the roots of an ancient oak, a hidden village harnessed quantum computers. These magical machines harnessed qubits, capable of existing simultaneously in multiple states, like leaves whispering secrets to the wind. Villagers marveled as these quantum leaves, entangled in a delicate dance, solved complex problems at unprecedented speeds. Guiding whispers directed them through intricate patterns, while the village's wise sage, Schrödinger, invoked superposition spells, merging probabilities into certainty. With each computation completed, the villagers celebrated, knowing their world was forever changed by the mystical harmony of quantum entanglement under the watchful oak.\n", + "\t- In a sunlit meadow, a group of bunnies frolicked in the lush, emerald grass. Jasper, the adventurous one, led the way, his fluffy tail bouncing playfully. Poppy and Luna followed close behind, their ears perked and alert. The air shimmered with the fragrance of blooming daisies, and butterflies danced above them like tiny, delicate kites. Jasper paused, discovering a hidden patch of clover, prompting a joyous feast. As the sun dipped toward the horizon, painting the sky in hues of pink and gold, the bunnies settled into a cozy circle, content and safe under the watchful gaze of the moon.\n", + "\t- In the lush forests of Viridian, Pikachu found an ancient map leading to the Thunder Stone of Legends. Eager for adventure, it sprinted past towering oaks and over babbling brooks. Along the way, Pikachu met a lost Pidgey, guiding it safely back to its nest. Grateful, the Pidgey gifted Pikachu a feather for luck. Further on, an angry Onix blocked the path, but Pikachu’s electric agility dazzled the rocky giant, earning its respect. 
Reaching the hidden grotto, Pikachu uncovered the Thunder Stone, glowing brilliantly. Empowered, Pikachu returned home, its spirit alive with newfound wisdom and friends aplenty.\n", + "\t- Nestled in a quiet Tokyo alley, Ichiro’s ramen shop was legendary. Locals whispered about his secret broth, simmered for hours with a mysterious spice blend. One rainy evening, a weary traveler stumbled in, drawn by the rich aroma. As the traveler savored the first bite, memories flooded back—his grandmother’s kitchen and long-lost family gatherings. The warmth of the broth melted years of solitude. Ichiro, sensing the traveler’s nostalgia, shared a knowing smile. In that cozy nook, where stories weaved into noodles, everyone discovered more than a meal; they found connection, hope, and for the traveler, a place called home.\n", + "4 -------------------\n", + "\t- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\n", + "\t- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. 
Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\n", + "\t- Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\n", + "\t- The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\n", + "\t- Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. 
Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\n" + ] + } + ], "source": [ "for i in range(len(input_strs)):\n", " print(f\"{i} -------------------\")\n", @@ -1064,21 +1166,61 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 85, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA90AAAJOCAYAAACqS2TfAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAVWFJREFUeJzt3QmcVWX9P/AHUDYXUJFFRNFcAEVQEEPLLQyXyi01s0BT6pdSGmqK/QQ1E00jrEjUxC1LyjTNBVMUcw0DNRcwNRVcEHABBQWE+b++z/915zcDAww4hxlm3u/X6zRzzz333OfeOZqf832WRmVlZWUJAAAAqHGNa/6UAAAAQBC6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBYBmNGjVK5513XlrX3XjjjalLly5p/fXXT61bt67Rcx9//PGpc+fOqa6aOHFi/jvGz4aqvlzHAOs6oRuA5bzyyivpe9/7Xtp2221T8+bN08Ybb5z22muvdPnll6ePP/64tptHNUybNi0H48997nPp6quvTlddddUKj41gFgGttLVs2TJttdVW6atf/Wq69tpr08KFC1N98Ic//CGNGjWqxs9b+t5OOumkKp//yU9+Un7MnDlzVvv8jz32WP4bffDBBzXQWgDWtkZlZWVla/1dAaiz7rrrrnTUUUelZs2apQEDBqSdd945LVq0KD3yyCPpL3/5Sw5yKwtw9cEnn3yS1ltvvbytq8aMGZO+//3vp5deeiltt912Kz02At3555+frrjiirThhhvmkP3mm2+me++9Nwe+XXbZJd15552pU6dO5a9ZvHhxWrp0ab5O6qJoW1y3TZs2TY0b//8aw1e+8pX03HPPpddee61G3yvCdNyciu2dd97J71lR3Lx6++2383U1e/bs1KZNm9U6/2WXXZbOPPPM9Oqrr65W74L6cB0D1Af+LQxAufiP+m984xtp6623Tg888EDq0KFD+XOnnHJKevnll3Mor49KIa0UntZ1s2bNyj9Xp1v517/+9UqBcNiwYemmm27KN1/iRswTTzxR/lx0Wa/Nv9GqRNBem3/HAw88MN1xxx3pnnvuSYceemj5/rhpEf9cHXnkkfmmVdHq23UMUB/oXg5AuZ///Ofpo48+Stdcc02lwF0SFdNTTz21/PGnn36afvrTn+YuzFHxjCrcOeecs1x35NgfVcYYX9u7d+/UokWL1L179/Lxtrfeemt+HCGhV69e6amnnqr0+qiuRwX2v//9b+rfv3/aYIMN0hZbbJEuuOCCtGyHragK7rnnnmmzzTbL7xPnu+WWW6qsTg4ePDiHyp122im3f/z48VWOhf3www/Taaedlj9HHNe2bdt0wAEHpClTplQ655///Of8fvG+EV6/9a1v5YpxVZ8l9h922GH598033zydccYZacmSJdX6O/32t78tb3N8D3FDpGLX42jn8OHD8+9x7s8ytve4447L3ab/+c9/pvvuu6/S5yhVXaPqvemmm6YTTjhhudfPmzcv/13j85XE9RHti+spPkNU0H/
84x8vd92s7G9088035+96o402ysMf4vqJ4Q8rGtO977775htGr7/+enlX72h/XO9xPVW8rkveeOON1KRJkzRixIhVfk8dO3ZMe++9d+7CXlG0PdoWPUaqEt9rBPZWrVrlbv377LNPevTRR8ufj79bVLnDNttsU972UrV+da7jENfdiSeemK+bODbOGT0iIqiX/pbR62H77bfPf7f45+gLX/hCpb89AKtHpRuAcn/7299yV9gIrdURYez666/PFdLTTz89B4gIKFOnTk233XZbpWOjSv7Nb34zjxWPMBrhOMYMRzfoCOonn3xyPi5ef/TRR6cXX3yxvFtwiEAa4eTzn/98vjkQwSKCWwT/CN8lEby+9rWv5bAYQSLCWVRpo3v0IYccUqlNUc3/05/+lENLhOQVdd39n//5nxzc47hu3bqld999N3e3j8+522675WOuu+66HDp33333/Bmim3G0JQJU3ESoWHGOzxI3D/bYY4/8Pdx///3pF7/4Rb55EQGoOl3B+/Xrl4+N7ym6hT/55JP5vaICHeOWb7jhhvw3KHUZjy7ia+rb3/52HlLw97//Pd9sWFa85+GHH55vnlx55ZWVulf/9a9/zWE6elCUKrHx94nv77vf/W7q2rVrevbZZ9Mvf/nL9J///Ccfv6q/UQTAY489Nn3pS19Kl1xyST4u/hbx+asKz6Vx1XPnzs1BOt4rxPcSW7R93LhxaeTIkTlkl/zxj3/MN3XiWqqOuL7j/SPIx3nj2owbMUOGDMldvZcVn+2ggw7KNw/iWo7rPcbQ77///unhhx9Offr0SUcccUT+XqIt0e5ST4S4mbKy76gqb731Vj5n3KCJ7z4m2YsQHtf2ggUL8t8trq+4fuOf7Tg2bpr861//yjeYqvrbA1ANMaYbAObOnRsl47JDDz20Wsc//fTT+fiTTjqp0v4zzjgj73/ggQfK92299dZ532OPPVa+79577837WrRoUfb666+X77/yyivz/gcffLB838CBA/O+H/zgB+X7li5dWnbIIYeUNW3atGz27Nnl+xcsWFCpPYsWLSrbeeedy/bff/9K++N8jRs3Lnv++eeX+2zx3PDhw8sft2rVquyUU05Z4XcR79G2bdv8Ph9//HH5/jvvvDOfa9iwYct9lgsuuKDSOXbdddeyXr16rfA9wqxZs/Ln/fKXv1y2ZMmS8v2/+c1v8jnHjh1bvi/aH/sqfjcrsqpj33///fz84YcfXulzxN912b/n3/72t0qvPfjgg8u23Xbb8sc33nhj/t4ffvjhSseNGTMmv/7RRx9d5d/o1FNPLdt4443LPv300xV+prh+lr2O4nqp2OZl237PPfdU2r/LLruU7bPPPit8j4rtjOvjvffey3+f+IzhrrvuKmvUqFHZa6+9ttx3HNfv9ttvX9a/f//8e8Xrd5tttik74IADyvddeuml+bWvvvpqle9d3et4wIAB+dgnn3xyuWNLbejRo0f+ngCoObqXA5BFRStEd93quPvuu/PPqOJVFBXvsOzY76gQ9+3bt/xxVHlDVPVipuxl90dX8mVFJa+k1K02qtlRKS6Jrt0l77//fq5ufvGLX1yuK3iIrrzRrlWJKnVU8aNSWJWoBMYY6qjWVxxHG5X1qCZWNQ4+qucVRRur+swVxeeMzxtd3Sv2Ahg0aFDuYl3UePuo2pa62a9I/B2jyhoV44rff1SljznmmPJ9UfmN6nZ8LzGTd2mL14cHH3xwlX+j+HvMnz+/xro8R6+B6G4dXbRLYsK1f//737lXRnVtsskmuTdGVKVDdDWPXiMxR8Kynn766TzJXVTHo+dE6XuIzxUV/H/84x+5V0B1VOc6jnNFL4LoXRJDPJYV/zyVvtvnn38+tw2AmiF0A5BFaFtVsKooxsZG8Ft2Zuz27dvn/3CP5yuqGKxDjGENFWfErrg/AltF8V7R9b2iHXbYIf+sOBt1dCOPLugRfmOccXTDjS7WEb6XFeNZqyO6s0cIi7ZGl9v
oglsxIJc+64477rjcayNcLvtdRNsqdg8uBbZlP/OyVvQ+0S04vptl36emRHfpVd2QiRmyY7Kw22+/vXxsdnQ3jzHCFUN3hLkIdfH5K26lv2VpAriV/Y3i5kYcH12zt9xyy/Sd73ynfBzzmohrK7qQRyiNbtYhAnj8nWJowuqIEB03A6ZPn57PF4+rUgq1AwcOXO67+N3vfpe/w6qu2apU5zqOWdPjxtqKxpaXxFCN6H4e32+MRY/x5HHzAYA1J3QDUB66o9oX4XJ1lCpkq1JxrGx19q/JipYxDjbGC0dYisnGohofASiCT1Xnq1gVX5kYYx4h+9e//nX+ji699NI8aVXMVL0mVvSZ66rSNbGqpcdi3HbctCl9LzHOOG469OjRo1LFNcJc/F2q2kpj+1f2N4qJ7KJSHLOFx987quMRwCPArqmYoT1uLkRQjmslqtQx+V/pJlB1RXtigrJoSwTnuHaqUqpix7W0ou+i1MNgVap7HVdHTAb3yiuvpLFjx+aAHjcAYt6C+AnAmjGRGgDlImTEhFmPP/54pa7gVYkusxEcomIX3YVLYgKxqJRV1aX2s4j3iuBbqoiGmGAqlCaOiiWZInDH+tIV14+Oyak+q5jNPQJhbFGNjSDys5/9LIe90meNSc1K3aRLYl9NfRcV36di1T+6nMeyVNFNugg33nhj/hmTv60qsMX3FF3MY8brmOArJjCrKCaLe+aZZ3IX6uresKlKVPejq3RscW3E3yUmcTv33HNXeHNgZe8XAXPXXXfNFe6onkelOm6yrK4IwDEr/e9///t8baxoTe74Hko3u1b1d/ss31NJVNDjvapzU600E31scSMi/q7RuyMmVwNg9al0A1Aulm2K5ZPiP64jPC8rKmClZZkOPvjg/DNmyq4oZoAOy84UXhN+85vflP8e1ch4HDNnR4ArVZAjoFRceiu6ni87I/bqiHMt2803Kq1R8S51o44xsrEvZmKvuOxVVHxjVu2a+i4inEXY/NWvflWpch9LvEUbi/jOo+IbVc64CVP6nlfWTTtmso9Z8COox+zdFbuWh6j8xozZV1999XKv//jjj/OY5lWJMdDLvm9pdvZllx2rKK7tlXXZjlnaY4b2uKZjqawIzWsilkeL2cjjBsCKxIzlEbxj9vpS9/1lu4NXbHeouCzc6orvKG4GxN8m5iBYVul6Wva7jWp73MRY2fcKwMqpdANQLkJAhKwISlG9ji63UQGMSupjjz2WJ8GK9ZlDdBmOLrRRGY8wEJM5TZo0KS8hFv9xv99++9Vo26KCHeN24z1jsrUItDFxWCw3VhofHaEzQn9MZhVdyqMiPXr06Bwa1nRcanSXjspnhMn4zBFCYkKzWKIrlvkKEfxj6aqoDMb3EMtZlZYMiyr8j370oxr5DuJzDh06NC8ZFp8xujJH1Tu60sdSZasz6VdVYumo+Hzx945gHD0GYhmu+Nzxt6+OuHaiQhyhM7qRV+wFUQq20e08JpKLbuF77bVXvrExbdq0vD/es6qJviqKm0Lvvfde7lUQf5sYyx7v2bNnz+Xeb9mgG1X4mPwvvq/4rFEpL4lrJm48xVJrsRxb/F3XRHxfFbvUrygEx82MCPYxVCGunVjrO773+F6iKh0BudTuEL0Gogt/tCvaXQrj1XXRRRflmwpxjZaWa3v77bfz3zaWcIu5GGJCtljTPN4zKt4R0EvL5QGwhmpwJnQA6on//Oc/ZYMGDSrr3LlzXgJpo402Kttrr73Kfv3rX5d98skn5cctXry47Pzzz89LHK2//vplnTp1Khs6dGilY0Is01TVMkSlpZYqimWRYn8sk1RxeaoNNtig7JVXXsnLZbVs2bKsXbt2eTmkiktnhWuuuSYvxdSsWbOyLl26lF177bXlyzWt6r2rWmpp4cKFZWeeeWZeSim+h2hH/P7b3/52udeNGzcuL/0V773pppuWHXfccWVvvPFGpWNKn2VZVbVxRWKJsPhs8Z3H9/D9738/L+tV1fl
WZ8mw0ta8efOyLbfcsuwrX/lKXoZs2b9n6XNUtfxWLD0V10Gc58ILL1zhEmuXXHJJ2U477ZS/q0022SQvlxbXUixdt6q/0S233JKvg1imLa7Prbbaqux73/te2dtvv73SJcM++uijsm9+85tlrVu3zs9V1f5Y4mzZ5e1WZWXX0qr+Hk899VTZEUccUbbZZpvl7yLadPTRR5dNmDCh0nE//elPyzp27JiX/Kq4fFh1r+OSWJ4vlg7bfPPN8/vFcm7x+rjOQ/zN+vTpk7+jWM4vrrOf/exn+W8GwJppFP+zpoEdANaGqK5Hta2qbrhQkw4//PD07LPPppdffrm2mwJAPWFMNwBASrmrdQxZiC7wAFBTjOkGABq0mPk9xq7HGOsYL/29732vtpsEQD2i0g0ANGgPPfRQrm5H+I6JANu3b1/bTQKgHjGmGwAAAAqi0g0AAAAFEboBAACgIA1uIrWlS5emt956K2200UapUaNGtd0cAAAA1kExUvvDDz9MW2yxRWrceMX17AYXuiNwd+rUqbabAQAAQD0wY8aMtOWWW67w+QYXuqPCXfpiNt5449puDgAAAOugefPm5YJuKWOuSIML3aUu5RG4hW4AAAA+i1UNWzaRGgAAABRE6AYAAICCCN0AAABQkAY3pru6lixZkhYvXlzbzaCOWn/99VOTJk1quxkAAEAdJ3RXsdbazJkz0wcffFDbTaGOa926dWrfvr313gEAgBUSupdRCtxt27ZNLVu2FKio8sbMggUL0qxZs/LjDh061HaTAACAOkroXqZLeSlwb7bZZrXdHOqwFi1a5J8RvON60dUcAACoionUKiiN4Y4KN6xK6Tox9h8AAFgRobsKupRTHa4TAABgVYRuAAAAKIjQTbVcd911ebZuAAAAqs9EatXU+ey71ur7vXbxIWs08/rPfvazdNddd6U333wzT/DVs2fPdNppp6UvfelLn6k9xxxzTDr44INT0SZOnJj222+/9P777wv5AADAOk/oridee+21tNdee+Wgeumll6bu3bvnCb7uvffedMopp6Rp06Z95tm6SzN2AwAAUD26l9cTJ598cp7Ya9KkSenII49MO+ywQ9ppp53SkCFD0hNPPJGPmT59ejr00EPThhtumDbeeON09NFHp3feeaf8HM8880yuMm+00Ub5+V69eqV//etfVXYvP++883IV/cYbb0ydO3dOrVq1St/4xjfShx9+WH7M0qVL04gRI9I222yTA3uPHj3SLbfcslqfq/S+cfOga9euue0HHnhgevvtt/Pzf//731Pz5s3zUm8VnXrqqWn//fdfw28TAACgZgjd9cB7772Xxo8fnyvaG2ywwXLPR2iNAByBO4596KGH0n333Zf++9//5m7jJccdd1zacsst05NPPpkmT56czj777LT++uuv8H1feeWV9Ne//jXdeeedeYvzXnzxxeXPR+C+4YYb0pgxY9Lzzz+ffvSjH6Vvfetb+bjVsWDBgnTZZZflgP+Pf/wj3zw444wz8nPRbT4+31/+8pdK662PGzcufx4AAIDapHt5PfDyyy+nsrKy1KVLlxUeM2HChPTss8+mV199NXXq1Cnvi0Ac1fAI2bvvvnsOs2eeeWb5ebbffvuVvm8E+ahER2U8fPvb387vE+PKFy5cmC666KJ0//33p759++bnt9122/TII4+kK6+8Mu2zzz7V/nzRTT6C++c+97n8ePDgwemCCy7Ivzdp0iRX2P/whz+kE088sfyzRuU7Kv4AAAC1SaW7HojAvSpTp07NYbsUuEO3bt1ylTieC9EV/aSTTkr9+vXLFeuoZK9MdCsvBe7QoUOHNGvWrPIbAVGhPuCAA3KX8NIWQb903gj8pf0HHXTQCt+nZcuW5YF72fcJUdGOCdjeeuut/Pimm25KhxxyiInYAACAWqfSXQ9ERTrGc3/WydJinPY3v/nNPPv5Pffck4YPH55uvvnmdPjhh1d5/LJdz6MNUf0OH330Uf4Z5+r
YsWOl45o1a5Z/3n333bmKHVY2SVtV71PxRkNU6SOUR1u///3vp9tuuy1X4AEAAGqb0F0PbLrppql///5p9OjR6Yc//OFy47qjq3VMQjZjxoy8lardL7zwQn4uKt4lMQFbbDH++thjj03XXnvtCkP3ysQ5I1xHl/UVdSXfeuutU02JandUuGNMeuPGjXOlGwAAoLbpXl5PROCOCcT69OmTJxV76aWXcrfxX/3qV3lMdXQZj2XEIpxOmTIlz3I+YMCAHIh79+6dPv744zxWOrppv/766+nRRx/NY70jrK+J6HYek51FeL/++utzl/J431//+tf5cU0rfa4YT/71r3+9vJoOAABQm1S664mYpKwUOk8//fS8pNbmm2+el/264oorcpfs22+/Pf3gBz9Ie++9d64Gx9JbEYJLE5K9++67OYjHMmJt2rRJRxxxRDr//PPXuE0//elPcxtiFvOYKT3GWO+2227pnHPOSTVtu+22yzcc4mbCqFGjavz8AAAAa6JRWXVm4apH5s2bl9eUnjt3bl6LuqJPPvkkz+4d60rH2s+wMq4XAKh7ul/fPdUFzw58trabANRitqxI93IAAAAoiNANAAAABRG6AQAAoCBCNwAAABTE7OUAQJVMSAUAn51KNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0Uy917tw5jRo1qrabAQAANHBmL6+u81qt5febu9ovmTlzZvrZz36W7rrrrvTmm2+mtm3bpp49e6bTTjstfelLX0oNyZNPPpk22GCD2m4GAADQwAnd9cRrr72W9tprr9S6det06aWXpu7du6fFixene++9N51yyilp2rRpqS6Jtq2//vqFnX/zzTcv7NwAAADVpXt5PXHyySenRo0apUmTJqUjjzwy7bDDDmmnnXZKQ4YMSU888UQ+Zvr06enQQw9NG264Ydp4443T0Ucfnd55553yc5x33nm5Mj527Ni01VZb5ePivEuWLEk///nPU/v27XP1PKrpFcX7XnHFFemggw5KLVq0SNtuu2265ZZbKt0QiGPGjRuX9tlnn9S8efN000035ed+97vfpa5du+Z9Xbp0Sb/97W/LX7do0aI0ePDg1KFDh/z81ltvnUaMGJGfKysry+2NdjZr1ixtscUW6Yc//OEKu5dX97PfeOON+bWtWrVK3/jGN9KHH35Yw38pAACgIVHprgfee++9NH78+ByGq+pSHdXvpUuXlofOhx56KH366ae5An7MMcekiRMnlh/7yiuvpHvuuSefL37/+te/nv773//mEB+ve+yxx9J3vvOd1K9fv7THHnuUv+7cc89NF198cbr88stzcI3A+uyzz+ZAXXL22WenX/ziF2nXXXctD97Dhg1Lv/nNb/K+p556Kg0aNCh/hoEDB6Zf/epX6Y477kh/+tOfcrieMWNG3sJf/vKX9Mtf/jLdfPPN+eZCdK1/5plnqvx+Vuez//Wvf0133nlnev/993Mwj8+07E0GAACA6hK664GXX345V36jUrwiEyZMyCH41VdfTZ06dcr7brjhhhxYY/zz7rvvXh5Qo9K90UYbpW7duqX99tsvvfjii+nuu+9OjRs3TjvuuGO65JJL0oMPPlgpdB911FHppJNOyr//9Kc/Tffdd1/69a9/XalyHWPLjzjiiPLHw4cPzyG8tG+bbbZJL7zwQrryyitz6I7q9Pbbb5++8IUv5Ep5VLpL4rmovEf4j27qEcr79OnzmT/7ddddlz97+Pa3v51fK3QDAABrSvfyeiAC96pMnTo1B85S6AwRqqMKHs+VRNfqUugM7dq1y8dF4K64b9asWZXO37dv3+UeVzxv6N27d/nv8+fPz5XlE088MVegS9uFF16Y94fjjz8+Pf300znoR9fxv//975VC/scff5y7skd1/LbbbssV7Jr87NGtfdnPCQAAsDqE7nogqsFRCa6JydKWndwszlvVvqgKr66KXd8/+uij/PPqq6/Owbq0Pffcc+Vj0Hfbbbd
cnY7KeQTs6O4d3d1DBOiowEclPcaRx9jzvffeO0/QtqZq6nMCAACUCN31wKabbpr69++fRo8enSvIy/rggw/y2OqKY6JDdOWO56Lq+1mVgnLFxxXHcy8rquUx+VmMF99uu+0qbdHNvCQmPYux1xHOYyK2GMsdY9hDhO2vfvWreex3jM1+/PHHczfyZRX92QEAAFbEmO56IgJ3LBkW45ovuOCCtMsuu+Tu1jG2OmYWj5AZy4gdd9xxeVbveC6qwzGbeMVu32vqz3/+cz5PjL+OCdJiFvVrrrlmpa85//zzc7fxmCn8wAMPTAsXLkz/+te/8iRmMev6yJEjcxfvmGQturfHe8Q47ugWHmOvY1b1GFfesmXL9Pvf/z6H8Irjvkti3HeRnx0AAGBFVLrriRjbPGXKlDzx2emnn5523nnndMABB+SJwCJ0R1fp22+/PW2yySa5G3YE0XhNVI9rQgTomEk8wn5MUvbHP/5xlVXkmHgtlgy79tprcyiOEBxhulTpjvHVsVRZBOOY7CyWHitN6BbBO6rfcaMh3vP+++9Pf/vb39Jmm2223PsU/dkBAABWpFFZdWbhqkfmzZuXK6tz587NXZcr+uSTT/IY4gh9saQV1ROhNiYyO+yww1JD4noB6rvu13dPdcGzA5cfOgQr4roF6kK2rEilGwAAAAoidAMAAEBBTKTGZ9bARigAAABUm0o3AAAAFEToBgAAgIII3QAAAFAQoRsAAADqc+gePXp06ty5c17reI899kiTJk1a4bH77rtvXhd62e2QQw5Zq20GAACAOh+6x40bl4YMGZKGDx+epkyZknr06JH69++fZs2aVeXxt956a3r77bfLt+eeey41adIkHXXUUWu97QAAAFCnQ/fIkSPToEGD0gknnJC6deuWxowZk1q2bJnGjh1b5fGbbrppat++ffl233335eOF7tUXPQT++te/5t9fe+21/Pjpp5+u7WYBAADUG7W6TveiRYvS5MmT09ChQ8v3NW7cOPXr1y89/vjj1TrHNddck77xjW+kDTbYoMCWptT9+u5pbXp24LOrdfzxxx+frr/++uX2R6+B8ePHr/L1nTp1yj0H2rRpkx9PnDgx7bfffun9999PrVu3Xq22AAAAUAdC95w5c9KSJUtSu3btKu2Px9OmTVvl62Psd3Qvj+C9IgsXLsxbybx581J9deCBB6Zrr7220r5mzZpV67XRRT96DgAAAGvX2i7w1VThj3Wke/lnEWG7e/fuqU+fPis8ZsSIEalVq1blW1R066sI2BW73se2ySab5OdeeumltPfee+fJ6qIbf3TLr6hi9/L4ParcIV4f+6OSDgAAwDoUuqMrc1RY33nnnUr74/Gqqq7z589PN998czrxxBNXelx0XZ87d275NmPGjNTQLF26NB1xxBGpadOm6Z///GceN3/WWWet8Pi4MfGXv/wl//7iiy/mbueXX375WmwxAABA/VCr3csjBPbq1StNmDAhHXbYYeUBMR4PHjx4pa/985//nLuNf+tb31pl9be6XazXdXfeeWfacMMNK+0755xzUu/evXN3/XvvvTdtscUWef9FF12UDjrooCrPEzdCYsK60LZtW2O6AQAA1sXQHWK5sIEDB+ZgGN3ER40alavYMZt5GDBgQOrYsWPuJr5s1/II6ptttlkttbzuiS7hV1xxRaV9EZ5vvPHGXL0uBe7Qt2/fWmghAABAw1LrofuYY45Js2fPTsOGDUszZ85MPXv2zLNtlyZXmz59ep7RvKLo8vzII4+kv//977XU6ropZnDfbrvtarsZAAAA1JXQHaIr+Yq6k8fSVcvacccdU1lZ2VpoWf3QtWvXPJY9xmZ36NAh73viiSdW2fU/xOzyAAAArMOhm5oRY9yjt0BF6623Xl73fIcddsjd+C+99NK8bNpPfvKTlZ5r6623zrOWxzjxgw8+OLVo0WK58eIAAADU4yXDqCy65Uclu+L2hS98IXfPv+2229LHH3+cx82
fdNJJ6Wc/+9lKzxXj6M8///x09tln567+q5rYDgAAgOWpdNeTheKvu+66vK1IVLoffvjhSvsqdtHv3Lnzcl32zz333LwBAACwZlS6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEEsGQYAdcl5rVKdsc1Wtd0CAFjnqXQDAABAQYRuAAAAKIjQTa157bXXUqNGjdLTTz+9wmMmTpyYj/nggw8+03t17tw5jRo16jOdAwAAYHUZ011NU7t0Xavv13Xa1NU6/vjjj0/XX3/9cvv79++fxo8fX4MtAwAAoLqE7nrkwAMPTNdee22lfc2aNau19gAAADR0upfXIxGw27dvX2nbZJNN8nPRRft3v/tdOvzww1PLli3T9ttvn+64447y177//vvpuOOOS5tvvnlq0aJFfr5igJ8xY0Y6+uijU+vWrdOmm26aDj300Nw9vGKl/bDDDksXXXRRateuXT7uggsuSJ9++mk688wz82u23HLL5W4KhGnTpqU999wzNW/ePO28887poYceWunnfOSRR9IXv/jF3M5OnTqlH/7wh2n+/Pnlz8+aNSt99atfzc9vs8026aabbvrM3y0AAMCaELobkPPPPz8H53//+9/p4IMPziH7vffey8+de+656YUXXkj33HNPmjp1arriiitSmzZt8nOLFy/O3dQ32mij9PDDD6dHH300bbjhhrmyvmjRovLzP/DAA+mtt95K//jHP9LIkSPT8OHD01e+8pUc/P/5z3+m//mf/0nf+9730htvvFGpXRHKTz/99PTUU0+lvn375sD87rvvVvkZXnnllfy+Rx55ZP4c48aNyyF88ODBlW4AxE2CBx98MN1yyy3pt7/9bQ7iAAAAa5vQXY/ceeedOQxX3KLyXDGMHnvssWm77bbL+z/66KM0adKk/Nz06dPTrrvumnr37p0nHevXr18OvyGC7dKlS3OlvHv37qlr1665Yh2viYnOSqKa/atf/SrtuOOO6Tvf+U7+uWDBgnTOOefkyvnQoUNT06ZNc0iuKAJzhOg4b4T9Vq1apWuuuabKzzhixIh8s+C0007L54wKebznDTfckD755JP0n//8J984uPrqq9PnP//51KtXr3yujz/+uKBvHQAAYMWM6a5H9ttvvxxaK4ogXLLLLruU/77BBhukjTfeuLwC/P3vfz8H3ylTpqQvf/nLuat4BNrwzDPPpJdffjlXuiuKkBuV55KddtopNW78f/dxopt5dBcvadKkSdpss82WqzpHdbtkvfXWy8E/qu1VibZEhbtil/GysrJ8U+DVV1/NoTvOEWG7pEuXLrm7OwAAwNomdNcjEaSjir0i66+/fqXHMc47wmo46KCD0uuvv57uvvvudN9996UvfelL6ZRTTkmXXXZZrohHiK1qbHSMAV/Z+Vf2nmsi2hJd1GMc97K22mqrHLoBAADqCt3LqRSgBw4cmH7/+9/nNa2vuuqqvH+33XZLL730Umrbtm0O9RW36Ar+WT3xxBPlv8fEa5MnT85dzasSbYmx58u2I7bouh5V7dI5Sl588cXPvM43AADAmhC665GFCxemmTNnVtrmzJlTrdcOGzYs3X777bkb+fPPP5/Hh5eCb4yhjknVYsbymEgtunHHWO6oNi87KdqaGD16dLrtttvyLOZRXY+Z1GNMeFXOOuus9Nhjj+Vx4E8//XS+GRDtLk2kFuPIY6K1qIbH5G0Rvk866aQ8kzkAAMDaJnTXI+PHj08dOnSotH3hC1+o1mujShwTncW477333juPv7755pvzc7HEWMxIHt23jzjiiBzGTzzxxDymO8aFf1YXX3xx3nr06JEnWYulzEozpy8r2hdLikU38lg2LCZ/ixsGW2yxRfkxMclbPN5nn31ye7/73e/mKj0AAMDa1qgsZqFqQObNm5e7RM+dO3e5wBghMqq4sbZzrBkNK+N6AQpx3mcftlNTum+zVaoLnh34bG03gXVI9+u7p7rAdcvqcN3
Wv2xZkUo3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInRXoYHNLccacp0AAACrInRXsP766+efCxYsqO2msA4oXSel6wYAAGBZ6y23pwGLtalbt26dZs2aVb4+daNGjWq7WdTBCncE7rhO4nqJ6wYAAKAqQvcy2rdvn3+WgjesSATu0vUCAABQFaF7GVHZ7tChQ2rbtm1avHhxbTeHOiq6lKtwAwAAqyJ0r0AEKqEKAACAz8JEagAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEFqPXSPHj06de7cOTVv3jztscceadKkSSs9/oMPPkinnHJK6tChQ2rWrFnaYYcd0t13373W2gsAAADVtV6qRePGjUtDhgxJY8aMyYF71KhRqX///unFF19Mbdu2Xe74RYsWpQMOOCA/d8stt6SOHTum119/PbVu3bpW2g8AAAB1NnSPHDkyDRo0KJ1wwgn5cYTvu+66K40dOzadffbZyx0f+99777302GOPpfXXXz/viyo5AAAA1EW11r08qtaTJ09O/fr1+7/GNG6cHz/++ONVvuaOO+5Iffv2zd3L27Vrl3beeed00UUXpSVLlqzFlgMAAEAdr3TPmTMnh+UIzxXF42nTplX5mv/+97/pgQceSMcdd1wex/3yyy+nk08+OS1evDgNHz68ytcsXLgwbyXz5s2r4U8CAAAAdXQitdWxdOnSPJ77qquuSr169UrHHHNM+slPfpK7pa/IiBEjUqtWrcq3Tp06rdU2AwAA0HDVWuhu06ZNatKkSXrnnXcq7Y/H7du3r/I1MWN5zFYeryvp2rVrmjlzZu6uXpWhQ4emuXPnlm8zZsyo4U8CAAAAdSx0N23aNFerJ0yYUKmSHY9j3HZV9tprr9ylPI4r+c9//pPDeJyvKrGs2MYbb1xpAwAAgHrfvTyWC7v66qvT9ddfn6ZOnZq+//3vp/nz55fPZj5gwIBcqS6J52P28lNPPTWH7ZjpPCZSi4nVAAAAoK6p1SXDYkz27Nmz07Bhw3IX8Z49e6bx48eXT642ffr0PKN5SYzHvvfee9OPfvSjtMsuu+R1uiOAn3XWWbX4KQAAAKAOhu4wePDgvFVl4sSJy+2LrudPPPHEWmgZAAAANKDZywEAAGBdInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCrFfUiQEAasLULl1TXdF12tTabgIA6xiVbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACA+hy6R48enTp37pyaN2+e9thjjzRp0qQVHnvdddelRo0aVdridQAAAFDX1HroHjduXBoyZEgaPnx4mjJlSurRo0fq379/mjVr1gpfs/HGG6e33367fHv99dfXapsBAABgnQjdI0eOTIMGDUonnHBC6tatWxozZkxq2bJlGjt27ApfE9Xt9u3bl2/t2rVbq20GAACAOh+6Fy1alCZPnpz69ev3fw1q3Dg/fvzxx1f4uo8++ihtvfXWqVOnTun
QQw9Nzz///FpqMQAAAKwjoXvOnDlpyZIly1Wq4/HMmTOrfM2OO+6Yq+C33357+v3vf5+WLl2a9txzz/TGG29UefzChQvTvHnzKm0AAADQILqXr66+ffumAQMGpJ49e6Z99tkn3XrrrWnzzTdPV155ZZXHjxgxIrVq1ap8i+o4AAAA1PvQ3aZNm9SkSZP0zjvvVNofj2OsdnWsv/76adddd00vv/xylc8PHTo0zZ07t3ybMWNGjbQdAAAA6nTobtq0aerVq1eaMGFC+b7oLh6Po6JdHdE9/dlnn00dOnSo8vlmzZrl2c4rbgAAALA2rJdqWSwXNnDgwNS7d+/Up0+fNGrUqDR//vw8m3mIruQdO3bM3cTDBRdckD7/+c+n7bbbLn3wwQfp0ksvzUuGnXTSSbX8SQAAAKCOhe5jjjkmzZ49Ow0bNixPnhZjtcePH18+udr06dPzjOYl77//fl5iLI7dZJNNcqX8sccey8uNAQAAQF1S66E7DB48OG9VmThxYqXHv/zlL/MGAAAAdd06N3s5AAAArCuEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAXQrdn376abr//vvTlVdemT788MO876233kofffRRTbcPAAAA1lnrre4LXn/99XTggQem6dOnp4ULF6YDDjggbbTRRumSSy7Jj8eMGVNMSwEAAKC+V7pPPfXU1Lt37/T++++nFi1alO8//PDD04QJE2q6fQAAANBwKt0PP/xweuyxx1LTpk0r7e/cuXN68803a7JtAAAA0LAq3UuXLk1LlixZbv8bb7yRu5kDAAAAaxi6v/zlL6dRo0aVP27UqFGeQG348OHp4IMPXt3TAQAAQL212t3LL7vssjyRWrdu3dInn3ySvvnNb6aXXnoptWnTJv3xj38sppUAALAOmdqla6oruk6bWttNgAZttUN3p06d0jPPPJPGjRuXf0aV+8QTT0zHHXdcpYnVAAAAoKFbrdC9ePHi1KVLl3TnnXfmkB0bAAAAUANjutdff/3cpRwAAAAoYCK1U045JV1yySXp008/Xd2XAgAAQIOy2mO6n3zyyTRhwoT097//PXXv3j1tsMEGlZ6/9dZba7J9AAAA0HBCd+vWrdORRx5ZTGsAAACgIYfua6+9tpiWAAAAQEMP3SWzZ89OL774Yv59xx13TJtvvnlNtgsAAAAa3kRq8+fPT9/5zndShw4d0t577523LbbYIq/VvWDBgmJaCQAAAA0hdA8ZMiQ99NBD6W9/+1v64IMP8nb77bfnfaeffnoxrQQAAICG0L38L3/5S7rlllvSvvvuW77v4IMPTi1atEhHH310uuKKK2q6jQAAANAwKt3Rhbxdu3bL7W/btq3u5QAAAPBZQnffvn3T8OHD0yeffFK+7+OPP07nn39+fg4AAABYw+7ll19+eerfv3/acsstU48ePfK+Z555JjVv3jzde++9q3s6AAAAqLdWO3TvvPPO6aWXXko33XRTmjZtWt537LHHpuOOOy6P6wYAAAA+wzrdLVu2TIMGDVqTl7KuOq9VqjPOm1vbLQAAAChmTPeIESPS2LFjl9sf+y655JLVPR0AAADUW6sduq+88srUpUuX5fbvtNNOacyYMTXVLgAAAGh4oXvmzJmpQ4cOy+3ffPPN09tvv11T7QIAAICGF7o7deqUHn300eX2x74tttiiptoFAAAADW8itZhA7bTTTkuLFy9O+++/f943YcKE9OMf/zidfvrpRbQRAAAAGkboPvPMM9O7776bTj755LRo0aK8L9boPuuss9LQoUOLaCMAAAA0jNDdqFGjPEv5ueeem6ZOnZrX5t5+++1Ts2bNimkhAAB1W11aWnSbrWq7BQCfbUx3yYYbbph23333tNFGG6VXXnklLV26dE1PBQAAAA07dMc63CNHjqy077vf/W7adtttU/fu3dPOO++cZsyYUUQbAQAAoH6H7quuuip
tsskm5Y/Hjx+frr322nTDDTekJ598MrVu3Tqdf/75RbUTAAAA6u+Y7pdeein17t27/PHtt9+eDj300HTcccflxxdddFE64YQTimklAAAA1OdK98cff5w23njj8sePPfZY2nvvvcsfRzfzmTNn1nwLAQAAoL5Xurfeeus0efLk/HPOnDnp+eefT3vttVf58xG4W7WqQzNXAgAArIhZ96lroXvgwIHplFNOyWH7gQceSF26dEm9evWqVPmOydQAAACA1QzdP/7xj9OCBQvSrbfemtq3b5/+/Oc/V3r+0UcfTccee2x1TwcAAAD1XrVDd+PGjdMFF1yQt6osG8IBAACgoav2RGoAAADA6hG6AQAAoCBCNwAAABRE6AYAAIC6EroffPDBYloCAAAADT10H3jggelzn/tcuvDCC9OMGTNqpBGjR49OnTt3Ts2bN0977LFHmjRpUrVed/PNN6dGjRqlww47rEbaAQAAALUaut988800ePDgdMstt6Rtt9029e/fP/3pT39KixYtWqMGjBs3Lg0ZMiQNHz48TZkyJfXo0SOfc9asWSt93WuvvZbOOOOM9MUvfnGN3hcAAADqXOhu06ZN+tGPfpSefvrp9M9//jPtsMMO6eSTT05bbLFF+uEPf5ieeeaZ1TrfyJEj06BBg9IJJ5yQunXrlsaMGZNatmyZxo4du8LXLFmyJB133HHp/PPPz8EfAAAA6t1EarvttlsaOnRornx/9NFHOSj36tUrV5+ff/75Vb4+quOTJ09O/fr1+78GNW6cHz/++OMrfN0FF1yQ2rZtm0488cRVvsfChQvTvHnzKm0AAABQZ0P34sWLc/fygw8+OG299dbp3nvvTb/5zW/SO++8k15++eW876ijjlrleebMmZOr1u3atau0Px7PnDmzytc88sgj6ZprrklXX311tdo6YsSI1KpVq/KtU6dO1fyUAAAAsJZD9w9+8IPUoUOH9L3vfS93LX/qqadyVfqkk05KG2ywQZ4Q7bLLLkvTpk1LNe3DDz9M3/72t3Pgjm7u1RGV+Llz55ZvNTX5GwAAAKzKemk1vfDCC+nXv/51OuKII1KzZs2qPCYCcXWWFovjmjRpkivkFcXj9u3bL3f8K6+8kidQ++pXv1q+b+nSpf//g6y3XnrxxRfzzOoVRRtX1E4AAACoU5XumGU8uo4vG2Q//fTT9I9//KM8AO+zzz6rPFfTpk3zGPAJEyZUCtHxuG/fvssd36VLl/Tss8/mSdxK29e+9rW033775d91HQcAAGCdrnRHwH377bfzRGYVRdfteC7GaK+OWC5s4MCBqXfv3qlPnz5p1KhRaf78+Xk28zBgwIDUsWPHPDY71vHeeeedK72+devW+eey+wEAAGCdC91lZWWpUaNGy+1/991385ju1XXMMcek2bNnp2HDhuXJ03r27JnGjx9fPrna9OnT84zmAAAAUG9Dd4zhDhG4jz/++Erdy6O6/e9//zvtueeea9SIWHIstqpMnDhxpa+97rrr1ug9AQAAoM6E7lhuq1Tp3mijjVKLFi0qjc3+/Oc/nwYNGlRMKwEAACjU1C5dU13RddrU1OBC97XXXpt/xpJgZ5xxxhp1JQcAAICGZL01mb0cAAAAqKHQvdtuu+VlvDbZZJO06667VjmRWsmUKVOqc0oAAACo96oVug899NDyidMOO+ywotsEAAAADSd0V+xSrns5AAAAVM9qL4A9Y8aM9MYbb5Q/njRpUjrttNPSVVddtbqnAgAAgHpttUP3N7/5zfTggw/m32fOnJn69euXg/dPfvKTdMEFFxTRRgAAAGgYofu5555Lffr0yb//6U9/St27d0+PPfZYuummm9J1111XRBsBAACgYYTuxYsXl0+qdv/996evfe1r+fcuXbqkt99+u+ZbCAAAAA0ldO+0005pzJgx6eGHH0733XdfOvDAA/P+t956K2222WZFtBEAAAAaRui+5JJL0pVXXpn23XffdOyxx6YePXrk/Xf
ccUd5t3MAAACgmkuGlZSVlaVtt902TZ8+PX366adpk002KX/uu9/9bmrZsmURbQQAAID6X+mO0L3ddtvlWcsrBu7QuXPn1LZt25puHwAAADSM0N24ceO0/fbbp3fffbe4FgEAAEBDHdN98cUXpzPPPDMvHQYAAADU0JjuMGDAgLRgwYI8gVrTpk1TixYtKj3/3nvvre4pAQAAoF5a7dA9atSoYloCAAAADT10Dxw4sJiWAAAAQEMf0x1eeeWV9L//+795ne5Zs2blfffcc096/vnna7p9AAAA0HBC90MPPZS6d++e/vnPf6Zbb701ffTRR3n/M888k4YPH15EGwEAAKBhhO6zzz47XXjhhem+++7LE6mV7L///umJJ56o6fYBAABAwwndzz77bDr88MOX29+2bds0Z86cmmoXAAAANLzQ3bp16/T2228vt/+pp55KHTt2rKl2AQAAQMML3d/4xjfSWWedlWbOnJkaNWqUli5dmh599NF0xhln5DW8AQAAgDUM3RdddFHq0qVL6tSpU55ErVu3bmnvvfdOe+65Z57RHAAAAFjDdbpj8rSrr746nXvuuem5557LwXvXXXdN22+//eqeCgAAAOq11Q7djzzySPrCF76Qttpqq7wBAAAANdS9PJYG22abbdI555yTXnjhhdV9OQAAADQYqx2633rrrXT66aenhx56KO28886pZ8+e6dJLL01vvPFGMS0EAACAhhK627RpkwYPHpxnLH/llVfSUUcdla6//vrUuXPnXAUHAAAA1jB0VxTdzM8+++x08cUXp+7du+fqNwAAAPAZQ3dUuk8++eTUoUOH9M1vfjN3Nb/rrrvW9HQAAABQ76z27OVDhw5NN998cx7bfcABB6TLL788HXrooally5bFtBAAAAAaSuj+xz/+kc4888x09NFH5/HdAAAAQA2F7uhWDgAAANRQ6L7jjjvSQQcdlNZff/38+8p87Wtfq84pAQAAoN6rVug+7LDD0syZM1Pbtm3z7yvSqFGjtGTJkppsHwAAANTv0L106dIqfwcAAAAKWqcbAAAAqKGJ1KLKfd1116Vbb701vfbaa7k7+TbbbJO+/vWvp29/+9v5MQAAALCale6ysrI8SdpJJ52U3nzzzdS9e/e00047pddffz0df/zx6fDDD6/uqQAAAKBBqHalOyrcsUb3hAkT0n777VfpuQceeCBPsHbDDTekAQMGFNFOAAAAqL+V7j/+8Y/pnHPOWS5wh/333z+dffbZ6aabbqrp9gEAAED9D93//ve/04EHHrjC52Md72eeeaam2gUAAAANJ3S/9957qV27dit8Pp57//33a6pdAAAA0HBC95IlS9J66614CHiTJk3Sp59+WlPtAgAAgIYzkVrMXh6zlDdr1qzK5xcuXFiT7QIAAICGE7oHDhy4ymPMXA4AAABrELqvvfba6h4KAAAArM6YbgAAAGD1CN0AAABQEKEbAAAA6nPoHj16dOrcuXNq3rx52mOPPdKkSZNWeOytt96aevfunVq3bp022GCD1LNnz3TjjTeu1fYCAADAOhG6x40bl4YMGZKGDx+epkyZknr06JH69++fZs2aVeXxm266afrJT36SHn/88fTvf/87nXDCCXm7995713rbAQAAoE6H7pEjR6ZBgwbl4NytW7c0ZsyY1LJlyzR27Ngqj993333T4Ycfnrp27Zo+97nPpVNPPTXtsssu6ZFHHlnrbQcAAIA6G7oXLVqUJk+enPr16/d/DWrcOD+OSvaqlJWVpQkTJqQXX3wx7b333lUes3DhwjRv3rxKGwAAANT70D1nzpy0ZMmS1K5du0r74/HMmTNX+Lq5c+emDTfcMDVt2jQdcsgh6de//nU64IADqjx2xIgRqVWrVuVbp06davxzAAAAQJ3sXr4mNtpoo/T000+nJ598Mv3sZz/LY8InTpxY5bFDhw7NIb20zZgxY623FwAAgIZpvdp88zZt2qQmTZqkd955p9L+eNy+ffs
Vvi66oG+33Xb595i9fOrUqbmiHeO9l9WsWbO8AQAAQIOqdEf38F69euVx2SVLly7Nj/v27Vvt88RrYuw2AAAA1CW1WukO0TV84MCBee3tPn36pFGjRqX58+fn2czDgAEDUseOHXMlO8TPODZmLo+gfffdd+d1uq+44opa/iQAAABQx0L3Mccck2bPnp2GDRuWJ0+L7uLjx48vn1xt+vTpuTt5SQTyk08+Ob3xxhupRYsWqUuXLun3v/99Pg8AAADUJbUeusPgwYPzVpVlJ0i78MIL8wawSue1SnXGeXNruwUAANSCdXL2cgAAAFgXCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAD1eckwAKhNnc++K9UVrzWv7RYAADVJpRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgZi8HWAu6X9891QXPDny2tpsAANCgqHQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUJD1ijoxAHXP1C5dU13RddrU2m4CAEDhVLoBAACgICrdAADroM5n35Xqitea13YLAOoulW4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQn0P36NGjU+fOnVPz5s3THnvskSZNmrTCY6+++ur0xS9+MW2yySZ569ev30qPBwAAgAYbuseNG5eGDBmShg8fnqZMmZJ69OiR+vfvn2bNmlXl8RMnTkzHHntsevDBB9Pjjz+eOnXqlL785S+nN998c623HQAAAOp06B45cmQaNGhQOuGEE1K3bt3SmDFjUsuWLdPYsWOrPP6mm25KJ598curZs2fq0qVL+t3vfpeWLl2aJkyYsNbbDgAAAHU2dC9atChNnjw5dxEvb1DjxvlxVLGrY8GCBWnx4sVp0003rfL5hQsXpnnz5lXaAAAAoN6H7jlz5qQlS5akdu3aVdofj2fOnFmtc5x11llpiy22qBTcKxoxYkRq1apV+Rbd0QEAAKBBdC//LC6++OJ08803p9tuuy1PwlaVoUOHprlz55ZvM2bMWOvtBAAAoGFarzbfvE2bNqlJkybpnXfeqbQ/Hrdv336lr73sssty6L7//vvTLrvsssLjmjVrljcAAABoUJXupk2bpl69elWaBK00KVrfvn1X+Lqf//zn6ac//WkaP3586t2791pqLQAAAKxDle4Qy4UNHDgwh+c+ffqkUaNGpfnz5+fZzMOAAQNSx44d89jscMkll6Rhw4alP/zhD3lt79LY7w033DBvAAAAUFfUeug+5phj0uzZs3OQjgAdS4FFBbs0udr06dPzjOYlV1xxRZ71/Otf/3ql88Q63+edd95abz8AAADU2dAdBg8enLeqTJw4sdLj1157bS21CgAAABrw7OUAAABQlwndAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAqyXlEnhvpuapeuqa7oOm1qbTcBAACogko3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwA
AAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgvobu0aNHp86dO6fmzZunPfbYI02aNGmFxz7//PPpyCOPzMc3atQojRo1aq22FQAAANaZ0D1u3Lg0ZMiQNHz48DRlypTUo0eP1L9//zRr1qwqj1+wYEHadttt08UXX5zat2+/1tsLAAAA60zoHjlyZBo0aFA64YQTUrdu3dKYMWNSy5Yt09ixY6s8fvfdd0+XXnpp+sY3vpGaNWu21tsLAAAA60ToXrRoUZo8eXLq16/f/zWmceP8+PHHH6+tZgEAAECNWa+23njOnDlpyZIlqV27dpX2x+Np06bV2PssXLgwbyXz5s2rsXMDAABAnQzda8uIESPS+eefX9vNgAaj89l3pbritea13QIAABq6Wute3qZNm9SkSZP0zjvvVNofj2tykrShQ4emuXPnlm8zZsyosXMDAABAnQzdTZs2Tb169UoTJkwo37d06dL8uG/fvjX2PjHh2sYbb1xpAwAAgHrfvTyWCxs4cGDq3bt36tOnT153e/78+Xk28zBgwIDUsWPH3EW8NPnaCy+8UP77m2++mZ5++um04YYbpu222642PwoAAADUrdB9zDHHpNmzZ6dhw4almTNnpp49e6bx48eXT642ffr0PKN5yVtvvZV23XXX8seXXXZZ3vbZZ580ceLEWvkMAAAAUGcnUhs8eHDeqrJskO7cuXMqKytbSy0DAACAdXRMNwAAANR3QjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUJD1ijoxAABARZ3PvivVFa81r+0W0FCodAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAOpz6B49enTq3Llzat68edpjjz3SpEmTVnr8n//859SlS5d8fPfu3dPdd9+91toKAAAA60zoHjduXBoyZEgaPnx4mjJlSurRo0fq379/mjVrVpXHP/bYY+nYY49NJ554YnrqqafSYYcdlrfnnnturbcdAAAA6nToHjlyZBo0aFA64YQTUrdu3dKYMWNSy5Yt09ixY6s8/vLLL08HHnhgOvPMM1PXrl3TT3/607Tbbrul3/zmN2u97QAAAFBnQ/eiRYvS5MmTU79+/f6vQY0b58ePP/54la+J/RWPD1EZX9HxAAAAUFvWq7V3TinNmTMnLVmyJLVr167S/ng8bdq0Kl8zc+bMKo+P/VVZuHBh3krmzp2bf86bN68GPkEDsrAs1Rl15G/30ZIlqa6oS9fz0oULUl0xr1HduW6XfFw3rhfXbdVct1Vz3S7PdVs11+3yXLdVc91WzXVbt6/bVbWxrKys7obutWHEiBHp/PPPX25/p06daqU91ICLW9V2C+qeVr6TqtStb2Vqqgv6pDrEdVuluvWtuG6X47qtUt36Vly3y3HdVqlufSuu23X5uv3www9Tq5W0t1ZDd5s2bVKTJk3SO++8U2l/PG7fvn2Vr4n9q3P80KFD80RtJUuXLk3vvfde2myzzVKjRo1q5HOwdu8mxQ2TGTNmpI033ri2mwPV4rplXeS6ZV3kumVd5Lpdd0WFOwL3Flt
ssdLjajV0N23aNPXq1StNmDAhz0BeCsXxePDgwVW+pm/fvvn50047rXzffffdl/dXpVmzZnmrqHXr1jX6OVj74l9I/qXEusZ1y7rIdcu6yHXLush1u25aWYW7znQvjyr0wIEDU+/evVOfPn3SqFGj0vz58/Ns5mHAgAGpY8eOuZt4OPXUU9M+++yTfvGLX6RDDjkk3Xzzzelf//pXuuqqq2r5kwAAAEAdC93HHHNMmj17dho2bFieDK1nz55p/Pjx5ZOlTZ8+Pc9oXrLnnnumP/zhD+l///d/0znnnJO233779Ne//jXtvPPOtfgpAAAAoA6G7hBdyVfUnXzixInL7TvqqKPyRsMTQwWGDx++3JABqMtct6yLXLesi1y3rItct/Vfo7JVzW8OAAAArJH/67cNAAAA1CihGwAAAAoidAMAAEBBhG7WGaNHj06dO3dOzZs3T3vssUeaNGlSbTcJVuof//hH+upXv5q22GKL1KhRo7zSAtRlsTzn7rvvnjbaaKPUtm3bdNhhh6UXX3yxtpsFK3XFFVekXXbZpXyN4759+6Z77rmntpsFq+Xiiy/O/61w2mmn1XZTKIDQzTph3LhxeU33mNlxypQpqUePHql///5p1qxZtd00WKH58+fnazVuGMG64KGHHkqnnHJKeuKJJ9J9992XFi9enL785S/naxnqqi233DIHlsmTJ6d//etfaf/990+HHnpoev7552u7aVAtTz75ZLryyivzzSPqJ7OXs06IynZUX37zm9/kx0uXLk2dOnVKP/jBD9LZZ59d282DVYq717fddluuHMK6Yvbs2bniHWF87733ru3mQLVtuumm6dJLL00nnnhibTcFVuqjjz5Ku+22W/rtb3+bLrzwwtSzZ880atSo2m4WNUylmzpv0aJF+e51v379yvc1btw4P3788cdrtW0A9dncuXPLAwysC5YsWZJuvvnm3DsjuplDXRe9iw455JBK/51L/bNebTcAVmXOnDn5/0TbtWtXaX88njZtWq21C6A+ix5FMbZwr732SjvvvHNtNwdW6tlnn80h+5NPPkkbbrhh7lnUrVu32m4WrFTcIIphk9G9nPpN6AYAqqy+PPfcc+mRRx6p7abAKu24447p6aefzr0zbrnlljRw4MA8LELwpq6aMWNGOvXUU/P8GTFJMPWb0E2d16ZNm9SkSZP0zjvvVNofj9u3b19r7QKorwYPHpzuvPPOPAN/TFIFdV3Tpk3Tdtttl3/v1atXrhxefvnleXIqqIti6GRMCBzjuUuiZ2f8ezfmMFq4cGH+71/qB2O6WSf+jzT+D3TChAmVuj3GY+O1AGpOzK0agTu65j7wwANpm222qe0mwRqJ/06I0AJ11Ze+9KU8LCJ6aJS23r17p+OOOy7/LnDXLyrdrBNiubDoKhb/MurTp0+e1TEmSTnhhBNqu2mw0hlJX3755fLHr776av4/0piUaquttqrVtsGKupT/4Q9/SLfffnteq3vmzJl5f6tWrVKLFi1qu3lQpaFDh6aDDjoo/3v1ww8/zNfwxIkT07333lvbTYMVin/HLjtfxgYbbJA222wz82jUQ0I364RjjjkmL10zbNiw/B+BsZzC+PHjl5tcDeqSWC92v/32q3TzKMQNpOuuu64WWwZVu+KKK/LPfffdt9L+a6+9Nh1//PG11CpYueiiO2DAgPT222/nG0Sx1nEE7gMOOKC2mwaQWacbAAAACmJMNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBoA66vjjj0+NGjXK2/rrr5+22Wab9OMf/zh98skntd00AKCa1qvugQDA2nfggQema6+9Ni1evDhNnjw5DRw4MIfwSy65pLabBgBUg0o3ANRhzZo1S+3bt0+dOnVKhx12WOrXr1+677778nNLly5NI0aMyBXwFi1apB49eqRbbrml/LXvv/9+Ou6449Lmm2+
en99+++1zgA+vvfZaDu8333xz2nPPPVPz5s3TzjvvnB566KFK7x+P+/Tpk9vRoUOHdPbZZ6dPP/20/Pl99903/fCHP8wV+E033TS39bzzzit/vqysLD/eaqut8jm22GKLfHzJwoUL0xlnnJE6duyYNthgg7THHnukiRMnFvqdAsDaJHQDwDriueeeS4899lhq2rRpfhyB+4YbbkhjxoxJzz//fPrRj36UvvWtb5UH53PPPTe98MIL6Z577klTp05NV1xxRWrTpk2lc5555pnp9NNPT0899VTq27dv+upXv5refffd/Nybb76ZDj744LT77runZ555Jr/+mmuuSRdeeGGlc1x//fU5MP/zn/9MP//5z9MFF1xQfmPgL3/5S/rlL3+ZrrzyyvTSSy+lv/71r6l79+7lrx08eHB6/PHHc/j/97//nY466qhc3Y9jAaA+aFQWt6ABgDo5pvv3v/99rkJHdTmqwo0bN05/+tOf0le+8pVcWb7//vtzWC456aST0oIFC9If/vCH9LWvfS2H7LFjxy537qh0R4X84osvTmeddVbeF+8R+37wgx/kyvVPfvKTHJojsEdVPPz2t7/Nx8+dOze3JSrdS5YsSQ8//HD5uaMyvv/+++dzjxw5MgfuuGEQ49Irmj59etp2223zz6iAl0Q1P85x0UUXFfK9AsDaZEw3ANRh++23X64wz58/P1eM11tvvXTkkUfmynaE6wMOOKDS8YsWLUq77rpr/v373/9+PnbKlCnpy1/+cu6eHl3JK6oY2OPcvXv3ziE7xM94vhS4w1577ZU++uij9MYbb+Qu42GXXXapdM7ohj5r1qz8e1SuR40alcN1VLCjch7V9HivZ599Ngf2HXbYodLr4+bCZpttVkPfIADULqEbAOqw6La93Xbb5d+jYh3jtqOLd4y/DnfddVceD11RjJ0OBx10UHr99dfT3Xffnbt7f+lLX0qnnHJKuuyyy2q0jctWsCOkx3jzEGPRX3zxxVyRjzacfPLJ6dJLL81d4CO8N2nSJE8QFz8r2nDDDWu0jQBQW4zpBoB1RHTnPuecc9L//u//pm7duuVwHV2zI5RX3CLolsQkajHjeXRTj4rzVVddVemcTzzxRPnv0b08AnDXrl3z4/gZ460rjkR79NFH00YbbZS23HLLarc7JnGL6vavfvWrPElanDOq3FGRj0p3VMWX/QwxIRsA1Acq3QCwDonu2jH5WYyTjlm/Y/K0qCp/4QtfyOOsIxRvvPHGOWgPGzYs9erVK+200065y/add95ZHqhLRo8enWc1j/3RfT1mPP/Od76Tn4uqdAT1GOMdE55FxXr48OFpyJAh+QZAdVx33XU5WMes5C1btszhP0L41ltvnbuQx+zqAwYMSL/4xS9yCJ89e3aaMGFC7rJ+yCGHFPIdAsDaJHQDwDokxkJHAI5Zwl999dVcyY5ZzP/73/+m1q1bp9122y1Xw0PMcj506NA8aVoE3S9+8Yt5lvCKYrKz2J5++ulcYb7jjjvKZziPbuvRNT1CfnRrj4nbTjzxxFxpr65oU5w/gnqE75i5/G9/+1v5mO1YwixmQ48Z1GO29Hjvz3/+83miOACoD8xeDgANUGn28lgqrGfPnrXdHACot4zpBgAAgIII3QAAAFAQ3csBAACgICrdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAACQivH/AKoDC46N7J+pAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "scores = [ensemble_diversity(s_arr) for s_arr in input_strs]\n", "labels = [str(number) for number in range(1, len(input_strs) + 1)]\n", "\n", - "plt.figure(figsize=(8, 5))\n", - "plt.bar(labels, scores)\n", + "df = pd.DataFrame(\n", + " scores,\n", + " columns=['Cosine-Inv', 'Compression', 'Edit', 'Ensembled']\n", + ")\n", + "\n", + "plt.figure(figsize=(10, 6))\n", + "\n", + "x = range(len(df))\n", + "width = 0.2\n", + "\n", + "plt.bar([i - width*1.5 for i in x], df['Cosine-Inv'], width=width, label='Cosine-Inv')\n", + "plt.bar([i - width*0.5 for i in x], df['Compression'], width=width, label='Compression')\n", + "plt.bar([i + width*0.5 for i in x], df['Edit'], width=width, label='Edit')\n", + "plt.bar([i + width*1.5 for i in x], df['Ensembled'], width=width, label='Ensembled')\n", + "\n", + "plt.xticks(x, df.index if 'Input' not in df.columns else df['Input'])\n", + "\n", "plt.xlabel(\"Response\")\n", "plt.ylabel(\"Diversity Score\")\n", + "plt.legend()\n", + "plt.title(\"Comparison of Diversity Metrics\")\n", "plt.tight_layout()\n", "plt.show()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example above:\n", + "- 1. Base case of just the same sentence every time\n", + "- 2. Asking the LLM 10 times for a joke (with the default temp being low, this should output the same joke most of the time)\n", + "- 3. Asking the LLM once for 10 different jokes\n", + "- 4. Asking the LLM 4 different times to create a story about 4 random topics (pre-selected)\n", + "- 5. 
Pre-selected LLM call, where it was told to produce 4, ~500 word paragraphs where it explained the exact same thing in a different way\n" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1091,11 +1233,16 @@ " - tune added metrics\n", "- combination of validation/hallucination metric + ensembled diversity metric -> score" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "ember_upgrade", + "display_name": ".venv", "language": "python", "name": "python3" }, @@ -1109,7 +1256,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.11" + "version": "3.11.9" } }, "nbformat": 4, From 36f3b13522e2f220a85b30ce564b822f1f03244c Mon Sep 17 00:00:00 2001 From: connorchow Date: Wed, 2 Apr 2025 00:36:10 -0700 Subject: [PATCH 05/14] fixing+adding evaluators/embbedding model/operator and updating testbench --- .../operator/core/diversity_scorer.py | 55 ++++++++++++ src/ember/core/utils/embedding_utils.py | 6 +- src/ember/core/utils/eval/evaluators.py | 49 +++++++++-- src/ember/examples/diversity_testbench.ipynb | 87 ++++++++++--------- 4 files changed, 145 insertions(+), 52 deletions(-) create mode 100644 src/ember/core/registry/operator/core/diversity_scorer.py diff --git a/src/ember/core/registry/operator/core/diversity_scorer.py b/src/ember/core/registry/operator/core/diversity_scorer.py new file mode 100644 index 00000000..3112f83a --- /dev/null +++ b/src/ember/core/registry/operator/core/diversity_scorer.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from collections import Counter +from typing import List, Optional + +from ember.core.registry.operator.base.operator_base import Operator +from ember.core.registry.specification.specification import Specification +from ember.core.types import EmberModel + +from ember.core.utils.eval.evaluators import DiversityEnsembledEvaluator +from ember.core.utils.embedding_utils import 
Text_Embedding_Ada_002_Model +from ember.core.registry.model.base.services.model_service import ModelService + + +class DiversityScoringOperatorInputs(EmberModel): + """Input model for DiversityScoringOperator. + + Attributes: + responses (List[str]): A list of response strings. + """ + + responses: List[str] + model_service: ModelService + + +class DiversityScoringOperatorOutputs(EmberModel): + """Output model for DiversityScoringOperator. + + Attributes: + responses (List[str]): A list of response strings. + diversity score (int): A score representing the diversity between all responses. + + """ + + responses: List[str] + diversity_score: int + + +class DiversityScoringOperator( + Operator[DiversityScoringOperatorInputs, DiversityScoringOperatorOutputs] +): + """Operator to aggregate all responses and run a score of a diversity-based metric.""" + + specification: Specification = Specification( + input_model=DiversityScoringOperatorInputs, + structured_output=DiversityScoringOperatorOutputs, + ) + + def forward( + self, *, inputs: DiversityScoringOperatorInputs + ) -> DiversityScoringOperatorOutputs: + if not inputs.responses or not inputs.model_service: + return {"responses": None, "diversity_score": 0} + + return {"responses": inputs.responses, "divserity_score": DiversityEnsembledEvaluator().evaluate(inputs.responses, embedding_model=Text_Embedding_Ada_002_Model(llm=inputs.model_service))['score']} \ No newline at end of file diff --git a/src/ember/core/utils/embedding_utils.py b/src/ember/core/utils/embedding_utils.py index c5eba46f..963f27ad 100644 --- a/src/ember/core/utils/embedding_utils.py +++ b/src/ember/core/utils/embedding_utils.py @@ -70,8 +70,10 @@ class Text_Embedding_Ada_002_Model: Methods: embed_text: Compute the embedding for a given text. 
""" + def __init__(self, llm: ModelService): + self.llm = llm - def embed_text(self, llm: ModelService, text: str) -> List[float]: + def embed_text(self, text: str) -> List[float]: """Computes the embedding vector for the provided text. Args: @@ -80,7 +82,7 @@ def embed_text(self, llm: ModelService, text: str) -> List[float]: Returns: List[float]: A list of floats representing the embedding vector. """ - response = llm(model_id="openai:text-embedding-ada-002", prompt=text) + response = self.llm(model_id="openai:text-embedding-ada-002", prompt=text) return response.embedding class OpenAITextEmbedding3(Protocol): diff --git a/src/ember/core/utils/eval/evaluators.py b/src/ember/core/utils/eval/evaluators.py index 4f35c0c8..5ffbae1f 100644 --- a/src/ember/core/utils/eval/evaluators.py +++ b/src/ember/core/utils/eval/evaluators.py @@ -6,10 +6,11 @@ from .base_evaluator import EvaluationResult, IEvaluator from .extractors import RegexExtractor +# diversity imports from diversity import compression_ratio import Levenshtein import numpy as np -from ember.core.utils.embedding_utils import EmbeddingModel +from ember.core.utils.embedding_utils import EmbeddingModel, CosineSimilarity, calculate_text_similarity T_out = TypeVar("T_out") T_truth = TypeVar("T_truth") @@ -205,24 +206,54 @@ def evaluate( # Composite Evaluator Example - -class CosineSimilarityScoringEvaluator(IEvaluator[List[str], None]): +class DiversityEnsembledEvaluator(IEvaluator[List[str], None]): """ Evaluator to test ensemble outputs -> score them (float) """ def evaluate( self, - system_output: List[str], + system_output: List[str], embedding_model: EmbeddingModel, **kwargs) -> EvaluationResult: - if system_output is None or len(system_output) == 0: + if system_output is None or len(system_output) == 0 or embedding_model == None: return EvaluationResult(is_correct=False, score=-1) + if len(system_output) == 1: + return EvaluationResult(is_correct=True, score=0) + + + div_cosine = 1 - 
DiversityCosineSimilarityEvaluator().evaluate(system_output, embedding_model)['score'] + div_compression = min(DiversityCosineSimilarityEvaluator().evaluate(system_output)['score'], 1) + div_edit = DiversityEditDistanceEvaluator.evaluate(system_output)['score'] + + div_ensemble_score = (div_cosine + div_compression + div_edit)/3 - # example I was thinking about: - letter_sum = sum(len(response) for response in system_output) - ratio = 1/compression_ratio(system_output, algorithm="gzip") * min(1, len(system_output)/5) * min(1, letter_sum/100) + return EvaluationResult(is_correct=True, + score=div_ensemble_score, + metadata = {'responses': system_output}) +class DiversityCosineSimilarityEvaluator(IEvaluator[List[str], None]): + """ + Evaluator to test ensemble outputs -> score them (float) + """ + + def evaluate( + self, + system_output: List[str], embedding_model: EmbeddingModel, + **kwargs) -> EvaluationResult: + if system_output is None or len(system_output) == 0 or embedding_model == None: + return EvaluationResult(is_correct=False, score=-1) + if len(system_output) == 1: + return EvaluationResult(is_correct=True, score=0) + + cosine: CosineSimilarity = CosineSimilarity() + + cosine_scores = list() + for ind1 in range(len(system_output)): + ind2 = ind1+1 if ind1+1 != len(system_output) else 0 + curr_score = calculate_text_similarity(text1=system_output[ind1], text2=system_output[ind2], model=embedding_model, metric=cosine) + cosine_scores.append(curr_score) + avg_cosine_score = np.average(cosine_scores) return EvaluationResult(is_correct=True, - score=ratio, + score=avg_cosine_score, metadata = {'responses': system_output}) diff --git a/src/ember/examples/diversity_testbench.ipynb b/src/ember/examples/diversity_testbench.ipynb index d184d1fa..a7eee462 100644 --- a/src/ember/examples/diversity_testbench.ipynb +++ b/src/ember/examples/diversity_testbench.ipynb @@ -172,9 +172,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "---\n", - "\n", - "### Model 
Registry checks (**OPTIONAL**)\n", + "### ModelRegistry checks (**OPTIONAL**)\n", "\n", "From the code above, it should auto add models from your config files (which can displayed from printing below), but you can also add your own models as shown below!" ] @@ -276,7 +274,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Try model registry (**OPTIONAL**)\n", + "### Test example of ModelRegistry (**OPTIONAL**)\n", "taken from `src/ember/core/registry/model/examples/example.py`" ] }, @@ -417,14 +415,21 @@ "---\n", "---\n", "\n", - "## Neural Similarity Scoring - Cosine Similarity\n", + "## Diversity Scores" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Neural Similarity Scoring - Cosine Similarity\n", "\n", "- from `src/ember/core/utils/embedding_utils.py`" ] }, { "cell_type": "code", - "execution_count": 75, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -478,42 +483,42 @@ " response = llm(model_id=\"openai:text-embedding-ada-002\", prompt=text)\n", " return response.embedding\n", "\n", - "class Text_Embedding_3_EmbeddingModel(Protocol):\n", - " \"\"\"Interface for embedding models.\n", + "# class Text_Embedding_3_EmbeddingModel(Protocol):\n", + "# \"\"\"Interface for embedding models.\n", "\n", - " This protocol defines the minimal interface required to compute a text\n", - " embedding. Implementations may use local models, external APIs, or custom\n", - " neural networks.\n", + "# This protocol defines the minimal interface required to compute a text\n", + "# embedding. 
Implementations may use local models, external APIs, or custom\n", + "# neural networks.\n", "\n", - " Methods:\n", - " embed_text: Compute the embedding for a given text.\n", - " \"\"\"\n", + "# Methods:\n", + "# embed_text: Compute the embedding for a given text.\n", + "# \"\"\"\n", "\n", - " def __init__(self, api_key: str = None):\n", - " \"\"\"Initializes the embedding model with the OpenAI API key.\n", + "# def __init__(self, api_key: str = None):\n", + "# \"\"\"Initializes the embedding model with the OpenAI API key.\n", "\n", - " Args:\n", - " api_key (str): OpenAI API key for authentication.\n", - " \"\"\"\n", - " self.api_key = api_key or os.environ.get(\"OPENAI_API_KEY\")\n", - " if not self.api_key:\n", - " raise ValueError(\"OpenAI API key must be provided or set in the environment variable OPENAI_API_KEY.\")\n", - " openai.api_key = self.api_key\n", + "# Args:\n", + "# api_key (str): OpenAI API key for authentication.\n", + "# \"\"\"\n", + "# self.api_key = api_key or os.environ.get(\"OPENAI_API_KEY\")\n", + "# if not self.api_key:\n", + "# raise ValueError(\"OpenAI API key must be provided or set in the environment variable OPENAI_API_KEY.\")\n", + "# openai.api_key = self.api_key\n", "\n", - " def embed_text(self, text: str) -> List[float]:\n", - " \"\"\"Computes the embedding vector for the provided text.\n", + "# def embed_text(self, text: str) -> List[float]:\n", + "# \"\"\"Computes the embedding vector for the provided text.\n", "\n", - " Args:\n", - " text (str): The text to be embedded.\n", + "# Args:\n", + "# text (str): The text to be embedded.\n", "\n", - " Returns:\n", - " List[float]: A list of floats representing the embedding vector.\n", - " \"\"\"\n", - " response = openai.Embedding.create(\n", - " model=\"text-embedding-3\",\n", - " input=text\n", - " )\n", - " return response[\"data\"][0][\"embedding\"]\n", + "# Returns:\n", + "# List[float]: A list of floats representing the embedding vector.\n", + "# \"\"\"\n", + "# response = 
openai.Embedding.create(\n", + "# model=\"text-embedding-3\",\n", + "# input=text\n", + "# )\n", + "# return response[\"data\"][0][\"embedding\"]\n", "\n", "\n", "class MockEmbeddingModel:\n", @@ -674,9 +679,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Compression Ratio\n", + "### Compression Ratio\n", "\n", - "from `src/ember/core/utils/eval/evaluators.py`" + "- from `src/ember/core/utils/eval/evaluators.py`" ] }, { @@ -839,7 +844,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Edit Distance" + "### Edit Distance" ] }, { @@ -930,7 +935,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Novelty Score (WIP)" + "### Novelty Score (WIP)" ] }, { @@ -1012,7 +1017,7 @@ "---\n", "---\n", "\n", - "## Putting it all together" + "## Ensembled Score Example" ] }, { @@ -1227,7 +1232,7 @@ "source": [ "---\n", "---\n", - "## Potential other cases to explore\n", + "## Potential Exploration\n", "- work ensembling all \"diversity\" related metrics \n", " - add more metrics\n", " - tune added metrics\n", From 6183e2835f59d9ea5225140086dfcd1e59ddf21c Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Wed, 2 Apr 2025 15:33:22 -0700 Subject: [PATCH 06/14] changes to evaluators.py and cosine similarity --- src/ember/core/utils/eval/evaluators.py | 246 +++++++++++++++++------- 1 file changed, 174 insertions(+), 72 deletions(-) diff --git a/src/ember/core/utils/eval/evaluators.py b/src/ember/core/utils/eval/evaluators.py index 5ffbae1f..be6b6f65 100644 --- a/src/ember/core/utils/eval/evaluators.py +++ b/src/ember/core/utils/eval/evaluators.py @@ -10,7 +10,10 @@ from diversity import compression_ratio import Levenshtein import numpy as np -from ember.core.utils.embedding_utils import EmbeddingModel, CosineSimilarity, calculate_text_similarity +from ember.core.utils.embedding_utils import (EmbeddingModel, + CosineSimilarity, + calculate_text_similarity, + Text_Embedding_Ada_002_Model) T_out = TypeVar("T_out") T_truth = TypeVar("T_truth") @@ 
-207,85 +210,156 @@ def evaluate( # Composite Evaluator Example class DiversityEnsembledEvaluator(IEvaluator[List[str], None]): + """Evaluator that combines multiple diversity metrics to assess ensemble output diversity. + + Computes diversity as an average of cosine similarity, compression ratio, and edit distance. + The higher this score is, the more diverse your text. + + Args: + system_output (List[str]): List of generated outputs from the system. + embedding_model (EmbeddingModel): The embedding model to compute cosine similarity. + + Returns: + EvaluationResult: Average of the three diversity scores with `is_correct=True`. """ - Evaluator to test ensemble outputs -> score them (float) - """ + def evaluate( - self, - system_output: List[str], embedding_model: EmbeddingModel, - **kwargs) -> EvaluationResult: - if system_output is None or len(system_output) == 0 or embedding_model == None: + self, + system_output: List[str], + embedding_model: EmbeddingModel, + **kwargs + ) -> EvaluationResult: + if not system_output or embedding_model is None: return EvaluationResult(is_correct=False, score=-1) + if len(system_output) == 1: return EvaluationResult(is_correct=True, score=0) - - - div_cosine = 1 - DiversityCosineSimilarityEvaluator().evaluate(system_output, embedding_model)['score'] - div_compression = min(DiversityCosineSimilarityEvaluator().evaluate(system_output)['score'], 1) - div_edit = DiversityEditDistanceEvaluator.evaluate(system_output)['score'] - - div_ensemble_score = (div_cosine + div_compression + div_edit)/3 - - return EvaluationResult(is_correct=True, - score=div_ensemble_score, - metadata = {'responses': system_output}) + + # Lower cosine similarity --> more diverse + cosine_score = 1.0 - DiversityCosineSimilarityEvaluator().evaluate(system_output, embedding_model).score + # higher compression score --> more diverse + compression_score = DiversityCompressionEvaluator().evaluate(system_output).score + # higher edit distance --> more diverse + 
edit_score = DiversityEditDistanceEvaluator().evaluate(system_output).score + + avg_diversity = (cosine_score + compression_score + edit_score) / 3 + + return EvaluationResult( + is_correct=True, + score=avg_diversity, + metadata={"responses": system_output} + ) + class DiversityCosineSimilarityEvaluator(IEvaluator[List[str], None]): + """Evaluator that computes average pairwise cosine similarity between outputs. + + Lower average cosine similarity implies greater semantic diversity. + + Args: + system_output (List[str]): List of generated outputs from the system. + embedding_model (EmbeddingModel): The embedding model used to compute cosine similarity. + + Returns: + EvaluationResult: Result with average similarity score and output metadata. """ - Evaluator to test ensemble outputs -> score them (float) - """ - + # TODO: + # def __init__(self, embedding_model): + # self.embedding_model = embedding_model + def evaluate( - self, - system_output: List[str], embedding_model: EmbeddingModel, - **kwargs) -> EvaluationResult: - if system_output is None or len(system_output) == 0 or embedding_model == None: + self, + system_output: List[str], + embedding_model: EmbeddingModel, + **kwargs + ) -> EvaluationResult: + if not system_output or embedding_model is None: return EvaluationResult(is_correct=False, score=-1) + if len(system_output) == 1: return EvaluationResult(is_correct=True, score=0) - - cosine: CosineSimilarity = CosineSimilarity() - cosine_scores = list() - for ind1 in range(len(system_output)): - ind2 = ind1+1 if ind1+1 != len(system_output) else 0 - curr_score = calculate_text_similarity(text1=system_output[ind1], text2=system_output[ind2], model=embedding_model, metric=cosine) - cosine_scores.append(curr_score) - avg_cosine_score = np.average(cosine_scores) - return EvaluationResult(is_correct=True, - score=avg_cosine_score, - metadata = {'responses': system_output}) + cosine = CosineSimilarity() + scores = [] + + # IDEA: Compute embedding vectors for all 
system_output --> get the average + # Then compute cosine similarity between all other outputs + + # Compare every possible combination of system_output vectors + for i in range(len(system_output)): + for j in range(i + 1, len(system_output)): + sim = calculate_text_similarity( + system_output[i], system_output[j], embedding_model, metric=cosine + ) + scores.append(sim) + + avg_score = float(np.average(scores)) + + return EvaluationResult( + is_correct=True, + score=avg_score, + metadata={"responses": system_output} + ) class DiversityCompressionEvaluator(IEvaluator[List[str], None]): + """Evaluator that measures diversity using a compression ratio heuristic. + + Lower compression ratio indicates higher textual diversity. The final score is scaled + based on a minimum number of responses (5) and minimum total character count (100). + + Args: + system_output (List[str]): List of generated responses. + + Returns: + EvaluationResult: Scaled diversity score based on compression. """ - Evaluator to test ensemble outputs -> score them (float) - """ + def evaluate( - self, - system_output: List[str], - **kwargs) -> EvaluationResult: - if system_output is None or len(system_output) == 0: + self, + system_output: List[str], + **kwargs + ) -> EvaluationResult: + if not system_output: return EvaluationResult(is_correct=False, score=-1) - # current compression ratio formula - scaled by min num of words (5 words) + min num of chars (min 100) - letter_sum = sum(len(response) for response in system_output) - ratio = 1/compression_ratio(system_output) * min(1, len(system_output)/5) * min(1, letter_sum/100) - return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output}) + total_chars = sum(len(r) for r in system_output) + # ratio = (size of compressed data) / (size of uncompressed data) + # Higher ratio is --> more diverse + ratio = 1 / compression_ratio(system_output) + # Penalize inputs with few words (hard to measure) and inputs with very few 
characters + # Note that this is a temporary patch for compression_ratio does not normalizing over word length + scaled_score = ratio * min(1, len(system_output) / 5) * min(1, total_chars / 100) + + return EvaluationResult( + is_correct=True, + score=scaled_score, + metadata={"responses": system_output} + ) class DiversityEditDistanceEvaluator: + """Evaluator that measures lexical diversity using normalized Levenshtein edit distance. + + Computes average pairwise normalized edit distance across all outputs. + + Args: + system_output (List[str]): List of generated responses. + + Returns: + EvaluationResult: Average normalized edit distance score. + """ def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult: - if system_output is None or len(system_output) == 0: + if not system_output: return EvaluationResult(is_correct=False, score=-1, metadata={}) - diversity_score = self.compute_distance(system_output) + score = self.compute_distance(system_output) return EvaluationResult( - is_correct=True, - score=diversity_score, - metadata={'responses': system_output} + is_correct=True, + score=score, + metadata={"responses": system_output} ) def compute_distance(self, outputs: List[str]) -> float: @@ -293,49 +367,77 @@ def compute_distance(self, outputs: List[str]) -> float: if n < 2: return 0.0 - total_distance = 0 - pairs = 0 + total_distance = 0.0 + num_pairs = 0 for i in range(n): for j in range(i + 1, n): dist = Levenshtein.distance(outputs[i], outputs[j]) max_len = max(len(outputs[i]), len(outputs[j])) - normalized_dist = dist / max_len if max_len > 0 else 0 - total_distance += normalized_dist - pairs += 1 - - return total_distance / pairs if pairs > 0 else 0.0 + norm_dist = dist / max_len if max_len > 0 else 0 + total_distance += norm_dist + num_pairs += 1 + + return total_distance / num_pairs if num_pairs > 0 else 0.0 + class DiversityNoveltyEvaluator: - - def evaluate(self, model: EmbeddingModel, system_output: List[str], **kwargs) -> 
EvaluationResult: - if not system_output or len(system_output) == 0: + """Evaluator that measures novelty of each output relative to previously generated ones. + + For each response, computes its cosine distance from all prior responses. + Higher novelty implies lower similarity to prior outputs. + + Args: + model (EmbeddingModel): Embedding model used for computing cosine similarity. + system_output (List[str]): List of outputs ordered by generation. + + Returns: + EvaluationResult: Average novelty score across the sequence. + """ + + def evaluate( + self, + model: EmbeddingModel, + system_output: List[str], + **kwargs + ) -> EvaluationResult: + if not system_output: return EvaluationResult(is_correct=False, score=-1, metadata={}) - novelty_scores = [self.compute_novelty(model, r, system_output[:i]) for i, r in enumerate(system_output)] + novelty_scores = [ + self.compute_novelty(model, r, system_output[:i]) + for i, r in enumerate(system_output) + ] - avg_novelty = sum(novelty_scores) / len(novelty_scores) if novelty_scores else 0.0 + avg_score = float(np.mean(novelty_scores)) if novelty_scores else 0.0 return EvaluationResult( is_correct=True, - score=avg_novelty, - metadata={'responses': system_output, 'novelty_scores': novelty_scores} + score=avg_score, + metadata={ + "responses": system_output, + "novelty_scores": novelty_scores + } ) - def compute_novelty(self, model: EmbeddingModel, response: str, prior_responses: List[str]) -> float: + def compute_novelty( + self, + model: EmbeddingModel, + response: str, + prior_responses: List[str] + ) -> float: if not prior_responses: return 1.0 - new_embedding = model.embed_text(response) - prior_embeddings = [model.embed_text(r) for r in prior_responses] + new_emb = model.embed_text(response) + prior_embs = [model.embed_text(r) for r in prior_responses] similarities = [ - np.dot(new_embedding, prior_embedding) / - (np.linalg.norm(new_embedding) * np.linalg.norm(prior_embedding)) - for prior_embedding in 
prior_embeddings + np.dot(new_emb, pe) / (np.linalg.norm(new_emb) * np.linalg.norm(pe)) + for pe in prior_embs ] - return 1 - max(similarities) + return 1.0 - max(similarities) class MultipleChoiceEvaluator(IEvaluator[str, str]): From 130ca319c8e7546988db1319e40ae3fbf1d826ac Mon Sep 17 00:00:00 2001 From: connorchow Date: Wed, 2 Apr 2025 16:43:57 -0700 Subject: [PATCH 07/14] updating diversity scorer + evaluator to include embedding updates --- .../operator/core/diversity_scorer.py | 17 +++++--- src/ember/core/utils/eval/evaluators.py | 43 ++++++++++++------- 2 files changed, 40 insertions(+), 20 deletions(-) diff --git a/src/ember/core/registry/operator/core/diversity_scorer.py b/src/ember/core/registry/operator/core/diversity_scorer.py index 3112f83a..7d7c0300 100644 --- a/src/ember/core/registry/operator/core/diversity_scorer.py +++ b/src/ember/core/registry/operator/core/diversity_scorer.py @@ -8,9 +8,9 @@ from ember.core.types import EmberModel from ember.core.utils.eval.evaluators import DiversityEnsembledEvaluator -from ember.core.utils.embedding_utils import Text_Embedding_Ada_002_Model -from ember.core.registry.model.base.services.model_service import ModelService +from ember.core.registry.model.examples.provider_extension_guide import EmbeddingProviderModel +import logging class DiversityScoringOperatorInputs(EmberModel): """Input model for DiversityScoringOperator. @@ -20,8 +20,6 @@ class DiversityScoringOperatorInputs(EmberModel): """ responses: List[str] - model_service: ModelService - class DiversityScoringOperatorOutputs(EmberModel): """Output model for DiversityScoringOperator. 
@@ -45,6 +43,11 @@ class DiversityScoringOperator( input_model=DiversityScoringOperatorInputs, structured_output=DiversityScoringOperatorOutputs, ) + def __init__(self, *, embedding_model: EmbeddingProviderModel) -> None: + self.embedding_model = embedding_model + if self.embedding_model is None: + logging.warning("DiversityScoringEvaluator isn't initialized with an embedding model") + def forward( self, *, inputs: DiversityScoringOperatorInputs @@ -52,4 +55,8 @@ def forward( if not inputs.responses or not inputs.model_service: return {"responses": None, "diversity_score": 0} - return {"responses": inputs.responses, "divserity_score": DiversityEnsembledEvaluator().evaluate(inputs.responses, embedding_model=Text_Embedding_Ada_002_Model(llm=inputs.model_service))['score']} \ No newline at end of file + score = DiversityEnsembledEvaluator(embedding_model=self.embedding_model).evaluate(inputs.responses).score + # logger instead + logging.info(f"DiversityScoringOperator's score from {len(inputs.responses)} responses: {score}") + + return {"responses": inputs.responses, "diversity_score": score} \ No newline at end of file diff --git a/src/ember/core/utils/eval/evaluators.py b/src/ember/core/utils/eval/evaluators.py index be6b6f65..78198656 100644 --- a/src/ember/core/utils/eval/evaluators.py +++ b/src/ember/core/utils/eval/evaluators.py @@ -10,10 +10,12 @@ from diversity import compression_ratio import Levenshtein import numpy as np -from ember.core.utils.embedding_utils import (EmbeddingModel, - CosineSimilarity, +from ember.core.utils.embedding_utils import (CosineSimilarity, calculate_text_similarity, - Text_Embedding_Ada_002_Model) + ) +from ember.core.registry.model.examples.provider_extension_guide import EmbeddingProviderModel + +import logging T_out = TypeVar("T_out") T_truth = TypeVar("T_truth") @@ -222,21 +224,28 @@ class DiversityEnsembledEvaluator(IEvaluator[List[str], None]): Returns: EvaluationResult: Average of the three diversity scores with 
`is_correct=True`. """ + def __init__(self, embedding_model: EmbeddingProviderModel): + self.embedding_model = embedding_model + if self.embedding_model is None: + logging.warning("DiversityEnsembledEvaluator isn't initialized with an embedding model") def evaluate( self, system_output: List[str], - embedding_model: EmbeddingModel, **kwargs ) -> EvaluationResult: - if not system_output or embedding_model is None: + if not system_output: + logging.debug("DiversityEnsembledEvaluator didn't receive an output") + return EvaluationResult(is_correct=False, score=-1) + if self.embedding_model is None: + logging.debug("DiversityEnsembledEvaluator wasn't initialized with an embedding model") return EvaluationResult(is_correct=False, score=-1) - if len(system_output) == 1: + logging.debug("DiversityEnsembledEvaluator only received one string of text") return EvaluationResult(is_correct=True, score=0) # Lower cosine similarity --> more diverse - cosine_score = 1.0 - DiversityCosineSimilarityEvaluator().evaluate(system_output, embedding_model).score + cosine_score = 1.0 - DiversityCosineSimilarityEvaluator(embedding_model=self.embedding_model).evaluate(system_output).score # higher compression score --> more diverse compression_score = DiversityCompressionEvaluator().evaluate(system_output).score # higher edit distance --> more diverse @@ -263,33 +272,37 @@ class DiversityCosineSimilarityEvaluator(IEvaluator[List[str], None]): Returns: EvaluationResult: Result with average similarity score and output metadata. 
""" - # TODO: - # def __init__(self, embedding_model): - # self.embedding_model = embedding_model + def __init__(self, embedding_model: EmbeddingProviderModel): + self.embedding_model = embedding_model + if self.embedding_model is None: + logging.warning("DiversityCosineEvaluator isn't initialized with an embedding model") def evaluate( self, system_output: List[str], - embedding_model: EmbeddingModel, **kwargs ) -> EvaluationResult: - if not system_output or embedding_model is None: + if not system_output: + logging.debug("DiversityCosineEvaluator didn't receive an output") + return EvaluationResult(is_correct=False, score=-1) + if self.embedding_model is None: + logging.debug("DiversityCosineEvaluator wasn't initialized with an embedding model") return EvaluationResult(is_correct=False, score=-1) - if len(system_output) == 1: + logging.deubg("DiversityCosineEvaluator only received one string of text") return EvaluationResult(is_correct=True, score=0) cosine = CosineSimilarity() scores = [] - # IDEA: Compute embedding vectors for all system_output --> get the average + # TODO IDEA: Compute embedding vectors for all system_output --> get the average # Then compute cosine similarity between all other outputs # Compare every possible combination of system_output vectors for i in range(len(system_output)): for j in range(i + 1, len(system_output)): sim = calculate_text_similarity( - system_output[i], system_output[j], embedding_model, metric=cosine + system_output[i], system_output[j], self.embedding_model, metric=cosine ) scores.append(sim) From b5f8143af1d520ba28bb89e19cf4344b9382ccc1 Mon Sep 17 00:00:00 2001 From: connorchow Date: Wed, 2 Apr 2025 17:15:45 -0700 Subject: [PATCH 08/14] updating evaluators --- src/ember/core/utils/eval/evaluators.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ember/core/utils/eval/evaluators.py b/src/ember/core/utils/eval/evaluators.py index 78198656..8d5511e6 100644 --- 
a/src/ember/core/utils/eval/evaluators.py +++ b/src/ember/core/utils/eval/evaluators.py @@ -410,7 +410,7 @@ class DiversityNoveltyEvaluator: def evaluate( self, - model: EmbeddingModel, + model: EmbeddingProviderModel, system_output: List[str], **kwargs ) -> EvaluationResult: @@ -435,7 +435,7 @@ def evaluate( def compute_novelty( self, - model: EmbeddingModel, + model: EmbeddingProviderModel, response: str, prior_responses: List[str] ) -> float: From abf461fd14de3acddde95e05b187b58eb58cd692 Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Fri, 4 Apr 2025 21:22:10 -0700 Subject: [PATCH 09/14] Integrated capability models and examples using OpenAI extended model, moved diversity operators from `evaluators.py` to `diversity_evaluators.py`, renamed `ember/core/utils/logging.py` to ../`ember_logging.py`, added more robust example on diversity operator usage --- src/ember/core/app_context.py | 2 +- .../model/providers/openai/openai_provider.py | 299 +++++++++++++- .../model/providers/provider_capability.py | 310 +++++++++++++++ src/ember/core/utils/embedding_utils.py | 86 ++-- .../utils/{logging.py => ember_logging.py} | 4 +- .../core/utils/eval/diversity_evaluators.py | 373 ++++++++++++++++++ src/ember/core/utils/eval/evaluators.py | 255 ------------ .../operators/diversity_operators_example.py | 107 +++++ 8 files changed, 1109 insertions(+), 327 deletions(-) create mode 100644 src/ember/core/registry/model/providers/provider_capability.py rename src/ember/core/utils/{logging.py => ember_logging.py} (97%) create mode 100644 src/ember/core/utils/eval/diversity_evaluators.py create mode 100644 src/ember/examples/operators/diversity_operators_example.py diff --git a/src/ember/core/app_context.py b/src/ember/core/app_context.py index 10d2ee45..b42c1f6f 100644 --- a/src/ember/core/app_context.py +++ b/src/ember/core/app_context.py @@ -21,7 +21,7 @@ from ember.core.registry.model.base.registry.model_registry import ModelRegistry from 
ember.core.registry.model.base.services.usage_service import UsageService from ember.core.registry.model.initialization import initialize_registry -from ember.core.utils.logging import configure_logging +from ember.core.utils.ember_logging import configure_logging # Re-import for patching to work correctly import logging diff --git a/src/ember/core/registry/model/providers/openai/openai_provider.py b/src/ember/core/registry/model/providers/openai/openai_provider.py index e83792b4..5b45819e 100644 --- a/src/ember/core/registry/model/providers/openai/openai_provider.py +++ b/src/ember/core/registry/model/providers/openai/openai_provider.py @@ -71,10 +71,10 @@ """ import logging -from typing import Any, Dict, Final, List, Optional, cast +from typing import Any, Dict, Final, List, Optional, cast, ClassVar import openai -from pydantic import Field, field_validator +from pydantic import Field, field_validator, ConfigDict, BaseModel from requests.exceptions import HTTPError from tenacity import retry, stop_after_attempt, wait_exponential @@ -84,7 +84,7 @@ ChatResponse, ProviderParams, ) -from ember.core.registry.model.base.schemas.model_info import ModelInfo +from ember.core.registry.model.base.schemas.model_info import ModelInfo, ProviderInfo from ember.core.registry.model.base.utils.model_registry_exceptions import ( InvalidPromptError, ProviderAPIError, @@ -96,6 +96,15 @@ ) from ember.plugin_system import provider +from ember.core.registry.model.providers.provider_capability import ( + EmbeddingRequest, + EmbeddingResponse, + EmbeddingProviderModel, + CompletionRequest, + CompletionResponse, + TextCompletionProviderModel, +) +import os class OpenAIProviderParams(ProviderParams): """OpenAI-specific provider parameters for fine-tuning API requests. 
@@ -347,10 +356,6 @@ def _prune_unsupported_params( logger.debug("Removing 'temperature' parameter for model: %s", model_name) kwargs.pop("temperature") return kwargs - - # TODO: Fix embedding model structure - def _is_embedding_model(self, model_name: str) -> bool: - return model_name.startswith("text-embedding-") @retry( wait=wait_exponential(min=1, max=10), stop=stop_after_attempt(3), reraise=True @@ -442,3 +447,283 @@ def forward(self, request: ChatRequest) -> ChatResponse: message=f"API error: {str(exc)}", cause=exc, ) + +class OpenAICompletionParameters(BaseModel): + """Parameter conversion for OpenAI, specifically text completion requests. + + Handles parameter validation and conversion between Ember's universal format + and OpenAI's specific API requirements. + + Attributes: + prompt: The text prompt to complete. + max_tokens: Maximum number of tokens to generate. + temperature: Controls randomness (0.0-2.0). + stop_sequences: Sequences that signal end of generation. + """ + + model_config = ConfigDict( + protected_namespaces=(), # Disable Pydantic's protected namespace checks + ) + + prompt: str + max_tokens: Optional[int] = Field(default=50) + temperature: Optional[float] = Field(default=0.7, ge=0.0, le=2.0) + stop_sequences: Optional[List[str]] = None + + def to_openai_kwargs(self) -> Dict[str, Any]: + """Converting parameters to OpenAI API format. + + Returns: + Dictionary of parameters for the OpenAI API. + """ + kwargs: Dict[str, Any] = { + "prompt": self.prompt, + "max_tokens": self.max_tokens, + "temperature": self.temperature, + } + + if self.stop_sequences: + kwargs["stop"] = self.stop_sequences + + return kwargs + +@provider("OpenAIExtended") +class OpenAIExtendedModel(TextCompletionProviderModel, EmbeddingProviderModel): + """Extended OpenAI provider supporting chat, text completion, and embeddings. + + This class implements a provider that supports multiple model types through + capability interfaces. 
+ + Attributes: + PROVIDER_NAME: Provider name for registration with the plugin system. + CAPABILITIES: Capability flags showing supported model types. + """ + + PROVIDER_NAME: ClassVar[str] = "OpenAIExtended" + CAPABILITIES: ClassVar[Dict[str, bool]] = { + "chat": True, + "completion": True, + "embedding": True, + } + + def create_client(self) -> Any: + """Creating and configuring the OpenAI client. + + Retrieves the API key from the model information and configures the client. + + Returns: + The configured OpenAI client. + + Raises: + ProviderAPIError: If API key is missing or invalid. + """ + import openai + + api_key: Optional[str] = self.model_info.get_api_key() + if not api_key: + raise ProviderAPIError("OpenAI API key is missing or invalid.") + + openai.api_key = api_key + return openai + + def forward(self, request: ChatRequest) -> ChatResponse: + """Processing a chat request (implementing BaseProviderModel). + + This method provides the standard chat functionality required by + the BaseProviderModel interface. + + Args: + request: Chat request to process. + + Returns: + Chat response from the model. + + Raises: + InvalidPromptError: If prompt is empty. + ProviderAPIError: For unexpected errors during API calls. + """ + # Implementation would match OpenAIModel's forward method + # This is a simplified placeholder + if not request.prompt: + raise InvalidPromptError("OpenAI prompt cannot be empty.") + + # Implementation details would mirror the standard OpenAIModel + # Return placeholder + return ChatResponse(data="Chat implementation placeholder") + + def complete(self, request: CompletionRequest) -> CompletionResponse: + """Processing a text completion request. + + Implements text completion capabilities using the OpenAI completions API. + + Args: + request: Text completion request. + + Returns: + Completion response from the model. + + Raises: + InvalidPromptError: If prompt is empty. + ProviderAPIError: For unexpected errors during API calls. 
+ """ + if not request.prompt: + raise InvalidPromptError("OpenAI completion prompt cannot be empty.") + + logger.info( + "OpenAI completion invoked", + extra={ + "provider": self.PROVIDER_NAME, + "model_name": self.model_info.name, + "prompt_length": len(request.prompt), + }, + ) + + # Convert universal parameters to OpenAI format + openai_parameters = OpenAICompletionParameters( + prompt=request.prompt, + max_tokens=request.max_tokens, + temperature=request.temperature, + stop_sequences=request.stop_sequences, + ) + openai_kwargs = openai_parameters.to_openai_kwargs() + + # Add provider-specific parameters + provider_params = cast(OpenAICompletionParameters, request.provider_params) + openai_kwargs.update( + {k: v for k, v in provider_params.items() if v is not None} + ) + + try: + # Request timeout from parameters or default + timeout = openai_kwargs.pop("timeout", 30) + + # Make the API call + response = self.client.completions.create( + model=self.model_info.name, + timeout=timeout, + **openai_kwargs, + ) + + # Extract completion text + text = response.choices[0].text.strip() + + # Calculate usage statistics + # For simplicity, we assume a usage calculator is implemented elsewhere + usage_stats = ( + None # self.usage_calculator.calculate(response, self.model_info) + ) + + return CompletionResponse( + text=text, + raw_output=response, + usage=usage_stats, + ) + + except Exception as exc: + logger.exception("Unexpected error in OpenAIExtendedModel.complete()") + raise ProviderAPIError(str(exc)) from exc + + def embed(self, request: EmbeddingRequest) -> EmbeddingResponse: + """Generating embeddings for the input text(s). + + Implements embedding capabilities using the OpenAI embeddings API. + + Args: + request: Embedding request with input text(s). + + Returns: + Embedding response with vector representations. + + Raises: + InvalidPromptError: If input is empty. + ProviderAPIError: For unexpected errors during API calls. 
+ """ + # Use the provided model or default to the model in model_info + model_name = request.model or self.model_info.name + + input_text = request.input + if not input_text: + raise InvalidPromptError("Input text for embeddings cannot be empty.") + + logger.info( + "OpenAI embeddings invoked", + extra={ + "provider": self.PROVIDER_NAME, + "model_name": model_name, + "input_type": "batch" if isinstance(input_text, list) else "single", + }, + ) + + try: + # Make the API call + response = self.client.embeddings.create( + model=model_name, + input=input_text, + timeout=30, + ) + + # Extract embeddings + if isinstance(input_text, list): + print(f"batch processing") + # For batch processing + embeddings = [item.embedding for item in response.data] + else: + # For single text input + embeddings = response.data[0].embedding + + # Get dimensions from the first embedding + if isinstance(embeddings, list) and isinstance(embeddings[0], list): + dimensions = len(embeddings[0]) + else: + dimensions = len(embeddings) + + # Calculate usage statistics (implementation would depend on your system) + usage_stats = ( + None # self.usage_calculator.calculate(response, self.model_info) + ) + + return EmbeddingResponse( + embeddings=embeddings, + model=model_name, + dimensions=dimensions, + raw_output=response, + usage=usage_stats, + ) + + except Exception as exc: + logger.exception("Unexpected error in OpenAIExtendedModel.embed()") + raise ProviderAPIError(str(exc)) from exc + + +def create_openai_embedding_model(model_name: str = "text-embedding-ada-002") -> OpenAIExtendedModel: + """ + Tool for creating an OpenAI embedding model by passing the embedding model name. + + Args: + model_name: Name of particular embedding model endpoint as specified by the OpenAI API + + Returns: + OpenAIExtendedModel initialized to serve model_name; None if model could not + be created + + Raises: + InvalidPromptError: If input is empty. + ProviderAPIError: For unexpected errors during API calls. 
+ """ + # All OpenAI embedding models contain "text-embedding" in their model name + if "text-embedding" not in model_name: + return None + + model_info = ModelInfo( + id="openai:gpt-4o", + name=model_name, + provider=ProviderInfo( + name="OpenAI", + default_api_key=os.environ.get("OPENAI_API_KEY"), + base_url="https://api.openai.com/v1", + ) + ) + + embedding_model = OpenAIExtendedModel(model_info) + + return embedding_model \ No newline at end of file diff --git a/src/ember/core/registry/model/providers/provider_capability.py b/src/ember/core/registry/model/providers/provider_capability.py new file mode 100644 index 00000000..169a4c64 --- /dev/null +++ b/src/ember/core/registry/model/providers/provider_capability.py @@ -0,0 +1,310 @@ +import logging +from typing import Any, ClassVar, Dict, List, Optional, TypeVar, Union + +import numpy as np +from pydantic import BaseModel, ConfigDict, Field, field_validator +from typing_extensions import Protocol, TypedDict + +from ember.core.registry.model.base.schemas.chat_schemas import ( + ProviderParams, +) +from ember.core.registry.model.base.schemas.usage import UsageStats +from ember.core.registry.model.base.utils.model_registry_exceptions import ( + InvalidPromptError, + ProviderAPIError, +) +from ember.core.registry.model.providers.base_provider import BaseProviderModel + +logger = logging.getLogger(__name__) + +class CompletionRequest(BaseModel): + """Universal text completion request model. + + Similar to ChatRequest but designed for single-turn text completion. + Used for traditional completion models that predate chat-oriented models. + + Attributes: + prompt: The text prompt to complete. + max_tokens: Optional maximum number of tokens to generate. + temperature: Optional sampling temperature controlling randomness. + stop_sequences: Optional list of sequences that signal the end of generation. + provider_params: Provider-specific parameters as a flexible dictionary. 
+ """ + + model_config = ConfigDict( + protected_namespaces=(), # Disable Pydantic's protected namespace checks + ) + + prompt: str + max_tokens: Optional[int] = None + temperature: Optional[float] = Field(default=0.7, ge=0.0, le=2.0) + stop_sequences: Optional[List[str]] = None + provider_params: ProviderParams = Field(default_factory=dict) + + +class CompletionResponse(BaseModel): + """Universal text completion response model. + + Standardizes the response format for text completion models. + + Attributes: + text: The generated completion text. + raw_output: The unprocessed provider-specific response data. + usage: Optional usage statistics for token counting and cost tracking. + """ + + model_config = ConfigDict( + protected_namespaces=(), # Disable Pydantic's protected namespace checks + ) + + text: str + raw_output: Any = None + usage: Optional[UsageStats] = None + + +class EmbeddingRequest(BaseModel): + """Request model for generating vector embeddings from text. + + Used to generate semantic vector representations that capture the meaning + of input text, suitable for similarity comparisons, clustering, and search. + + Attributes: + input: Text input(s) to embed - can be a single string or list of strings. + model: Optional specific embedding model to use when the provider has multiple. + provider_params: Provider-specific parameters as a flexible dictionary. + """ + + model_config = ConfigDict( + protected_namespaces=(), # Disable Pydantic's protected namespace checks + ) + + input: Union[str, List[str]] + model: Optional[str] = None + provider_params: ProviderParams = Field(default_factory=dict) + + @field_validator("input") + def validate_input(cls, value: Union[str, List[str]]) -> Union[str, List[str]]: + """Validating the input text is not empty. + + Args: + value: The input text(s) to validate. + + Returns: + The validated input value. + + Raises: + ValueError: If input is empty string or empty list. 
+ """ + if isinstance(value, str) and not value.strip(): + raise ValueError("Input text cannot be empty") + if isinstance(value, list) and ( + len(value) == 0 or all(not t.strip() for t in value) + ): + raise ValueError("Input list cannot be empty or contain only empty strings") + return value + + +class EmbeddingResponse(BaseModel): + """Response model containing vector embeddings. + + Contains numerical vector representations of input text that capture semantic meaning. + + Attributes: + embeddings: Vector representation(s) of the input text(s). + model: Name of the embedding model used. + dimensions: The dimensionality of the embedding vectors. + raw_output: The unprocessed provider-specific response data. + usage: Optional usage statistics for token counting and cost tracking. + """ + + model_config = ConfigDict( + protected_namespaces=(), # Disable Pydantic's protected namespace checks + ) + + embeddings: Union[List[float], List[List[float]]] + model: str + dimensions: int + raw_output: Any = None + usage: Optional[UsageStats] = None + + +# Type variable for implementation-specific typing +ModelT = TypeVar("ModelT", bound="CapabilityModel") + +class TextCompletionCapable(Protocol): + """Protocol defining the interface for text completion models. + + Provider implementations supporting text completion should implement this protocol. + """ + + def complete(self, request: CompletionRequest) -> CompletionResponse: + """Processing a text completion request. + + Args: + request: The text completion request. + + Returns: + The text completion response. + """ + ... + + def complete_text(self, prompt: str, **kwargs: Any) -> CompletionResponse: + """Convenience method for simple text completion. + + Args: + prompt: The text to complete. + **kwargs: Additional parameters for the completion request. + + Returns: + The text completion response. + """ + ... + + +class EmbeddingCapable(Protocol): + """Protocol defining the interface for embedding models. 
+ + Provider implementations supporting embeddings should implement this protocol. + """ + + def embed(self, request: EmbeddingRequest) -> EmbeddingResponse: + """Generating embeddings for the input text(s). + + Args: + request: The embedding request. + + Returns: + The embedding response containing vector representations. + """ + ... + + def embed_text( + self, input_text: Union[str, List[str]], **kwargs: Any + ) -> EmbeddingResponse: + """Convenience method for simple embedding generation. + + Args: + input_text: The text(s) to embed. + **kwargs: Additional parameters for the embedding request. + + Returns: + The embedding response with vector representations. + """ + ... + + +# Base class for capability-aware models +class CapabilityModel(BaseProviderModel): + """Extended base provider model with capability flags. + + This class extends BaseProviderModel with explicit capability tracking + to allow runtime capability detection for different model types. + + Attributes: + CAPABILITIES: Class variable mapping capability names to support flags. + """ + + CAPABILITIES: ClassVar[Dict[str, bool]] = { + "chat": True, + "completion": False, + "embedding": False, + } + + +# ----------------------------------------------------------------------------- +# PART 3: Extended Provider Base Classes +# ----------------------------------------------------------------------------- + + +class TextCompletionProviderModel(CapabilityModel, TextCompletionCapable): + """Base class for text completion model providers. + + Extends the BaseProviderModel to support text completion capabilities. + Providers supporting text completion should inherit from this class. + """ + + CAPABILITIES: ClassVar[Dict[str, bool]] = { + "chat": True, + "completion": True, + "embedding": False, + } + + def complete(self, request: CompletionRequest) -> CompletionResponse: + """Processing a text completion request. + + Args: + request: The text completion request. + + Returns: + The text completion response. 
+ + Raises: + NotImplementedError: If the provider has not implemented this capability. + """ + raise NotImplementedError( + f"Provider {self.__class__.__name__} does not support text completion" + ) + + def complete_text(self, prompt: str, **kwargs: Any) -> CompletionResponse: + """Convenience method for text completion. + + Creates a CompletionRequest from the prompt and additional parameters, + then delegates to the complete() method for processing. + + Args: + prompt: The text to complete. + **kwargs: Additional parameters for the completion request. + + Returns: + The text completion response. + """ + request = CompletionRequest(prompt=prompt, **kwargs) + return self.complete(request=request) + + +class EmbeddingProviderModel(CapabilityModel, EmbeddingCapable): + """Base class for embedding model providers. + + Extends the BaseProviderModel to support embedding capabilities. + Providers supporting embeddings should inherit from this class. + """ + + CAPABILITIES: ClassVar[Dict[str, bool]] = { + "chat": True, + "completion": False, + "embedding": True, + } + + def embed(self, request: EmbeddingRequest) -> EmbeddingResponse: + """Generating embeddings for the input text(s). + + Args: + request: The embedding request. + + Returns: + The embedding response containing vector representations. + + Raises: + NotImplementedError: If the provider has not implemented this capability. + """ + raise NotImplementedError( + f"Provider {self.__class__.__name__} does not support embeddings" + ) + + def embed_text( + self, input_text: Union[str, List[str]], **kwargs: Any + ) -> EmbeddingResponse: + """Convenience method for generating embeddings. + + Creates an EmbeddingRequest from the input text and additional parameters, + then delegates to the embed() method for processing. + + Args: + input_text: The text(s) to embed. + **kwargs: Additional parameters for the embedding request. + + Returns: + The embedding response with vector representations. 
+ """ + request = EmbeddingRequest(input=input_text, **kwargs) + return self.embed(request=request) \ No newline at end of file diff --git a/src/ember/core/utils/embedding_utils.py b/src/ember/core/utils/embedding_utils.py index 963f27ad..48de1b1b 100644 --- a/src/ember/core/utils/embedding_utils.py +++ b/src/ember/core/utils/embedding_utils.py @@ -1,16 +1,18 @@ from __future__ import annotations -import math from abc import ABC, abstractmethod from typing import List, Protocol +import numpy as np -# TODO: Fix embedding model structure -from ember.core.registry.model.base.services.model_service import ModelService +from ember.core.registry.model.providers.provider_capability import (EmbeddingProviderModel, + EmbeddingResponse) +from ember.core.registry.model.providers.openai.openai_provider import create_openai_embedding_model ################################################################ # 1) Embedding Model Interfaces & Implementations ################################################################ +# NOTE: These protocols are now outdated by the EmbeddingProviderModel/CapabilityModel interfaces class EmbeddingModel(Protocol): """Interface for embedding models. @@ -59,54 +61,6 @@ def embed_text(self, text: str) -> List[float]: return [] return [ord(ch) / 256.0 for ch in text] -# TODO: Fix embedding model structure -class Text_Embedding_Ada_002_Model: - """Interface for embedding models. - - This protocol defines the minimal interface required to compute a text - embedding. Implementations may use local models, external APIs, or custom - neural networks. - - Methods: - embed_text: Compute the embedding for a given text. - """ - def __init__(self, llm: ModelService): - self.llm = llm - - def embed_text(self, text: str) -> List[float]: - """Computes the embedding vector for the provided text. - - Args: - text (str): The text to be embedded. - - Returns: - List[float]: A list of floats representing the embedding vector. 
- """ - response = self.llm(model_id="openai:text-embedding-ada-002", prompt=text) - return response.embedding - -class OpenAITextEmbedding3(Protocol): - """Interface for embedding models. - - This protocol defines the minimal interface required to compute a text - embedding. Implementations may use local models, external APIs, or custom - neural networks. - - Methods: - embed_text: Compute the embedding for a given text. - """ - - def embed_text(self, text: str) -> List[float]: - """Computes the embedding vector for the provided text. - - Args: - text (str): The text to be embedded. - - Returns: - List[float]: A list of floats representing the embedding vector. - """ - - ################################################################ # 2) Similarity Metric Interface & Implementations ################################################################ @@ -155,13 +109,16 @@ def similarity(self, vec_a: List[float], vec_b: List[float]) -> float: if not vec_a or not vec_b: return 0.0 - dot_product: float = sum(a * b for a, b in zip(vec_a, vec_b)) - norm_a: float = math.sqrt(sum(a * a for a in vec_a)) - norm_b: float = math.sqrt(sum(b * b for b in vec_b)) + a = np.array(vec_a) + b = np.array(vec_b) + + norm_a = np.linalg.norm(a) + norm_b = np.linalg.norm(b) + if norm_a == 0 or norm_b == 0: return 0.0 - return dot_product / (norm_a * norm_b) + return float(np.dot(a, b) / (norm_a * norm_b)) ################################################################ @@ -170,7 +127,7 @@ def similarity(self, vec_a: List[float], vec_b: List[float]) -> float: def calculate_text_similarity( - text1: str, text2: str, model: EmbeddingModel, metric: SimilarityMetric + text1: str, text2: str, model: EmbeddingProviderModel, metric: SimilarityMetric ) -> float: """Calculates text similarity using an embedding model and a similarity metric. @@ -186,22 +143,27 @@ def calculate_text_similarity( Returns: float: The computed similarity score. 
""" - embedding1: List[float] = model.embed_text(text=text1) - embedding2: List[float] = model.embed_text(text=text2) - return metric.similarity(vec_a=embedding1, vec_b=embedding2) + response1: EmbeddingResponse = model.embed_text(input_text=text1) + response2: EmbeddingResponse = model.embed_text(input_text=text2) + + embeddings1: List[float] = response1.embeddings + embeddings2: List[float] = response2.embeddings + + return metric.similarity(vec_a=embeddings1, + vec_b=embeddings2) ################################################################ # 4) Example Usage (Executable as Script) ################################################################ if __name__ == "__main__": - mock_model: MockEmbeddingModel = MockEmbeddingModel() - cosine: CosineSimilarity = CosineSimilarity() + openai_embedding_model = create_openai_embedding_model() + cosine_simlarity = CosineSimilarity() text_a: str = "Hello world!" text_b: str = "Hello, world??" score: float = calculate_text_similarity( - text1=text_a, text2=text_b, model=mock_model, metric=cosine + text1=text_a, text2=text_b, model=openai_embedding_model, metric=cosine_simlarity ) print(f"Similarity between '{text_a}' and '{text_b}': {score}") diff --git a/src/ember/core/utils/logging.py b/src/ember/core/utils/ember_logging.py similarity index 97% rename from src/ember/core/utils/logging.py rename to src/ember/core/utils/ember_logging.py index 25de7aa2..4b7be51c 100644 --- a/src/ember/core/utils/logging.py +++ b/src/ember/core/utils/ember_logging.py @@ -12,11 +12,11 @@ Usage: # To apply standard configuration with reduced verbosity: - from ember.core.utils.logging import configure_logging + from ember.core.utils.ember_logging import configure_logging configure_logging(verbose=False) # To adjust specific component verbosity: - from ember.core.utils.logging import set_component_level + from ember.core.utils.ember_logging import set_component_level set_component_level("model_discovery", logging.DEBUG) """ diff --git 
a/src/ember/core/utils/eval/diversity_evaluators.py b/src/ember/core/utils/eval/diversity_evaluators.py new file mode 100644 index 00000000..1830e68b --- /dev/null +++ b/src/ember/core/utils/eval/diversity_evaluators.py @@ -0,0 +1,373 @@ +from __future__ import annotations + +from typing import Any, List, Tuple + +from .base_evaluator import EvaluationResult, IEvaluator + +# diversity imports +from diversity import compression_ratio +import Levenshtein +import numpy as np +from ember.core.utils.embedding_utils import (CosineSimilarity, + calculate_text_similarity) +from ember.core.registry.model.providers.provider_capability import EmbeddingProviderModel +from ember.core.registry.model.providers.openai.openai_provider import create_openai_embedding_model + +import logging + +# Composite Evaluator Example +class DiversityEnsembledEvaluator(IEvaluator[List[str], None]): + """Evaluator that combines multiple diversity metrics to assess ensemble output diversity. + + Computes diversity as an average of cosine similarity, compression ratio, and edit distance. + The higher this score is, the more diverse your text. + + Args: + system_output (List[str]): List of generated outputs from the system. + embedding_model (EmbeddingModel): The embedding model to compute cosine similarity. + + Returns: + EvaluationResult: Average of the three diversity scores with `is_correct=True`. 
+ """ + def __init__(self, embedding_model: EmbeddingProviderModel): + self.embedding_model = embedding_model + if self.embedding_model is None: + logging.warning("DiversityEnsembledEvaluator isn't initialized with an embedding model") + + def evaluate( + self, + system_output: List[str], + **kwargs + ) -> EvaluationResult: + if not system_output: + logging.debug("DiversityEnsembledEvaluator didn't receive an output") + return EvaluationResult(is_correct=False, score=-1) + if self.embedding_model is None: + logging.debug("DiversityEnsembledEvaluator wasn't initialized with an embedding model") + return EvaluationResult(is_correct=False, score=-1) + if len(system_output) == 1: + logging.debug("DiversityEnsembledEvaluator only received one string of text") + return EvaluationResult(is_correct=True, score=0) + + # Lower cosine similarity --> more diverse + cosine_score = 1.0 - DiversityCosineSimilarityEvaluator(embedding_model=self.embedding_model).evaluate(system_output).score + # higher compression score --> more diverse + compression_score = DiversityCompressionEvaluator().evaluate(system_output).score + # higher edit distance --> more diverse + edit_score = DiversityEditDistanceEvaluator().evaluate(system_output).score + + avg_diversity = (cosine_score + compression_score + edit_score) / 3 + + return EvaluationResult( + is_correct=True, + score=avg_diversity, + metadata={"responses": system_output} + ) + + +class DiversityCosineSimilarityEvaluator(IEvaluator[List[str], None]): + """Evaluator that computes average pairwise cosine similarity between outputs. + + Lower average cosine similarity implies greater semantic diversity. + + Args: + system_output (List[str]): List of generated outputs from the system. + embedding_model (EmbeddingModel): The embedding model used to compute cosine similarity. + + Returns: + EvaluationResult: Result with average similarity score and output metadata. 
+ """ + def __init__(self, embedding_model: EmbeddingProviderModel = None): + self.embedding_model = embedding_model + if self.embedding_model is None: + logging.warning("DiversityCosineEvaluator isn't initialized with an embedding model " + + "Using default OpenAI embedding model instead") + self.embedding_model = create_openai_embedding_model() + + def evaluate( + self, + system_output: List[str], + **kwargs + ) -> EvaluationResult: + if not system_output: + logging.debug("DiversityCosineEvaluator didn't receive an output") + return EvaluationResult(is_correct=False, score=-1) + if self.embedding_model is None: + logging.debug("DiversityCosineEvaluator wasn't initialized with an embedding model") + return EvaluationResult(is_correct=False, score=-1) + if len(system_output) == 1: + logging.deubg("DiversityCosineEvaluator only received one string of text") + return EvaluationResult(is_correct=True, score=0) + + cosine_similarity = CosineSimilarity() + scores = [] + + # TODO IDEA: Compute embedding vectors for all system_output --> get the average + # Then compute cosine similarity between all other outputs + + # Compare every possible combination of system_output vectors + for i in range(len(system_output)): + for j in range(i + 1, len(system_output)): + sim = calculate_text_similarity( + system_output[i], system_output[j], self.embedding_model, metric=cosine_similarity + ) + scores.append(sim) + + avg_score = float(np.average(scores)) + + return EvaluationResult( + is_correct=True, + score=avg_score, + metadata={"responses": system_output} + ) + + +class DiversityCompressionEvaluator(IEvaluator[List[str], None]): + """Evaluator that measures diversity using a compression ratio heuristic. + + Lower compression ratio indicates higher textual diversity. The final score is scaled + based on a minimum number of responses (5) and minimum total character count (100). + + Args: + system_output (List[str]): List of generated responses. 
+ + Returns: + EvaluationResult: Scaled diversity score based on compression. + """ + + def evaluate( + self, + system_output: List[str], + **kwargs + ) -> EvaluationResult: + if not system_output: + return EvaluationResult(is_correct=False, score=-1) + + total_chars = sum(len(r) for r in system_output) + # ratio = (size of compressed data) / (size of uncompressed data) + # Higher ratio is --> more diverse + ratio = 1 / compression_ratio(system_output) + # Penalize inputs with few words (hard to measure) and inputs with very few characters + # Note that this is a temporary patch for compression_ratio does not normalizing over word length + scaled_score = ratio * min(1, len(system_output) / 5) * min(1, total_chars / 100) + + return EvaluationResult( + is_correct=True, + score=scaled_score, + metadata={"responses": system_output} + ) + + +class DiversityEditDistanceEvaluator: + """Evaluator that measures lexical diversity using normalized Levenshtein edit distance. + + Computes average pairwise normalized edit distance across all outputs. + + Args: + system_output (List[str]): List of generated responses. + + Returns: + EvaluationResult: Average normalized edit distance score. 
+ """ + + def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult: + if not system_output: + return EvaluationResult(is_correct=False, score=-1, metadata={}) + + score = self.compute_distance(system_output) + + return EvaluationResult( + is_correct=True, + score=score, + metadata={"responses": system_output} + ) + + def compute_distance(self, outputs: List[str]) -> float: + n = len(outputs) + if n < 2: + return 0.0 + + total_distance = 0.0 + num_pairs = 0 + + for i in range(n): + for j in range(i + 1, n): + dist = Levenshtein.distance(outputs[i], outputs[j]) + max_len = max(len(outputs[i]), len(outputs[j])) + norm_dist = dist / max_len if max_len > 0 else 0 + total_distance += norm_dist + num_pairs += 1 + + return total_distance / num_pairs if num_pairs > 0 else 0.0 + + +class DiversityNoveltyEvaluator: + """Evaluator that measures novelty of each output relative to previously generated ones. + + For each response, computes its cosine distance from all prior responses. + Higher novelty implies lower similarity to prior outputs. + + Args: + model (EmbeddingModel): Embedding model used for computing cosine similarity. + system_output (List[str]): List of outputs ordered by generation. + + Returns: + EvaluationResult: Average novelty score across the sequence. + """ + + def __init__(self, embedding_model: EmbeddingProviderModel = None): + self.embedding_model = embedding_model + if self.embedding_model is None: + logging.warning("DiversityNoveltyEvaluator isn't initialized with an embedding model " + + "Using default OpenAI embedding model instead") + self.embedding_model = create_openai_embedding_model() + + def evaluate(self, + system_output: List[str], + **kwargs + ) -> EvaluationResult: + """ + Evaluates the novelty of each response in a sequence relative to the responses that came before it, + using cosine similarity of embeddings. + + For each response, an embedding is computed and compared against embeddings of all prior responses. 
+ The novelty score is defined as 1.0 minus the maximum cosine similarity with any prior response. + A high score indicates a novel response, while a low score indicates redundancy. + + Note: + - If all responses are identical, the first response gets a score of 1.0 while + all others get 0.0, resulting in an average (and minimum) score of 1/len(system_output). + + Returns: + EvaluationResult: + - is_correct: True if evaluation ran successfully. + - score: Average novelty score across all responses. + - metadata: Contains raw responses and their individual novelty scores. + """ + + if len(system_output) == 0: + logging.warning("Length of inputs to evaluate function is zero") + return EvaluationResult(is_correct=False, score=-1, metadata={}) + + novelty_scores = [] + prior_embeddings = [] + + for r in system_output: + new_emb, novelty = self._compute_novelty(self.embedding_model, r, prior_embeddings) + novelty_scores.append(novelty) + prior_embeddings.append(new_emb) + + avg_score = float(np.mean(novelty_scores)) if novelty_scores else 0.0 + + return EvaluationResult( + is_correct=True, + score=avg_score, + metadata={ + "responses": system_output, + "novelty_scores": novelty_scores + } + ) + + def _compute_novelty(self, + model: EmbeddingProviderModel, + response: str, + prior_embeddings: List[str] + ) -> Tuple[np.ndarray, float]: + + new_emb = model.embed_text(response).embeddings + + if not prior_embeddings: + return new_emb, 1.0 + + similarities = [ + np.dot(new_emb, pe) / (np.linalg.norm(new_emb) * np.linalg.norm(pe)) + for pe in prior_embeddings + ] + + return new_emb, 1.0 - max(similarities) + + +if __name__ == "__main__": + text_embedding_ada_002 = create_openai_embedding_model("text-embedding-ada-002") + + # List of text that represents completely n + very_diverse_text = ["Bananas don't belong in briefcases, but socks and t-shirts do!", + "Abraham Lincoln was the 16th president of the United States of America", + "ERROR 404: Index Not Found"] + + # This 
group of text all rephrase the same request, except + different_words_not_diverse_strs = ["Could you please lend me a hand with this?", + "Might you assist me with a task?", + "Can you spare a second to help me do something?"] + + repetition_strs = ["This is a sample text with lots of repetition.", + "This is a sample text with lots of repetition.", + "This is a sample text with lots of repetition."] + + # List of sample strings that have varying levels of diversity: + test_strings = [very_diverse_text, different_words_not_diverse_strs, repetition_strs] + + + # Measure Cosine similarity + cosine_similarity_evaluator = DiversityCosineSimilarityEvaluator(text_embedding_ada_002) + + print("\n" + "=" * 50 ) + print("Cosine Similarity Evaluator\n") + for i in range(len(test_strings)): + print(f"Computing cosine-similarity for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = cosine_similarity_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Diversity score: {score}\n") + + + # Measure Edit Distance + print("=" * 50 + "\nEdit Distance Evaluator\n") + edit_distance_evaluator = DiversityEditDistanceEvaluator() + + for i in range(len(test_strings)): + print(f"Computing Edit-Distance for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = edit_distance_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Edit-Distance score: {score}\n") + print("=" * 50 + "\n") + + + # Measure Novelty + print("=" * 50 + "\nNovelty Evaluator\n") + novelty_evaluator = DiversityNoveltyEvaluator() + + for i in range(len(test_strings)): + print(f"Computing Novelty for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = novelty_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Novelty score: {score}\n") + print("=" * 50 
+ "\n") + + + # Measure Compression Ratio + print("=" * 50 + "\nCompression Ratio Evaluator\n") + novelty_evaluator = DiversityCompressionEvaluator() + + for i in range(len(test_strings)): + print(f"Computing Compression Ratio for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = novelty_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Compression Ratio: {score}\n") + print("=" * 50 + "\n") + + + # Measure Ensembled Diversity + print("=" * 50 + "\nEnsembled Diversity Evaluator\n") + novelty_evaluator = DiversityCompressionEvaluator() + + for i in range(len(test_strings)): + print(f"Computing Ensembled Diversity Score for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = novelty_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Ensembled Diversity Score: {score}\n") + print("=" * 50 + "\n") \ No newline at end of file diff --git a/src/ember/core/utils/eval/evaluators.py b/src/ember/core/utils/eval/evaluators.py index 8d5511e6..47ddb8b9 100644 --- a/src/ember/core/utils/eval/evaluators.py +++ b/src/ember/core/utils/eval/evaluators.py @@ -6,17 +6,6 @@ from .base_evaluator import EvaluationResult, IEvaluator from .extractors import RegexExtractor -# diversity imports -from diversity import compression_ratio -import Levenshtein -import numpy as np -from ember.core.utils.embedding_utils import (CosineSimilarity, - calculate_text_similarity, - ) -from ember.core.registry.model.examples.provider_extension_guide import EmbeddingProviderModel - -import logging - T_out = TypeVar("T_out") T_truth = TypeVar("T_truth") @@ -209,250 +198,6 @@ def evaluate( metadata={"error": f"{type(error).__name__}: {str(error)}"}, ) - -# Composite Evaluator Example -class DiversityEnsembledEvaluator(IEvaluator[List[str], None]): - """Evaluator that combines multiple diversity metrics to assess ensemble 
output diversity. - - Computes diversity as an average of cosine similarity, compression ratio, and edit distance. - The higher this score is, the more diverse your text. - - Args: - system_output (List[str]): List of generated outputs from the system. - embedding_model (EmbeddingModel): The embedding model to compute cosine similarity. - - Returns: - EvaluationResult: Average of the three diversity scores with `is_correct=True`. - """ - def __init__(self, embedding_model: EmbeddingProviderModel): - self.embedding_model = embedding_model - if self.embedding_model is None: - logging.warning("DiversityEnsembledEvaluator isn't initialized with an embedding model") - - def evaluate( - self, - system_output: List[str], - **kwargs - ) -> EvaluationResult: - if not system_output: - logging.debug("DiversityEnsembledEvaluator didn't receive an output") - return EvaluationResult(is_correct=False, score=-1) - if self.embedding_model is None: - logging.debug("DiversityEnsembledEvaluator wasn't initialized with an embedding model") - return EvaluationResult(is_correct=False, score=-1) - if len(system_output) == 1: - logging.debug("DiversityEnsembledEvaluator only received one string of text") - return EvaluationResult(is_correct=True, score=0) - - # Lower cosine similarity --> more diverse - cosine_score = 1.0 - DiversityCosineSimilarityEvaluator(embedding_model=self.embedding_model).evaluate(system_output).score - # higher compression score --> more diverse - compression_score = DiversityCompressionEvaluator().evaluate(system_output).score - # higher edit distance --> more diverse - edit_score = DiversityEditDistanceEvaluator().evaluate(system_output).score - - avg_diversity = (cosine_score + compression_score + edit_score) / 3 - - return EvaluationResult( - is_correct=True, - score=avg_diversity, - metadata={"responses": system_output} - ) - - -class DiversityCosineSimilarityEvaluator(IEvaluator[List[str], None]): - """Evaluator that computes average pairwise cosine 
similarity between outputs. - - Lower average cosine similarity implies greater semantic diversity. - - Args: - system_output (List[str]): List of generated outputs from the system. - embedding_model (EmbeddingModel): The embedding model used to compute cosine similarity. - - Returns: - EvaluationResult: Result with average similarity score and output metadata. - """ - def __init__(self, embedding_model: EmbeddingProviderModel): - self.embedding_model = embedding_model - if self.embedding_model is None: - logging.warning("DiversityCosineEvaluator isn't initialized with an embedding model") - - def evaluate( - self, - system_output: List[str], - **kwargs - ) -> EvaluationResult: - if not system_output: - logging.debug("DiversityCosineEvaluator didn't receive an output") - return EvaluationResult(is_correct=False, score=-1) - if self.embedding_model is None: - logging.debug("DiversityCosineEvaluator wasn't initialized with an embedding model") - return EvaluationResult(is_correct=False, score=-1) - if len(system_output) == 1: - logging.deubg("DiversityCosineEvaluator only received one string of text") - return EvaluationResult(is_correct=True, score=0) - - cosine = CosineSimilarity() - scores = [] - - # TODO IDEA: Compute embedding vectors for all system_output --> get the average - # Then compute cosine similarity between all other outputs - - # Compare every possible combination of system_output vectors - for i in range(len(system_output)): - for j in range(i + 1, len(system_output)): - sim = calculate_text_similarity( - system_output[i], system_output[j], self.embedding_model, metric=cosine - ) - scores.append(sim) - - avg_score = float(np.average(scores)) - - return EvaluationResult( - is_correct=True, - score=avg_score, - metadata={"responses": system_output} - ) - - -class DiversityCompressionEvaluator(IEvaluator[List[str], None]): - """Evaluator that measures diversity using a compression ratio heuristic. 
- - Lower compression ratio indicates higher textual diversity. The final score is scaled - based on a minimum number of responses (5) and minimum total character count (100). - - Args: - system_output (List[str]): List of generated responses. - - Returns: - EvaluationResult: Scaled diversity score based on compression. - """ - - def evaluate( - self, - system_output: List[str], - **kwargs - ) -> EvaluationResult: - if not system_output: - return EvaluationResult(is_correct=False, score=-1) - - total_chars = sum(len(r) for r in system_output) - # ratio = (size of compressed data) / (size of uncompressed data) - # Higher ratio is --> more diverse - ratio = 1 / compression_ratio(system_output) - # Penalize inputs with few words (hard to measure) and inputs with very few characters - # Note that this is a temporary patch for compression_ratio does not normalizing over word length - scaled_score = ratio * min(1, len(system_output) / 5) * min(1, total_chars / 100) - - return EvaluationResult( - is_correct=True, - score=scaled_score, - metadata={"responses": system_output} - ) - - -class DiversityEditDistanceEvaluator: - """Evaluator that measures lexical diversity using normalized Levenshtein edit distance. - - Computes average pairwise normalized edit distance across all outputs. - - Args: - system_output (List[str]): List of generated responses. - - Returns: - EvaluationResult: Average normalized edit distance score. 
- """ - - def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult: - if not system_output: - return EvaluationResult(is_correct=False, score=-1, metadata={}) - - score = self.compute_distance(system_output) - - return EvaluationResult( - is_correct=True, - score=score, - metadata={"responses": system_output} - ) - - def compute_distance(self, outputs: List[str]) -> float: - n = len(outputs) - if n < 2: - return 0.0 - - total_distance = 0.0 - num_pairs = 0 - - for i in range(n): - for j in range(i + 1, n): - dist = Levenshtein.distance(outputs[i], outputs[j]) - max_len = max(len(outputs[i]), len(outputs[j])) - norm_dist = dist / max_len if max_len > 0 else 0 - total_distance += norm_dist - num_pairs += 1 - - return total_distance / num_pairs if num_pairs > 0 else 0.0 - - -class DiversityNoveltyEvaluator: - """Evaluator that measures novelty of each output relative to previously generated ones. - - For each response, computes its cosine distance from all prior responses. - Higher novelty implies lower similarity to prior outputs. - - Args: - model (EmbeddingModel): Embedding model used for computing cosine similarity. - system_output (List[str]): List of outputs ordered by generation. - - Returns: - EvaluationResult: Average novelty score across the sequence. 
- """ - - def evaluate( - self, - model: EmbeddingProviderModel, - system_output: List[str], - **kwargs - ) -> EvaluationResult: - if not system_output: - return EvaluationResult(is_correct=False, score=-1, metadata={}) - - novelty_scores = [ - self.compute_novelty(model, r, system_output[:i]) - for i, r in enumerate(system_output) - ] - - avg_score = float(np.mean(novelty_scores)) if novelty_scores else 0.0 - - return EvaluationResult( - is_correct=True, - score=avg_score, - metadata={ - "responses": system_output, - "novelty_scores": novelty_scores - } - ) - - def compute_novelty( - self, - model: EmbeddingProviderModel, - response: str, - prior_responses: List[str] - ) -> float: - if not prior_responses: - return 1.0 - - new_emb = model.embed_text(response) - prior_embs = [model.embed_text(r) for r in prior_responses] - - similarities = [ - np.dot(new_emb, pe) / (np.linalg.norm(new_emb) * np.linalg.norm(pe)) - for pe in prior_embs - ] - - return 1.0 - max(similarities) - - class MultipleChoiceEvaluator(IEvaluator[str, str]): """Evaluator to check if a system output contains the correct multiple-choice answer. 
diff --git a/src/ember/examples/operators/diversity_operators_example.py b/src/ember/examples/operators/diversity_operators_example.py new file mode 100644 index 00000000..cbf264fa --- /dev/null +++ b/src/ember/examples/operators/diversity_operators_example.py @@ -0,0 +1,107 @@ +import os +import logging + +# Set global logging level to ERROR +logging.basicConfig(level=logging.ERROR) + +os.environ["EMBER_LOGGING_LEVEL"] = "ERROR" + +# from ember.core.registry.model.model_module.lm import LMModule, LMModuleConfig +from ember.core.registry.model.config.settings import initialize_registry +from ember.core.registry.model.base.services.model_service import ModelService +from ember.core.utils.eval.evaluators import (DiversityCosineSimilarityEvaluator, + DiversityEnsembledEvaluator, + DiversityEditDistanceEvaluator, + DiversityNoveltyEvaluator, + DiversityCompressionEvaluator +) + +from ember.core.registry.model.providers.openai.openai_provider import create_openai_embedding_model + + +model_registry = initialize_registry() +logging.info(model_registry.list_models()) + +text_embedding_ada_002 = create_openai_embedding_model("text-embedding-ada-002") + +# List of text that represents completely n +very_diverse_text = ["Bananas don't belong in briefcases, but socks and t-shirts do!", + "Abraham Lincoln was the 16th president of the United States of America", + "ERROR 404: Index Not Found"] + +# This group of text all rephrase the same request, except +different_words_not_diverse_strs = ["Could you please lend me a hand with this?", + "Might you assist me with a task?", + "Can you spare a second to help me do something?"] + +repetition_strs = ["This is a sample text with lots of repetition.", + "This is a sample text with lots of repetition.", + "This is a sample text with lots of repetition."] + +# List of sample strings that have varying levels of diversity: +test_strings = [very_diverse_text, different_words_not_diverse_strs, repetition_strs] + + +# Measure Cosine 
similarity +cosine_similarity_evaluator = DiversityCosineSimilarityEvaluator(text_embedding_ada_002) + +print("\n" + "=" * 50 ) +print("Cosine Similarity Evaluator\n") +for i in range(len(test_strings)): + print(f"Computing cosine-similarity for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = cosine_similarity_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Diversity score: {score}\n") + + +# Measure Edit Distance +print("=" * 50 + "\nEdit Distance Evaluator\n") +edit_distance_evaluator = DiversityEditDistanceEvaluator() + +for i in range(len(test_strings)): + print(f"Computing Edit-Distance for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = edit_distance_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Edit-Distance score: {score}\n") +print("=" * 50 + "\n") + + +# Measure Novelty +print("=" * 50 + "\nNovelty Evaluator\n") +novelty_evaluator = DiversityNoveltyEvaluator() + +for i in range(len(test_strings)): + print(f"Computing Novelty for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = novelty_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Novelty score: {score}\n") +print("=" * 50 + "\n") + + +# Measure Compression Ratio +print("=" * 50 + "\nCompression Ratio Evaluator\n") +novelty_evaluator = DiversityCompressionEvaluator() + +for i in range(len(test_strings)): + print(f"Computing Compression Ratio for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = novelty_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Compression Ratio: {score}\n") +print("=" * 50 + "\n") + + +# Measure Ensembled Diversity +print("=" * 50 + "\nEnsembled Diversity Evaluator\n") +novelty_evaluator = 
DiversityCompressionEvaluator() + +for i in range(len(test_strings)): + print(f"Computing Ensembled Diversity Score for the following strings: ") + for j in range(len(test_strings[i])): + print(f"String {j+1}: {test_strings[i][j]}") + score: float = novelty_evaluator.evaluate(system_output=test_strings[i]).score + print(f"Ensembled Diversity Score: {score}\n") +print("=" * 50 + "\n") \ No newline at end of file From 4f5f099cdba3caef2fcb20875db58ced8dfbcb49 Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Fri, 4 Apr 2025 21:44:42 -0700 Subject: [PATCH 10/14] changed testbench to use diversity operators implemented within the ember framework rather than implemented within the notebook --- src/ember/examples/diversity_testbench.ipynb | 454 +------------------ 1 file changed, 17 insertions(+), 437 deletions(-) diff --git a/src/ember/examples/diversity_testbench.ipynb b/src/ember/examples/diversity_testbench.ipynb index a7eee462..192e1236 100644 --- a/src/ember/examples/diversity_testbench.ipynb +++ b/src/ember/examples/diversity_testbench.ipynb @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -130,7 +130,6 @@ "# compression related items\n", "from diversity import compression_ratio\n", "import Levenshtein\n", - "from dataclasses import dataclass\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import pandas as pd\n", @@ -143,10 +142,7 @@ "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", "\n", "from ember.core.registry.model import load_model, ChatResponse\n", - "from ember.core.registry.model.base.services.model_service import ModelService\n", - "\n", - "from ember.core.utils.eval.base_evaluator import IEvaluator, EvaluationResult\n", - "from ember.core.utils.eval.extractors import RegexExtractor" + "from ember.core.registry.model.base.services.model_service import ModelService" ] }, { @@ -430,209 +426,6 @@ { "cell_type": 
"code", "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "################################################################\n", - "# 1) Embedding Model Interfaces & Implementations\n", - "################################################################\n", - "\n", - "\n", - "class EmbeddingModel(Protocol):\n", - " \"\"\"Interface for embedding models.\n", - "\n", - " This protocol defines the minimal interface required to compute a text\n", - " embedding. Implementations may use local models, external APIs, or custom\n", - " neural networks.\n", - "\n", - " Methods:\n", - " embed_text: Compute the embedding for a given text.\n", - " \"\"\"\n", - "\n", - " def embed_text(self, text: str) -> List[float]:\n", - " \"\"\"Computes the embedding vector for the provided text.\n", - "\n", - " Args:\n", - " text (str): The text to be embedded.\n", - "\n", - " Returns:\n", - " List[float]: A list of floats representing the embedding vector.\n", - " \"\"\"\n", - " ...\n", - "\n", - "class Text_Embedding_Ada_002_Model:\n", - " \"\"\"Interface for embedding models.\n", - "\n", - " This protocol defines the minimal interface required to compute a text\n", - " embedding. 
Implementations may use local models, external APIs, or custom\n", - " neural networks.\n", - "\n", - " Methods:\n", - " embed_text: Compute the embedding for a given text.\n", - " \"\"\"\n", - "\n", - " def embed_text(self, text: str) -> List[float]:\n", - " \"\"\"Computes the embedding vector for the provided text.\n", - "\n", - " Args:\n", - " text (str): The text to be embedded.\n", - "\n", - " Returns:\n", - " List[float]: A list of floats representing the embedding vector.\n", - " \"\"\"\n", - " response = llm(model_id=\"openai:text-embedding-ada-002\", prompt=text)\n", - " return response.embedding\n", - "\n", - "# class Text_Embedding_3_EmbeddingModel(Protocol):\n", - "# \"\"\"Interface for embedding models.\n", - "\n", - "# This protocol defines the minimal interface required to compute a text\n", - "# embedding. Implementations may use local models, external APIs, or custom\n", - "# neural networks.\n", - "\n", - "# Methods:\n", - "# embed_text: Compute the embedding for a given text.\n", - "# \"\"\"\n", - "\n", - "# def __init__(self, api_key: str = None):\n", - "# \"\"\"Initializes the embedding model with the OpenAI API key.\n", - "\n", - "# Args:\n", - "# api_key (str): OpenAI API key for authentication.\n", - "# \"\"\"\n", - "# self.api_key = api_key or os.environ.get(\"OPENAI_API_KEY\")\n", - "# if not self.api_key:\n", - "# raise ValueError(\"OpenAI API key must be provided or set in the environment variable OPENAI_API_KEY.\")\n", - "# openai.api_key = self.api_key\n", - "\n", - "# def embed_text(self, text: str) -> List[float]:\n", - "# \"\"\"Computes the embedding vector for the provided text.\n", - "\n", - "# Args:\n", - "# text (str): The text to be embedded.\n", - "\n", - "# Returns:\n", - "# List[float]: A list of floats representing the embedding vector.\n", - "# \"\"\"\n", - "# response = openai.Embedding.create(\n", - "# model=\"text-embedding-3\",\n", - "# input=text\n", - "# )\n", - "# return response[\"data\"][0][\"embedding\"]\n", - 
"\n", - "\n", - "class MockEmbeddingModel:\n", - " \"\"\"Mock implementation of an embedding model using naive ASCII encoding.\n", - "\n", - " This simple model converts each character in the text to a normalized ASCII\n", - " value. It is intended solely for demonstration and testing purposes.\n", - "\n", - " Methods:\n", - " embed_text: Converts text to a sequence of normalized ASCII values.\n", - " \"\"\"\n", - "\n", - " def embed_text(self, text: str) -> List[float]:\n", - " \"\"\"Embeds text by converting each character to its normalized ASCII code.\n", - "\n", - " Args:\n", - " text (str): The input text to be embedded.\n", - "\n", - " Returns:\n", - " List[float]: A list of floats representing the embedding. Returns an\n", - " empty list if the text is empty.\n", - " \"\"\"\n", - " if not text:\n", - " return []\n", - " return [ord(ch) / 256.0 for ch in text]\n", - "\n", - "\n", - "################################################################\n", - "# 2) Similarity Metric Interface & Implementations\n", - "################################################################\n", - "\n", - "\n", - "class SimilarityMetric(ABC):\n", - " \"\"\"Abstract base class for computing similarity between embedding vectors.\n", - "\n", - " Subclasses must implement the similarity method to calculate a similarity\n", - " score between two vectors.\n", - " \"\"\"\n", - "\n", - " @abstractmethod\n", - " def similarity(self, vec_a: List[float], vec_b: List[float]) -> float:\n", - " \"\"\"Calculates the similarity between two embedding vectors.\n", - "\n", - " Args:\n", - " vec_a (List[float]): The first embedding vector.\n", - " vec_b (List[float]): The second embedding vector.\n", - "\n", - " Returns:\n", - " float: The similarity score, typically in the range [0, 1] or [-1, 1].\n", - " \"\"\"\n", - " ...\n", - "\n", - "\n", - "class CosineSimilarity(SimilarityMetric):\n", - " \"\"\"Implementation of cosine similarity for embedding vectors.\n", - "\n", - " The cosine 
similarity is defined as:\n", - " similarity(a, b) = (a · b) / (||a|| * ||b||)\n", - "\n", - " Returns 0.0 if either vector is empty or if any vector's norm is zero.\n", - " \"\"\"\n", - "\n", - " def similarity(self, vec_a: List[float], vec_b: List[float]) -> float:\n", - " \"\"\"Computes cosine similarity between two embedding vectors.\n", - "\n", - " Args:\n", - " vec_a (List[float]): The first embedding vector.\n", - " vec_b (List[float]): The second embedding vector.\n", - "\n", - " Returns:\n", - " float: The cosine similarity score.\n", - " \"\"\"\n", - " if not vec_a or not vec_b:\n", - " return 0.0\n", - "\n", - " dot_product: float = sum(a * b for a, b in zip(vec_a, vec_b))\n", - " norm_a: float = math.sqrt(sum(a * a for a in vec_a))\n", - " norm_b: float = math.sqrt(sum(b * b for b in vec_b))\n", - " if norm_a == 0 or norm_b == 0:\n", - " return 0.0\n", - "\n", - " return dot_product / (norm_a * norm_b)\n", - "\n", - "\n", - "################################################################\n", - "# 3) High-Level Utility Function\n", - "################################################################\n", - "\n", - "\n", - "def calculate_text_similarity(\n", - " text1: str, text2: str, model: EmbeddingModel, metric: SimilarityMetric\n", - ") -> float:\n", - " \"\"\"Calculates text similarity using an embedding model and a similarity metric.\n", - "\n", - " This function generates embeddings for the provided texts and then computes a\n", - " similarity score using the given similarity metric.\n", - "\n", - " Args:\n", - " text1 (str): The first text string.\n", - " text2 (str): The second text string.\n", - " model (EmbeddingModel): An instance conforming to the embedding model interface.\n", - " metric (SimilarityMetric): An instance implementing a similarity metric.\n", - "\n", - " Returns:\n", - " float: The computed similarity score.\n", - " \"\"\"\n", - " embedding1: List[float] = model.embed_text(text=text1)\n", - " embedding2: List[float] = 
model.embed_text(text=text2)\n", - " return metric.similarity(vec_a=embedding1, vec_b=embedding2)" - ] - }, - { - "cell_type": "code", - "execution_count": 26, "metadata": { "scrolled": true }, @@ -651,7 +444,11 @@ } ], "source": [ - "embedding_model: Text_Embedding_Ada_002_Model = Text_Embedding_Ada_002_Model()\n", + "from src.ember.core.registry.model.providers.openai.openai_provider import create_openai_embedding_model\n", + "from src.ember.core.utils.embedding_utils import CosineSimilarity, calculate_text_similarity\n", + "# from src.ember.core.utils.eval.diversity_evaluators\n", + "\n", + "embedding_model = create_openai_embedding_model()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", "\n", "text_a: str = \"Hello world!\"\n", @@ -686,124 +483,7 @@ }, { "cell_type": "code", - "execution_count": 80, - "metadata": {}, - "outputs": [], - "source": [ - "T_out = TypeVar(\"T_out\")\n", - "T_truth = TypeVar(\"T_truth\")\n", - "\n", - "class ComposedEvaluator(IEvaluator[T_out, T_truth], Generic[T_out, T_truth]):\n", - " \"\"\"Combines an output extractor with an evaluator for the extracted data.\n", - "\n", - " This evaluator first transforms the system output using the provided extractor,\n", - " then evaluates the extracted value using the specified base evaluator.\n", - "\n", - " Args:\n", - " extractor: An object with an `extract` method to process the system output.\n", - " base_evaluator (IEvaluator): An evaluator that processes the extracted output.\n", - "\n", - " Returns:\n", - " EvaluationResult: The result of the evaluation.\n", - " \"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " extractor: Any, # Expecting an extractor with an `extract` method.\n", - " base_evaluator: IEvaluator[Any, Any],\n", - " ) -> None:\n", - " self.extractor = extractor\n", - " self.base_evaluator = base_evaluator\n", - "\n", - " def evaluate(\n", - " self, system_output: T_out, correct_answer: Any, **kwargs: Any\n", - " ) -> EvaluationResult:\n", - " 
\"\"\"Evaluates the provided system output against the correct answer.\n", - "\n", - " Args:\n", - " system_output (T_out): The raw output generated by the system.\n", - " correct_answer (Any): The expected correct answer.\n", - " **kwargs: Additional keyword arguments for extraction or evaluation.\n", - "\n", - " Returns:\n", - " EvaluationResult: The result of evaluating the extracted value.\n", - " \"\"\"\n", - " extracted_value = self.extractor.extract(system_output, **kwargs)\n", - " return self.base_evaluator.evaluate(extracted_value, correct_answer, **kwargs)\n", - "\n", - "\n", - "# Basic Evaluators\n", - "\n", - "\n", - "class ExactMatchEvaluator(IEvaluator[str, str]):\n", - " \"\"\"Evaluator to check for an exact match between two strings,\n", - " ignoring differences in whitespace and case.\n", - "\n", - " Example:\n", - " evaluator = ExactMatchEvaluator()\n", - " result = evaluator.evaluate(\"Hello World\", \"hello world\")\n", - "\n", - " Args:\n", - " compare_fn (Optional[Callable[[str, str], bool]]): Optional custom comparison function.\n", - " If not provided, strings are normalized (whitespace removed, lowercase) before comparison.\n", - "\n", - " Returns:\n", - " EvaluationResult: The result containing a correctness flag and a score.\n", - " \"\"\"\n", - "\n", - " def __init__(self, compare_fn: Optional[Callable[[str, str], bool]] = None) -> None:\n", - " self.compare_fn = compare_fn or self._default_compare\n", - "\n", - " def _default_compare(self, str1: str, str2: str) -> bool:\n", - " \"\"\"Default string comparison function that ignores case and whitespace.\n", - "\n", - " Args:\n", - " str1 (str): First string to compare\n", - " str2 (str): Second string to compare\n", - "\n", - " Returns:\n", - " bool: True if strings match after normalization\n", - " \"\"\"\n", - " return str1.strip().lower() == str2.strip().lower()\n", - "\n", - " def evaluate(\n", - " self, system_output: str, correct_answer: str, **kwargs: Any\n", - " ) -> 
EvaluationResult:\n", - " \"\"\"Evaluates whether a system output exactly matches the correct answer.\n", - "\n", - " Args:\n", - " system_output (str): The system-generated string.\n", - " correct_answer (str): The expected answer string.\n", - " **kwargs: Additional keyword arguments (unused).\n", - "\n", - " Returns:\n", - " EvaluationResult: An object with `is_correct` set to True if the normalized strings match,\n", - " along with a corresponding score.\n", - " \"\"\"\n", - " is_correct = self.compare_fn(system_output, correct_answer)\n", - " score = 1.0 if is_correct else 0.0\n", - " return EvaluationResult(is_correct=is_correct, score=score)\n", - "\n", - "class DiversityCompressionEvaluator(IEvaluator[List[str], None]):\n", - " \"\"\"\n", - " Evaluator to test ensemble outputs -> score them (float)\n", - " \"\"\"\n", - " def evaluate(\n", - " self, \n", - " system_output: List[str], \n", - " **kwargs) -> EvaluationResult:\n", - " if system_output is None or len(system_output) == 0:\n", - " return EvaluationResult(is_correct=False, score=-1)\n", - "\n", - " # current compression ratio formula - scaled by min num of words (5 words) + min num of chars (min 100)\n", - " letter_sum = sum(len(response) for response in system_output)\n", - " ratio = 1/compression_ratio(system_output, algorithm=\"gzip\") * min(1, len(system_output)/5) * min(1, letter_sum/100)\n", - " return EvaluationResult(is_correct=True,score=ratio,metadata = {'responses': system_output})\n" - ] - }, - { - "cell_type": "code", - "execution_count": 78, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -817,15 +497,9 @@ } ], "source": [ - "compression_evaluator = DiversityCompressionEvaluator()\n", + "from src.ember.core.utils.eval.diversity_evaluators import DiversityCompressionEvaluator\n", "\n", - "# input_strs = [\n", - "# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. 
Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", - "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", - "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", - "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. 
When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", - "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", - "# ]\n", + "compression_evaluator = DiversityCompressionEvaluator()\n", "\n", "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", "\n", @@ -847,51 +521,6 @@ "### Edit Distance" ] }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class EvaluationResult:\n", - " is_correct: bool\n", - " score: float\n", - " metadata: dict\n", - "\n", - "class DiversityEditDistanceEvaluator:\n", - "\n", - " def evaluate(self, system_output: List[str], **kwargs) -> EvaluationResult:\n", - " if system_output is None or len(system_output) == 0:\n", - " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", - "\n", - " diversity_score = self.compute_distance(system_output)\n", - "\n", - " return EvaluationResult(\n", - " is_correct=True, \n", - " score=diversity_score,\n", - " metadata={'responses': system_output}\n", - " )\n", - "\n", - " def 
compute_distance(self, outputs: List[str]) -> float:\n", - " n = len(outputs)\n", - " if n < 2:\n", - " return 0.0\n", - "\n", - " total_distance = 0\n", - " pairs = 0\n", - "\n", - " for i in range(n):\n", - " for j in range(i + 1, n):\n", - " dist = Levenshtein.distance(outputs[i], outputs[j])\n", - " max_len = max(len(outputs[i]), len(outputs[j]))\n", - " normalized_dist = dist / max_len if max_len > 0 else 0 \n", - " total_distance += normalized_dist\n", - " pairs += 1\n", - " \n", - " return total_distance / pairs if pairs > 0 else 0.0\n" - ] - }, { "cell_type": "code", "execution_count": 30, @@ -908,15 +537,9 @@ } ], "source": [ - "distance_evaluator = DiversityEditDistanceEvaluator()\n", + "from src.ember.core.utils.eval.diversity_evaluators import DiversityEditDistanceEvaluator\n", "\n", - "# input_strs = [\n", - "# \";lkjawefopajwiefpoij23jf9aj8sdfj8903jf908j -- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\",\n", - "# \"fej89qw098efjq29f38j0938j20f398jqwe098fjq98wf -- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. 
It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\",\n", - "# \"Effective communication is a cornerstone of a successful work environment. When employees communicate clearly and efficiently, it improves the overall flow of work and enhances collaboration. Clear exchanges of ideas help to eliminate confusion, build mutual trust, and ensure that everyone is aligned in their goals. Additionally, strong communication skills are key to managing conflicts and setting clear expectations among teams. Whether in meetings, emails, or other formats, being able to communicate effectively contributes to a thriving and efficient workplace.\",\n", - "# \"The role of communication in the workplace cannot be overlooked. It serves as the foundation for successful teamwork and organizational growth. When team members share information clearly, it promotes a collaborative atmosphere and reduces the risk of errors or misinterpretations. Strong communication is also vital in building relationships, resolving issues, and making sure everyone is on the same page. Whether it's verbal exchanges or written correspondence, honing your ability to communicate well is vital for fostering an effective work environment.\",\n", - "# \"Communication within the workplace is a vital element for success. Clear and open communication promotes a cooperative and efficient atmosphere, helping team members to better understand each other’s ideas and work toward common goals. It reduces confusion, builds trust, and allows for smoother problem-solving when conflicts arise. By conveying thoughts and expectations effectively, individuals can create stronger working relationships and a productive team dynamic. 
Whether through emails, phone calls, or face-to-face interactions, mastering communication techniques is key for professional achievement.\",\n", - "# ]\n", + "distance_evaluator = DiversityEditDistanceEvaluator()\n", "\n", "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", "\n", @@ -938,50 +561,6 @@ "### Novelty Score (WIP)" ] }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class EvaluationResult:\n", - " is_correct: bool\n", - " score: float\n", - " metadata: dict\n", - "\n", - "class DiversityNoveltyEvaluator:\n", - " \n", - " def evaluate(self, model: EmbeddingModel, system_output: List[str], **kwargs) -> EvaluationResult:\n", - " if not system_output or len(system_output) == 0:\n", - " return EvaluationResult(is_correct=False, score=-1, metadata={})\n", - "\n", - " novelty_scores = [self.compute_novelty(model, r, system_output[:i]) for i, r in enumerate(system_output)]\n", - "\n", - " avg_novelty = sum(novelty_scores) / len(novelty_scores) if novelty_scores else 0.0\n", - "\n", - " return EvaluationResult(\n", - " is_correct=True,\n", - " score=avg_novelty,\n", - " metadata={'responses': system_output, 'novelty_scores': novelty_scores}\n", - " )\n", - "\n", - " def compute_novelty(self, model: EmbeddingModel, response: str, prior_responses: List[str]) -> float:\n", - " if not prior_responses:\n", - " return 1.0\n", - "\n", - " new_embedding = model.embed_text(response)\n", - " prior_embeddings = [model.embed_text(r) for r in prior_responses]\n", - "\n", - " similarities = [\n", - " np.dot(new_embedding, prior_embedding) /\n", - " (np.linalg.norm(new_embedding) * np.linalg.norm(prior_embedding))\n", - " for prior_embedding in prior_embeddings\n", - " ]\n", - "\n", - " return 1 - max(similarities)\n" - ] - }, { "cell_type": "code", "execution_count": null, @@ -998,11 +577,12 @@ } ], "source": [ - "novelty_evaluator = DiversityNoveltyEvaluator()\n", + "from 
src.ember.core.utils.eval.diversity_evaluators import DiversityNoveltyEvaluator\n", + "\n", + "novelty_evaluator = DiversityNoveltyEvaluator(create_openai_embedding_model())\n", "\n", "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", "\n", - "embedding_model: Text_Embedding_Ada_002_Model = Text_Embedding_Ada_002_Model()\n", "novelty = novelty_evaluator.evaluate(embedding_model, input_strs)\n", "\n", "print(f\"Novelty Score: {novelty.score:.4f}\")\n", @@ -1029,11 +609,11 @@ }, { "cell_type": "code", - "execution_count": 82, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "embedding_model: Text_Embedding_Ada_002_Model = Text_Embedding_Ada_002_Model()\n", + "embedding_model = create_openai_embedding_model()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", "exact_evaluator = ExactMatchEvaluator()\n", "compression_evaluator = DiversityCompressionEvaluator()\n", From 62604bae6037bba236eded6305a61e040c1a4315 Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Fri, 4 Apr 2025 21:50:32 -0700 Subject: [PATCH 11/14] modified eval's __init__.py file to include diversity operators, also minor fixes in diversity testbench --- src/ember/core/utils/eval/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/ember/core/utils/eval/__init__.py b/src/ember/core/utils/eval/__init__.py index b3cba4a4..33304c9c 100644 --- a/src/ember/core/utils/eval/__init__.py +++ b/src/ember/core/utils/eval/__init__.py @@ -12,6 +12,13 @@ NumericToleranceEvaluator, PartialRegexEvaluator, ) +from .diversity_evaluators import ( + DiversityEnsembledEvaluator, + DiversityCosineSimilarityEvaluator, + DiversityEditDistanceEvaluator, + DiversityCompressionEvaluator, + DiversityNoveltyEvaluator, +) from .pipeline import ( BatchEvaluationSummary, PipelineEvaluator, From a25e7d36c1c3c6300ef94001f550f13558e88590 Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Fri, 4 Apr 2025 21:54:10 -0700 Subject: [PATCH 12/14] removed todo in chat_schemas.py --- 
src/ember/core/registry/model/base/schemas/chat_schemas.py | 2 +- src/ember/examples/diversity_testbench.ipynb | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/ember/core/registry/model/base/schemas/chat_schemas.py b/src/ember/core/registry/model/base/schemas/chat_schemas.py index 9dfa2dcc..fb4649b3 100644 --- a/src/ember/core/registry/model/base/schemas/chat_schemas.py +++ b/src/ember/core/registry/model/base/schemas/chat_schemas.py @@ -115,6 +115,6 @@ class ChatResponse(BaseModel): """ data: str - embedding: list[float] = None # TODO: Fix embedding model structure + embedding: list[float] = None raw_output: Any = None usage: Optional[UsageStats] = None diff --git a/src/ember/examples/diversity_testbench.ipynb b/src/ember/examples/diversity_testbench.ipynb index 192e1236..2129cd00 100644 --- a/src/ember/examples/diversity_testbench.ipynb +++ b/src/ember/examples/diversity_testbench.ipynb @@ -35,7 +35,7 @@ "- `uv venv`\n", "- `uv pip install ember-ai`\n", "- `uv pip install pip`\n", - "- `source .vemv/bin/activate`\n", + "- `source .venv/bin/activate`\n", "\n", "Plus, add your OpenAI API key -> environ\n", "- `export OPENAI_API_KEY=` in the terminal\n", @@ -446,7 +446,6 @@ "source": [ "from src.ember.core.registry.model.providers.openai.openai_provider import create_openai_embedding_model\n", "from src.ember.core.utils.embedding_utils import CosineSimilarity, calculate_text_similarity\n", - "# from src.ember.core.utils.eval.diversity_evaluators\n", "\n", "embedding_model = create_openai_embedding_model()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", @@ -615,7 +614,6 @@ "source": [ "embedding_model = create_openai_embedding_model()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", - "exact_evaluator = ExactMatchEvaluator()\n", "compression_evaluator = DiversityCompressionEvaluator()\n", "edit_dist_evaluator = DiversityEditDistanceEvaluator()\n", "\n", From 3dc3e082a9b1e7c2941a0ae2fee66d3585944121 Mon Sep 17 00:00:00 2001 From: 
connorchow Date: Fri, 4 Apr 2025 22:46:39 -0700 Subject: [PATCH 13/14] updated testbench to use ember package + integrate jason's changes --- src/ember/examples/diversity_testbench.ipynb | 173 +++++++++---------- 1 file changed, 79 insertions(+), 94 deletions(-) diff --git a/src/ember/examples/diversity_testbench.ipynb b/src/ember/examples/diversity_testbench.ipynb index 2129cd00..79c09852 100644 --- a/src/ember/examples/diversity_testbench.ipynb +++ b/src/ember/examples/diversity_testbench.ipynb @@ -44,7 +44,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -62,7 +62,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -83,7 +83,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -91,16 +91,12 @@ "output_type": "stream", "text": [ "\u001b[2mUsing Python 3.11.9 environment at: /Users/concon/research/ember-branch/ember/.venv\u001b[0m\n", - "\u001b[2mAudited \u001b[1m1 package\u001b[0m \u001b[2min 10ms\u001b[0m\u001b[0m\n" + "\u001b[2mAudited \u001b[1m1 package\u001b[0m \u001b[2min 27ms\u001b[0m\u001b[0m\n" ] } ], "source": [ "# !uv pip install -q -e .\n", - "!uv pip install -q google-generativeai==0.7.2\n", - "\n", - "# embedding model dependencies\n", - "!uv pip install -q openai\n", "\n", "# compression ratio dependencies\n", "!uv pip install diversity==0.2.0\n", @@ -115,7 +111,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -133,6 +129,7 @@ "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import pandas as pd\n", + "import tqdm\n", "\n", "# ember repo loads\n", "from ember.core.registry.model.config.settings import initialize_registry\n", @@ -142,7 +139,11 @@ "from ember.core.registry.model.base.schemas.provider_info import ProviderInfo\n", "\n", "from 
ember.core.registry.model import load_model, ChatResponse\n", - "from ember.core.registry.model.base.services.model_service import ModelService" + "from ember.core.registry.model.base.services.model_service import ModelService\n", + "\n", + "from ember.core.registry.model.providers.openai.openai_provider import create_openai_embedding_model\n", + "from ember.core.utils.embedding_utils import CosineSimilarity, calculate_text_similarity\n", + "from ember.core.utils.eval.diversity_evaluators import DiversityCosineSimilarityEvaluator, DiversityEditDistanceEvaluator, DiversityCompressionEvaluator, DiversityNoveltyEvaluator, DiversityEnsembledEvaluator" ] }, { @@ -154,7 +155,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 18, "metadata": { "scrolled": true }, @@ -175,7 +176,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 19, "metadata": { "scrolled": true }, @@ -184,28 +185,27 @@ "data": { "text/plain": [ "['openai:gpt-4o-audio-preview-2024-12-17',\n", - " 'openai:gpt-4o-realtime-preview-2024-12-17',\n", " 'openai:dall-e-3',\n", " 'openai:dall-e-2',\n", " 'openai:gpt-4o-audio-preview-2024-10-01',\n", - " 'openai:gpt-4o-mini-realtime-preview-2024-12-17',\n", - " 'openai:gpt-4o-mini-realtime-preview',\n", " 'openai:gpt-4o-realtime-preview-2024-10-01',\n", " 'openai:gpt-4o-transcribe',\n", " 'openai:gpt-4o-mini-transcribe',\n", " 'openai:gpt-4o-realtime-preview',\n", " 'openai:gpt-4o-mini-tts',\n", + " 'openai:gpt-4o-realtime-preview-2024-12-17',\n", " 'openai:text-embedding-3-large',\n", " 'openai:gpt-4',\n", " 'openai:text-embedding-ada-002',\n", " 'openai:gpt-4o-mini-audio-preview',\n", " 'openai:gpt-4o-audio-preview',\n", + " 'openai:gpt-4o-mini-realtime-preview',\n", + " 'openai:gpt-4o-mini-realtime-preview-2024-12-17',\n", " 'openai:gpt-3.5-turbo-instruct-0914',\n", " 'openai:gpt-4o-mini-search-preview',\n", - " 'openai:gpt-4-0125-preview',\n", " 'openai:gpt-4-turbo-preview',\n", + " 
'openai:gpt-4-0125-preview',\n", " 'openai:gpt-3.5-turbo-1106',\n", - " 'openai:gpt-4o-search-preview',\n", " 'openai:gpt-4-turbo',\n", " 'openai:gpt-3.5-turbo-instruct',\n", " 'openai:gpt-3.5-turbo',\n", @@ -213,10 +213,11 @@ " 'openai:gpt-4o-2024-11-20',\n", " 'openai:gpt-3.5-turbo-0125',\n", " 'openai:gpt-4o-2024-05-13',\n", - " 'openai:gpt-4-turbo-2024-04-09',\n", " 'openai:gpt-3.5-turbo-16k',\n", + " 'openai:gpt-4-turbo-2024-04-09',\n", " 'openai:gpt-4-1106-preview',\n", " 'openai:gpt-4-0613',\n", + " 'openai:gpt-4o-search-preview',\n", " 'openai:gpt-4.5-preview',\n", " 'openai:gpt-4.5-preview-2025-02-27',\n", " 'openai:gpt-4o-search-preview-2025-03-11',\n", @@ -228,7 +229,7 @@ " 'openai:gpt-4o-mini-audio-preview-2024-12-17']" ] }, - "execution_count": 20, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -239,7 +240,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -276,7 +277,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -292,7 +293,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -302,28 +303,27 @@ "➡️ Testing model: openai:o1\n", "❌ Error with model openai:o1: [Error 3002] Model 'openai:o1' not found. 
Available models:\n", "- openai:gpt-4o-audio-preview-2024-12-17\n", - "- openai:gpt-4o-realtime-preview-2024-12-17\n", "- openai:dall-e-3\n", "- openai:dall-e-2\n", "- openai:gpt-4o-audio-preview-2024-10-01\n", - "- openai:gpt-4o-mini-realtime-preview-2024-12-17\n", - "- openai:gpt-4o-mini-realtime-preview\n", "- openai:gpt-4o-realtime-preview-2024-10-01\n", "- openai:gpt-4o-transcribe\n", "- openai:gpt-4o-mini-transcribe\n", "- openai:gpt-4o-realtime-preview\n", "- openai:gpt-4o-mini-tts\n", + "- openai:gpt-4o-realtime-preview-2024-12-17\n", "- openai:text-embedding-3-large\n", "- openai:gpt-4\n", "- openai:text-embedding-ada-002\n", "- openai:gpt-4o-mini-audio-preview\n", "- openai:gpt-4o-audio-preview\n", + "- openai:gpt-4o-mini-realtime-preview\n", + "- openai:gpt-4o-mini-realtime-preview-2024-12-17\n", "- openai:gpt-3.5-turbo-instruct-0914\n", "- openai:gpt-4o-mini-search-preview\n", - "- openai:gpt-4-0125-preview\n", "- openai:gpt-4-turbo-preview\n", + "- openai:gpt-4-0125-preview\n", "- openai:gpt-3.5-turbo-1106\n", - "- openai:gpt-4o-search-preview\n", "- openai:gpt-4-turbo\n", "- openai:gpt-3.5-turbo-instruct\n", "- openai:gpt-3.5-turbo\n", @@ -331,10 +331,11 @@ "- openai:gpt-4o-2024-11-20\n", "- openai:gpt-3.5-turbo-0125\n", "- openai:gpt-4o-2024-05-13\n", - "- openai:gpt-4-turbo-2024-04-09\n", "- openai:gpt-3.5-turbo-16k\n", + "- openai:gpt-4-turbo-2024-04-09\n", "- openai:gpt-4-1106-preview\n", "- openai:gpt-4-0613\n", + "- openai:gpt-4o-search-preview\n", "- openai:gpt-4.5-preview\n", "- openai:gpt-4.5-preview-2025-02-27\n", "- openai:gpt-4o-search-preview-2025-03-11\n", @@ -346,14 +347,14 @@ "- openai:gpt-4o-mini-audio-preview-2024-12-17 [Recovery: Check the model name and ensure it's correctly registered] [Context: caller_file='/Users/concon/research/ember-branch/ember/src/ember/core/registry/model/base/registry/model_registry.py', caller_function='get_model', caller_lineno=144]\n", "➡️ Testing model: openai:gpt-4o\n", "🛎️ Service response from 
openai:gpt-4o:\n", - "Quantum computing utilizes quantum bits (qubits) to perform computations using principles of quantum mechanics, such as superposition and entanglement. This enables exponentially faster processing for specific tasks compared to classical computers, promising breakthroughs in fields like cryptography, optimization, and complex simulations. It remains largely experimental but rapidly advancing.\n", + "Quantum computing harnesses quantum mechanics to process information using quantum bits (qubits), which exist in multiple states simultaneously (superposition). It leverages entanglement and interference for complex computations, outperforming classical computers in certain tasks by solving problems exponentially faster, revolutionizing fields like cryptography, optimization, and drug discovery.\n", "\n", "🎯 Direct response from openai:gpt-4o:\n", "The capital of France is Paris.\n", "\n", "➡️ Testing model: openai:gpt-4o-mini\n", "🛎️ Service response from openai:gpt-4o-mini:\n", - "Quantum computing utilizes the principles of quantum mechanics to process information. Unlike classical computers, which use bits as 0s and 1s, quantum computers use qubits, allowing for superposition and entanglement. This enables them to perform complex calculations much faster, potentially revolutionizing fields like cryptography, optimization, and drug discovery.\n", + "Quantum computing utilizes quantum bits, or qubits, which can exist in multiple states simultaneously due to superposition. This allows quantum computers to process complex problems at unprecedented speeds compared to classical computers. 
Entanglement further enhances processing power, enabling efficient solutions for tasks like cryptography, optimization, and complex simulations.\n", "\n", "🎯 Direct response from openai:gpt-4o-mini:\n", "The capital of France is Paris.\n", @@ -388,7 +389,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -425,7 +426,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "metadata": { "scrolled": true }, @@ -434,7 +435,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Cosine similarity Score: 0.7289\n", + "Cosine similarity Score: 0.7287\n", "\n", "Cosine similarity Score: 0.8205\n", "\n", @@ -444,9 +445,6 @@ } ], "source": [ - "from src.ember.core.registry.model.providers.openai.openai_provider import create_openai_embedding_model\n", - "from src.ember.core.utils.embedding_utils import CosineSimilarity, calculate_text_similarity\n", - "\n", "embedding_model = create_openai_embedding_model()\n", "cosine: CosineSimilarity = CosineSimilarity()\n", "\n", @@ -482,7 +480,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "metadata": {}, "outputs": [ { @@ -496,8 +494,6 @@ } ], "source": [ - "from src.ember.core.utils.eval.diversity_evaluators import DiversityCompressionEvaluator\n", - "\n", "compression_evaluator = DiversityCompressionEvaluator()\n", "\n", "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", @@ -522,7 +518,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 26, "metadata": {}, "outputs": [ { @@ -536,8 +532,6 @@ } ], "source": [ - "from src.ember.core.utils.eval.diversity_evaluators import DiversityEditDistanceEvaluator\n", - "\n", "distance_evaluator = DiversityEditDistanceEvaluator()\n", "\n", "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", @@ -562,7 +556,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "metadata": {}, "outputs": [ 
{ @@ -576,13 +570,11 @@ } ], "source": [ - "from src.ember.core.utils.eval.diversity_evaluators import DiversityNoveltyEvaluator\n", - "\n", "novelty_evaluator = DiversityNoveltyEvaluator(create_openai_embedding_model())\n", "\n", "input_strs = [\"hi there\", \"hi\", \"hello\", \"yo whatup\"]\n", "\n", - "novelty = novelty_evaluator.evaluate(embedding_model, input_strs)\n", + "novelty = novelty_evaluator.evaluate(input_strs)\n", "\n", "print(f\"Novelty Score: {novelty.score:.4f}\")\n", "print(f\"Is Correct: {novelty.is_correct}\")\n", @@ -608,36 +600,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 28, "metadata": {}, "outputs": [], "source": [ - "embedding_model = create_openai_embedding_model()\n", - "cosine: CosineSimilarity = CosineSimilarity()\n", + "text_embedding_ada_002 = create_openai_embedding_model(\"text-embedding-ada-002\")\n", + "\n", + "cosine_evaluator = DiversityCosineSimilarityEvaluator(embedding_model=text_embedding_ada_002)\n", "compression_evaluator = DiversityCompressionEvaluator()\n", "edit_dist_evaluator = DiversityEditDistanceEvaluator()\n", + "diversity_evaluator = DiversityEnsembledEvaluator(embedding_model=embedding_model)\n", "\n", "def ensemble_diversity(strings):\n", - " compression = compression_evaluator.evaluate(strings)\n", - " cosine_scores = list()\n", - " for ind1 in range(len(strings)):\n", - " ind2 = ind1+1 if ind1+1 != len(strings) else 0\n", - " curr_score = calculate_text_similarity(text1=strings[ind1], text2=strings[ind2], model=embedding_model, metric=cosine)\n", - " # print(f\"SimilarityScore between ind1={ind1} and ind2={ind2}: {curr_score}\")\n", - " cosine_scores.append(curr_score)\n", - " avg_cosine_score = np.average(cosine_scores)\n", - " edit_distance = edit_dist_evaluator.evaluate(strings)\n", - "\n", - " div_cosine = 1 - avg_cosine_score\n", - " div_compression = min(compression.score, 1)\n", - " div_edit = edit_distance.score\n", - " div_ensemble_score = (div_cosine + 
div_compression + div_edit)/3\n", - "\n", - " # print(f\"diversity cosine-sim inverse: {div_cosine:.4f}\")\n", - " # print(f\"compression (1/compression == compression/original) result: {div_compression:.4f}\")\n", - " # print(f\"edit-dist score: {div_edit:.4f}\")\n", - " # print(f\"diversity score (higher is better): {div_ensemble_score:.4f}\")\n", - " # print(\"-------------------------------\")\n", + " div_cosine = cosine_evaluator.evaluate(strings).score\n", + " div_compression = compression_evaluator.evaluate(strings).score\n", + " div_edit = edit_dist_evaluator.evaluate(strings).score\n", + " div_ensemble_score = diversity_evaluator.evaluate(strings).score\n", " return div_cosine, div_compression, div_edit, div_ensemble_score" ] }, @@ -650,7 +628,7 @@ }, { "cell_type": "code", - "execution_count": 83, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -693,7 +671,7 @@ }, { "cell_type": "code", - "execution_count": 84, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -705,32 +683,32 @@ "\t- This is a sample text with lots of repetition.\n", "\t- This is a sample text with lots of repetition.\n", "1 -------------------\n", - "\t- Why don’t skeletons fight each other? They don’t have the guts.\n", + "\t- Why don't scientists trust atoms?Because they make up everything!\n", "\t- Why don't skeletons fight each other? They don't have the guts.\n", - "\t- Why did the scarecrow win an award? Because he was outstanding in his field!\n", "\t- Why don't skeletons fight each other? They don't have the guts!\n", - "\t- Why did the scarecrow win an award?Because he was outstanding in his field!\n", "\t- Why don't skeletons fight each other? They don't have the guts!\n", "\t- Why don't skeletons fight each other? They don't have the guts.\n", - "\t- Why don't skeletons fight each other? They don't have the guts.\n", - "\t- Why don’t skeletons fight each other? They don’t have the guts.\n", - "\t- Why don't skeletons fight each other? 
They don't have the guts.\n", + "\t- Why did the scarecrow win an award? Because he was outstanding in his field!\n", + "\t- Why do cows have hooves instead of feet? Because they lactose!\n", + "\t- Why don't scientists trust atoms?Because they make up everything!\n", + "\t- Why did the scarecrow win an award? Because he was outstanding in his field!\n", + "\t- Why don't scientists trust atoms? Because they make up everything!\n", "2 -------------------\n", - "\t- Why don't scientists trust atoms? Because they make up everything! \n", - "\t- I told my wife she was drawing her eyebrows too high. She looked surprised. \n", - "\t- Why did the scarecrow win an award? Because he was outstanding in his field! \n", - "\t- I told my computer I needed a break, and it said, \"No problem, I'll go to sleep.\" \n", - "\t- How does a penguin build its house? Igloos it together! \n", - "\t- Why don't skeletons fight each other? They don't have the guts. \n", - "\t- I used to play piano by ear, but now I use my hands. \n", - "\t- What do you call fake spaghetti? An impasta. \n", - "\t- Why did the bicycle collapse? It was two-tired. \n", - "\t- Want to hear a construction joke? Oh, sorry, I'm still working on it.\n", + "\t- Why did the scarecrow win an award? Because he was outstanding in his field! \n", + "\t- Why don’t scientists trust atoms? Because they make up everything! \n", + "\t- What do you call fake spaghetti? An impasta! \n", + "\t- Why was the math book sad? Because it had too many problems. \n", + "\t- How does a penguin build its house? Igloos it together. \n", + "\t- What do you call cheese that isn't yours? Nacho cheese! \n", + "\t- Why did the bicycle fall over? Because it was two-tired! \n", + "\t- How does a cucumber become a pickle? It goes through a jarring experience. \n", + "\t- Why did the golfer bring two pairs of pants? In case he got a hole in one! \n", + "\t- What do you get when you cross a snowman and a vampire? 
Frostbite!\n", "3 -------------------\n", - "\t- In a serene forest, nestled within the roots of an ancient oak, a hidden village harnessed quantum computers. These magical machines harnessed qubits, capable of existing simultaneously in multiple states, like leaves whispering secrets to the wind. Villagers marveled as these quantum leaves, entangled in a delicate dance, solved complex problems at unprecedented speeds. Guiding whispers directed them through intricate patterns, while the village's wise sage, Schrödinger, invoked superposition spells, merging probabilities into certainty. With each computation completed, the villagers celebrated, knowing their world was forever changed by the mystical harmony of quantum entanglement under the watchful oak.\n", - "\t- In a sunlit meadow, a group of bunnies frolicked in the lush, emerald grass. Jasper, the adventurous one, led the way, his fluffy tail bouncing playfully. Poppy and Luna followed close behind, their ears perked and alert. The air shimmered with the fragrance of blooming daisies, and butterflies danced above them like tiny, delicate kites. Jasper paused, discovering a hidden patch of clover, prompting a joyous feast. As the sun dipped toward the horizon, painting the sky in hues of pink and gold, the bunnies settled into a cozy circle, content and safe under the watchful gaze of the moon.\n", - "\t- In the lush forests of Viridian, Pikachu found an ancient map leading to the Thunder Stone of Legends. Eager for adventure, it sprinted past towering oaks and over babbling brooks. Along the way, Pikachu met a lost Pidgey, guiding it safely back to its nest. Grateful, the Pidgey gifted Pikachu a feather for luck. Further on, an angry Onix blocked the path, but Pikachu’s electric agility dazzled the rocky giant, earning its respect. Reaching the hidden grotto, Pikachu uncovered the Thunder Stone, glowing brilliantly. 
Empowered, Pikachu returned home, its spirit alive with newfound wisdom and friends aplenty.\n", - "\t- Nestled in a quiet Tokyo alley, Ichiro’s ramen shop was legendary. Locals whispered about his secret broth, simmered for hours with a mysterious spice blend. One rainy evening, a weary traveler stumbled in, drawn by the rich aroma. As the traveler savored the first bite, memories flooded back—his grandmother’s kitchen and long-lost family gatherings. The warmth of the broth melted years of solitude. Ichiro, sensing the traveler’s nostalgia, shared a knowing smile. In that cozy nook, where stories weaved into noodles, everyone discovered more than a meal; they found connection, hope, and for the traveler, a place called home.\n", + "\t- In a vibrant quantum lab, tiny particles danced. Unlike regular bits that were zeros or ones, qubits spun like magical coins, showing both at once. Entwined like celestial partners, these qubits shared secrets through entanglement, defying distances. Scientists orchestrated this cosmic ballet, using algorithms like ancient spells to solve complex puzzles. As qubits calmed into states with precision, their whispers unlocked solutions to problems once deemed impossible. The quantum computer, a symphony of probability and possibility, became a lighthouse of advancement, shining new paths in medicine, cryptography, and beyond, merging the mysteries of the quantum world with human ingenuity.\n", + "\t- In a meadow kissed by sunlight, a group of bunnies joyfully frolicked in the lush, emerald grass. Their soft fur shimmered in the golden rays as they leapt and twirled, ears flopping jauntily. One brave bunny ventured to the center, starting a playful chase that sent them all scampering in mirthful circles. As they paused, catching breath in the shade of a towering oak, a gentle breeze whispered through the trees. 
The bunnies nuzzled together, sharing the serenity of the moment, before resuming their joyful dance, celebrating the simple delight of life under the endless sky.\n", + "\t- In the lush forest of Viridian, Pikachu trotted joyfully with its trainer, Ash. They stumbled upon a mysterious glowing berry that Pikachu eagerly nibbled. Suddenly, Pikachu was enveloped in a golden aura, granting it the power to understand other Pokémon's thoughts. With this newfound ability, Pikachu led a mission to resolve a dispute between two rival Pokémon clans: the Squirtle Squad and the Charmander League. Through communication and empathy, Pikachu fostered a harmonious agreement, creating a united community. Victorious, Pikachu and Ash celebrated under the twinkling stars, their bond stronger than ever, ready for the next adventure.\n", + "\t- Under dim lantern light, Suki's ramen shop thrived quietly in a bustling Tokyo alley. Each bowl crafted was a symphony of flavors, secrets passed from her grandmother. The irresistible aroma lured weary souls seeking solace in slurps. One rainy night, a wandering musician left his tune as thanks. Enchanted, Suki played his melody, infusing it into her broth. Customers, entranced by the unseen harmony, felt warmth beyond the soup. Word spread of this magical amalgamation, and soon, seekers of both food and peace filled the tiny shop. Suki's ramen didn’t just feed bodies; it nourished spirits.\n", "4 -------------------\n", "\t- Understanding the importance of effective communication in the workplace cannot be overstated. Clear communication fosters a positive environment where people can express their ideas and work together efficiently. When team members understand one another, they can collaborate seamlessly, avoid misunderstandings, and achieve collective goals. Furthermore, communication skills are essential for building trust, resolving conflicts, and ensuring that expectations are clear. 
Whether through verbal discussions, emails, or presentations, knowing how to convey thoughts in an understandable way is key to success in any professional setting.\n", "\t- In any workplace, the ability to communicate effectively is crucial for success. When individuals can clearly articulate their ideas and listen actively, it leads to a more productive and harmonious environment. Good communication prevents misunderstandings, aids in team collaboration, and helps in meeting shared objectives. It also plays a vital role in fostering trust among colleagues, resolving disputes, and ensuring transparency. Whether it’s through face-to-face conversations, written messages, or virtual meetings, mastering communication is essential to creating a positive, high-functioning work culture.\n", @@ -749,12 +727,19 @@ }, { "cell_type": "code", - "execution_count": 85, + "execution_count": 37, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 5/5 [02:12<00:00, 26.55s/it]\n" + ] + }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA90AAAJOCAYAAACqS2TfAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAVWFJREFUeJzt3QmcVWX9P/AHUDYXUJFFRNFcAEVQEEPLLQyXyi01s0BT6pdSGmqK/QQ1E00jrEjUxC1LyjTNBVMUcw0DNRcwNRVcEHABBQWE+b++z/915zcDAww4hxlm3u/X6zRzzz333OfeOZqf832WRmVlZWUJAAAAqHGNa/6UAAAAQBC6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBYBmNGjVK5513XlrX3XjjjalLly5p/fXXT61bt67Rcx9//PGpc+fOqa6aOHFi/jvGz4aqvlzHAOs6oRuA5bzyyivpe9/7Xtp2221T8+bN08Ybb5z22muvdPnll6ePP/64tptHNUybNi0H48997nPp6quvTlddddUKj41gFgGttLVs2TJttdVW6atf/Wq69tpr08KFC1N98Ic//CGNGjWqxs9b+t5OOumkKp//yU9+Un7MnDlzVvv8jz32WP4bffDBBzXQWgDWtkZlZWVla/1dAaiz7rrrrnTUUUelZs2apQEDBqSdd945LVq0KD3yyCPpL3/5Sw5yKwtw9cEnn3yS1ltvvbytq8aMGZO+//3vp5deeiltt912Kz02At3555+frrjiirThhhvmkP3mm2+me++9Nwe+XXbZJd15552pU6dO5a9ZvHhxWrp0ab5O6qJoW1y3TZs2TY0b//8aw1e+8pX03HPPpddee61G3yvCdNyciu2dd97J71lR3Lx6++2383U1e/bs1KZNm9U6/2WXXZbOPPPM9Oqrr65W74L6cB0D1Af+LQxAufiP+m984xtp6623Tg888EDq0KFD+XOnnHJKevnll3Mor49KIa0UntZ1s2bNyj9Xp1v517/+9UqBcNiwYemmm27KN1/iRswTTzxR/lx0Wa/Nv9GqRNBem3/HAw88MN1xxx3pnnvuSYceemj5/rhpEf9cHXnkkfmmVdHq23UMUB/oXg5AuZ///Ofpo48+Stdcc02lwF0SFdNTTz21/PGnn36afvrTn+YuzFHxjCrcOeecs1x35NgfVcYYX9u7d+/UokWL1L179/Lxtrfeemt+HCGhV69e6amnnqr0+qiuRwX2v//9b+rfv3/aYIMN0hZbbJEuuOCCtGyHragK7rnnnmmzzTbL7xPnu+WWW6qsTg4ePDiHyp122im3f/z48VWOhf3www/Taaedlj9HHNe2bdt0wAEHpClTplQ655///Of8fvG+EV6/9a1v5YpxVZ8l9h922GH598033zydccYZacmSJdX6O/32t78tb3N8D3FDpGLX42jn8OHD8+9x7s8ytve4447L3ab/+c9/pvvuu6/S5yhVXaPqvemmm6YTTjhhudfPmzcv/13j85XE9RHti+spPkNU0H/84x8vd92s7G9088035+96o402ysMf4vqJ4Q8rGtO977775htGr7/+enlX72h/XO9xPVW8rkveeOON1KRJkzRixIhVfk8dO3ZMe++9d+7CXlG0PdoWPUaqEt9rBPZWrVrlbv377LNPevTRR8ufj79bVLnDNttsU972UrV+da7jENfdiSeemK+bODbOGT0iIqiX/pbR62H77bfPf7f45+gLX/hCpb89AKtHpRuAcn/7299yV9gIrdURYez666/PFdLTTz89B4gIKFOnTk233XZbpWOjSv7Nb34zjxWPMBrhOMYMRzfoCOonn3xyPi5ef/TRR6cXX3yxvFtwiEAa4eTzn/98vjkQwSKCWwT/CN8lEby+9rWv5bAYQSLCWVRpo3v0IYccUqlNUc3/05/+lENLhOQVdd39n//5nxzc47hu3bq
ld999N3e3j8+522675WOuu+66HDp33333/Bmim3G0JQJU3ESoWHGOzxI3D/bYY4/8Pdx///3pF7/4Rb55EQGoOl3B+/Xrl4+N7ym6hT/55JP5vaICHeOWb7jhhvw3KHUZjy7ia+rb3/52HlLw97//Pd9sWFa85+GHH55vnlx55ZWVulf/9a9/zWE6elCUKrHx94nv77vf/W7q2rVrevbZZ9Mvf/nL9J///Ccfv6q/UQTAY489Nn3pS19Kl1xyST4u/hbx+asKz6Vx1XPnzs1BOt4rxPcSW7R93LhxaeTIkTlkl/zxj3/MN3XiWqqOuL7j/SPIx3nj2owbMUOGDMldvZcVn+2ggw7KNw/iWo7rPcbQ77///unhhx9Offr0SUcccUT+XqIt0e5ST4S4mbKy76gqb731Vj5n3KCJ7z4m2YsQHtf2ggUL8t8trq+4fuOf7Tg2bpr861//yjeYqvrbA1ANMaYbAObOnRsl47JDDz20Wsc//fTT+fiTTjqp0v4zzjgj73/ggQfK92299dZ532OPPVa+79577837WrRoUfb666+X77/yyivz/gcffLB838CBA/O+H/zgB+X7li5dWnbIIYeUNW3atGz27Nnl+xcsWFCpPYsWLSrbeeedy/bff/9K++N8jRs3Lnv++eeX+2zx3PDhw8sft2rVquyUU05Z4XcR79G2bdv8Ph9//HH5/jvvvDOfa9iwYct9lgsuuKDSOXbdddeyXr16rfA9wqxZs/Ln/fKXv1y2ZMmS8v2/+c1v8jnHjh1bvi/aH/sqfjcrsqpj33///fz84YcfXulzxN912b/n3/72t0qvPfjgg8u23Xbb8sc33nhj/t4ffvjhSseNGTMmv/7RRx9d5d/o1FNPLdt4443LPv300xV+prh+lr2O4nqp2OZl237PPfdU2r/LLruU7bPPPit8j4rtjOvjvffey3+f+IzhrrvuKmvUqFHZa6+9ttx3HNfv9ttvX9a/f//8e8Xrd5tttik74IADyvddeuml+bWvvvpqle9d3et4wIAB+dgnn3xyuWNLbejRo0f+ngCoObqXA5BFRStEd93quPvuu/PPqOJVFBXvsOzY76gQ9+3bt/xxVHlDVPVipuxl90dX8mVFJa+k1K02qtlRKS6Jrt0l77//fq5ufvGLX1yuK3iIrrzRrlWJKnVU8aNSWJWoBMYY6qjWVxxHG5X1qCZWNQ4+qucVRRur+swVxeeMzxtd3Sv2Ahg0aFDuYl3UePuo2pa62a9I/B2jyhoV44rff1SljznmmPJ9UfmN6nZ8LzGTd2mL14cHH3xwlX+j+HvMnz+/xro8R6+B6G4dXbRLYsK1f//737lXRnVtsskmuTdGVKVDdDWPXiMxR8Kynn766TzJXVTHo+dE6XuIzxUV/H/84x+5V0B1VOc6jnNFL4LoXRJDPJYV/zyVvtvnn38+tw2AmiF0A5BFaFtVsKooxsZG8Ft2Zuz27dvn/3CP5yuqGKxDjGENFWfErrg/AltF8V7R9b2iHXbYIf+sOBt1dCOPLugRfmOccXTDjS7WEb6XFeNZqyO6s0cIi7ZGl9voglsxIJc+64477rjcayNcLvtdRNsqdg8uBbZlP/OyVvQ+0S04vptl36emRHfpVd2QiRmyY7Kw22+/vXxsdnQ3jzHCFUN3hLkIdfH5K26lv2VpAriV/Y3i5kYcH12zt9xyy/Sd73ynfBzzmohrK7qQRyiNbtYhAnj8nWJowuqIEB03A6ZPn57PF4+rUgq1AwcOXO67+N3vfpe/w6qu2apU5zqOWdPjxtqKxpaXxFCN6H4e32+MRY/x5HHzAYA1J3QDUB66o9oX4XJ1lCpkq1JxrGx19q/JipYxDjbGC0dYisnGohofASiCT1Xnq1gVX5kYYx4h+9e//nX+ji699NI8aVXMVL0mVvSZ66rSNbGqpcdi3HbctCl9LzHOOG469OjRo1LFNcJc/F2q2kpj+1f2N4qJ7KJSHLOFx987quMRwCP
ArqmYoT1uLkRQjmslqtQx+V/pJlB1RXtigrJoSwTnuHaqUqpix7W0ou+i1MNgVap7HVdHTAb3yiuvpLFjx+aAHjcAYt6C+AnAmjGRGgDlImTEhFmPP/54pa7gVYkusxEcomIX3YVLYgKxqJRV1aX2s4j3iuBbqoiGmGAqlCaOiiWZInDH+tIV14+Oyak+q5jNPQJhbFGNjSDys5/9LIe90meNSc1K3aRLYl9NfRcV36di1T+6nMeyVNFNugg33nhj/hmTv60qsMX3FF3MY8brmOArJjCrKCaLe+aZZ3IX6uresKlKVPejq3RscW3E3yUmcTv33HNXeHNgZe8XAXPXXXfNFe6onkelOm6yrK4IwDEr/e9///t8baxoTe74Hko3u1b1d/ss31NJVNDjvapzU600E31scSMi/q7RuyMmVwNg9al0A1Aulm2K5ZPiP64jPC8rKmClZZkOPvjg/DNmyq4oZoAOy84UXhN+85vflP8e1ch4HDNnR4ArVZAjoFRceiu6ni87I/bqiHMt2803Kq1R8S51o44xsrEvZmKvuOxVVHxjVu2a+i4inEXY/NWvflWpch9LvEUbi/jOo+IbVc64CVP6nlfWTTtmso9Z8COox+zdFbuWh6j8xozZV1999XKv//jjj/OY5lWJMdDLvm9pdvZllx2rKK7tlXXZjlnaY4b2uKZjqawIzWsilkeL2cjjBsCKxIzlEbxj9vpS9/1lu4NXbHeouCzc6orvKG4GxN8m5iBYVul6Wva7jWp73MRY2fcKwMqpdANQLkJAhKwISlG9ji63UQGMSupjjz2WJ8GK9ZlDdBmOLrRRGY8wEJM5TZo0KS8hFv9xv99++9Vo26KCHeN24z1jsrUItDFxWCw3VhofHaEzQn9MZhVdyqMiPXr06Bwa1nRcanSXjspnhMn4zBFCYkKzWKIrlvkKEfxj6aqoDMb3EMtZlZYMiyr8j370oxr5DuJzDh06NC8ZFp8xujJH1Tu60sdSZasz6VdVYumo+Hzx945gHD0GYhmu+Nzxt6+OuHaiQhyhM7qRV+wFUQq20e08JpKLbuF77bVXvrExbdq0vD/es6qJviqKm0Lvvfde7lUQf5sYyx7v2bNnz+Xeb9mgG1X4mPwvvq/4rFEpL4lrJm48xVJrsRxb/F3XRHxfFbvUrygEx82MCPYxVCGunVjrO773+F6iKh0BudTuEL0Gogt/tCvaXQrj1XXRRRflmwpxjZaWa3v77bfz3zaWcIu5GGJCtljTPN4zKt4R0EvL5QGwhmpwJnQA6on//Oc/ZYMGDSrr3LlzXgJpo402Kttrr73Kfv3rX5d98skn5cctXry47Pzzz89LHK2//vplnTp1Khs6dGilY0Is01TVMkSlpZYqimWRYn8sk1RxeaoNNtig7JVXXsnLZbVs2bKsXbt2eTmkiktnhWuuuSYvxdSsWbOyLl26lF177bXlyzWt6r2rWmpp4cKFZWeeeWZeSim+h2hH/P7b3/52udeNGzcuL/0V773pppuWHXfccWVvvPFGpWNKn2VZVbVxRWKJsPhs8Z3H9/D9738/L+tV1flWZ8mw0ta8efOyLbfcsuwrX/lKXoZs2b9n6XNUtfxWLD0V10Gc58ILL1zhEmuXXHJJ2U477ZS/q0022SQvlxbXUixdt6q/0S233JKvg1imLa7Prbbaqux73/te2dtvv73SJcM++uijsm9+85tlrVu3zs9V1f5Y4mzZ5e1WZWXX0qr+Hk899VTZEUccUbbZZpvl7yLadPTRR5dNmDCh0nE//elPyzp27JiX/Kq4fFh1r+OSWJ4vlg7bfPPN8/vFcm7x+rjOQ/zN+vTpk7+jWM4vrrOf/exn+W8GwJppFP+zpoEdANaGqK5Hta2qbrhQkw4//PD07LPPppdffrm2mwJAPWFMNwBASrmrdQxZiC7wAFBTjOkGABq0mPk9xq7HGOsYL/29732vtpsEQD2i0g0ANGgPPfRQrm5H+I6JANu3b1/
bTQKgHjGmGwAAAAqi0g0AAAAFEboBAACgIA1uIrWlS5emt956K2200UapUaNGtd0cAAAA1kExUvvDDz9MW2yxRWrceMX17AYXuiNwd+rUqbabAQAAQD0wY8aMtOWWW67w+QYXuqPCXfpiNt5449puDgAAAOugefPm5YJuKWOuSIML3aUu5RG4hW4AAAA+i1UNWzaRGgAAABRE6AYAAICCCN0AAABQkAY3pru6lixZkhYvXlzbzaCOWn/99VOTJk1quxkAAEAdJ3RXsdbazJkz0wcffFDbTaGOa926dWrfvr313gEAgBUSupdRCtxt27ZNLVu2FKio8sbMggUL0qxZs/LjDh061HaTAACAOkroXqZLeSlwb7bZZrXdHOqwFi1a5J8RvON60dUcAACoionUKiiN4Y4KN6xK6Tox9h8AAFgRobsKupRTHa4TAABgVYRuAAAAKIjQTbVcd911ebZuAAAAqs9EatXU+ey71ur7vXbxIWs08/rPfvazdNddd6U333wzT/DVs2fPdNppp6UvfelLn6k9xxxzTDr44INT0SZOnJj222+/9P777wv5AADAOk/oridee+21tNdee+Wgeumll6bu3bvnCb7uvffedMopp6Rp06Z95tm6SzN2AwAAUD26l9cTJ598cp7Ya9KkSenII49MO+ywQ9ppp53SkCFD0hNPPJGPmT59ejr00EPThhtumDbeeON09NFHp3feeaf8HM8880yuMm+00Ub5+V69eqV//etfVXYvP++883IV/cYbb0ydO3dOrVq1St/4xjfShx9+WH7M0qVL04gRI9I222yTA3uPHj3SLbfcslqfq/S+cfOga9euue0HHnhgevvtt/Pzf//731Pz5s3zUm8VnXrqqWn//fdfw28TAACgZgjd9cB7772Xxo8fnyvaG2ywwXLPR2iNAByBO4596KGH0n333Zf++9//5m7jJccdd1zacsst05NPPpkmT56czj777LT++uuv8H1feeWV9Ne//jXdeeedeYvzXnzxxeXPR+C+4YYb0pgxY9Lzzz+ffvSjH6Vvfetb+bjVsWDBgnTZZZflgP+Pf/wj3zw444wz8nPRbT4+31/+8pdK662PGzcufx4AAIDapHt5PfDyyy+nsrKy1KVLlxUeM2HChPTss8+mV199NXXq1Cnvi0Ac1fAI2bvvvnsOs2eeeWb5ebbffvuVvm8E+ahER2U8fPvb387vE+PKFy5cmC666KJ0//33p759++bnt9122/TII4+kK6+8Mu2zzz7V/nzRTT6C++c+97n8ePDgwemCCy7Ivzdp0iRX2P/whz+kE088sfyzRuU7Kv4AAAC1SaW7HojAvSpTp07NYbsUuEO3bt1ylTieC9EV/aSTTkr9+vXLFeuoZK9MdCsvBe7QoUOHNGvWrPIbAVGhPuCAA3KX8NIWQb903gj8pf0HHXTQCt+nZcuW5YF72fcJUdGOCdjeeuut/Pimm25KhxxyiInYAACAWqfSXQ9ERTrGc3/WydJinPY3v/nNPPv5Pffck4YPH55uvvnmdPjhh1d5/LJdz6MNUf0OH330Uf4Z5+rYsWOl45o1a5Z/3n333bmKHVY2SVtV71PxRkNU6SOUR1u///3vp9tuuy1X4AEAAGqb0F0PbLrppql///5p9OjR6Yc//OFy47qjq3VMQjZjxoy8lardL7zwQn4uKt4lMQFbbDH++thjj03XXnvtCkP3ysQ5I1xHl/UVdSXfeuutU02JandUuGNMeuPGjXOlGwAAoLbpXl5PROCOCcT69OmTJxV76aWXcrfxX/3qV3lMdXQZj2XEIpxOmTIlz3I+YMCAHIh79+6dPv744zxWOrppv/766+nRRx/NY70jrK+J6HYek51FeL/++utzl/J431//+tf5cU0rfa4YT/71r3+9vJoOAABQm1S664mYpKwUOk8//fS8pNbmm2+el/264oorcpfs22+/Pf3gBz9Ie++9d64Gx9JbEYJLE5K9++67OYj
HMmJt2rRJRxxxRDr//PPXuE0//elPcxtiFvOYKT3GWO+2227pnHPOSTVtu+22yzcc4mbCqFGjavz8AAAAa6JRWXVm4apH5s2bl9eUnjt3bl6LuqJPPvkkz+4d60rH2s+wMq4XAKh7ul/fPdUFzw58trabANRitqxI93IAAAAoiNANAAAABRG6AQAAoCBCNwAAABTE7OUAQJVMSAUAn51KNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0Uy917tw5jRo1qrabAQAANHBmL6+u81qt5febu9ovmTlzZvrZz36W7rrrrvTmm2+mtm3bpp49e6bTTjstfelLX0oNyZNPPpk22GCD2m4GAADQwAnd9cRrr72W9tprr9S6det06aWXpu7du6fFixene++9N51yyilp2rRpqS6Jtq2//vqFnX/zzTcv7NwAAADVpXt5PXHyySenRo0apUmTJqUjjzwy7bDDDmmnnXZKQ4YMSU888UQ+Zvr06enQQw9NG264Ydp4443T0Ucfnd55553yc5x33nm5Mj527Ni01VZb5ePivEuWLEk///nPU/v27XP1PKrpFcX7XnHFFemggw5KLVq0SNtuu2265ZZbKt0QiGPGjRuX9tlnn9S8efN000035ed+97vfpa5du+Z9Xbp0Sb/97W/LX7do0aI0ePDg1KFDh/z81ltvnUaMGJGfKysry+2NdjZr1ixtscUW6Yc//OEKu5dX97PfeOON+bWtWrVK3/jGN9KHH35Yw38pAACgIVHprgfee++9NH78+ByGq+pSHdXvpUuXlofOhx56KH366ae5An7MMcekiRMnlh/7yiuvpHvuuSefL37/+te/nv773//mEB+ve+yxx9J3vvOd1K9fv7THHnuUv+7cc89NF198cbr88stzcI3A+uyzz+ZAXXL22WenX/ziF2nXXXctD97Dhg1Lv/nNb/K+p556Kg0aNCh/hoEDB6Zf/epX6Y477kh/+tOfcrieMWNG3sJf/vKX9Mtf/jLdfPPN+eZCdK1/5plnqvx+Vuez//Wvf0133nlnev/993Mwj8+07E0GAACA6hK664GXX345V36jUrwiEyZMyCH41VdfTZ06dcr7brjhhhxYY/zz7rvvXh5Qo9K90UYbpW7duqX99tsvvfjii+nuu+9OjRs3TjvuuGO65JJL0oMPPlgpdB911FHppJNOyr//9Kc/Tffdd1/69a9/XalyHWPLjzjiiPLHw4cPzyG8tG+bbbZJL7zwQrryyitz6I7q9Pbbb5++8IUv5Ep5VLpL4rmovEf4j27qEcr79OnzmT/7ddddlz97+Pa3v51fK3QDAABrSvfyeiAC96pMnTo1B85S6AwRqqMKHs+VRNfqUugM7dq1y8dF4K64b9asWZXO37dv3+UeVzxv6N27d/nv8+fPz5XlE088MVegS9uFF16Y94fjjz8+Pf300znoR9fxv//975VC/scff5y7skd1/LbbbssV7Jr87NGtfdnPCQAAsDqE7nogqsFRCa6JydKWndwszlvVvqgKr66KXd8/+uij/PPqq6/Owbq0Pffcc+Vj0HfbbbdcnY7KeQTs6O4d3d1DBOiowEclPcaRx9jzvffeO0/QtqZq6nMCAACUCN31wKabbpr69++fRo8enSvIy/rggw/y2OqKY6JDdOWO56Lq+1mVgnLFxxXHcy8rquUx+VmMF99uu+0qbdHNvCQmPYux1xHOYyK2GMsdY9hDhO2vfvWreex3jM1+/PHHczfyZRX92QEAAFbEmO56IgJ3LBkW45ovuOCCtMsuu+Tu1jG2OmYWj5AZy4gdd9xxeVbveC6qwzGbeMVu32vqz3/+cz5PjL+OCdJiFvVrrrlmpa85//zzc7fxmCn8wAMPTAsXLkz/+te/8iRmMev6yJEjcxfvmGQturfHe8Q47ugWHmOvY1b1GFfesmXL9Pvf/z6H8Irjvkti3HeRnx0AAGBFVLrriRjbPGXKlDzx2emnn5523nnndMA
BB+SJwCJ0R1fp22+/PW2yySa5G3YE0XhNVI9rQgTomEk8wn5MUvbHP/5xlVXkmHgtlgy79tprcyiOEBxhulTpjvHVsVRZBOOY7CyWHitN6BbBO6rfcaMh3vP+++9Pf/vb39Jmm2223PsU/dkBAABWpFFZdWbhqkfmzZuXK6tz587NXZcr+uSTT/IY4gh9saQV1ROhNiYyO+yww1JD4noB6rvu13dPdcGzA5cfOgQr4roF6kK2rEilGwAAAAoidAMAAEBBTKTGZ9bARigAAABUm0o3AAAAFEToBgAAgIII3QAAAFAQoRsAAADqc+gePXp06ty5c17reI899kiTJk1a4bH77rtvXhd62e2QQw5Zq20GAACAOh+6x40bl4YMGZKGDx+epkyZknr06JH69++fZs2aVeXxt956a3r77bfLt+eeey41adIkHXXUUWu97QAAAFCnQ/fIkSPToEGD0gknnJC6deuWxowZk1q2bJnGjh1b5fGbbrppat++ffl233335eOF7tUXPQT++te/5t9fe+21/Pjpp5+u7WYBAADUG7W6TveiRYvS5MmT09ChQ8v3NW7cOPXr1y89/vjj1TrHNddck77xjW+kDTbYoMCWptT9+u5pbXp24LOrdfzxxx+frr/++uX2R6+B8ePHr/L1nTp1yj0H2rRpkx9PnDgx7bfffun9999PrVu3Xq22AAAAUAdC95w5c9KSJUtSu3btKu2Px9OmTVvl62Psd3Qvj+C9IgsXLsxbybx581J9deCBB6Zrr7220r5mzZpV67XRRT96DgAAAGvX2i7w1VThj3Wke/lnEWG7e/fuqU+fPis8ZsSIEalVq1blW1R066sI2BW73se2ySab5OdeeumltPfee+fJ6qIbf3TLr6hi9/L4ParcIV4f+6OSDgAAwDoUuqMrc1RY33nnnUr74/Gqqq7z589PN998czrxxBNXelx0XZ87d275NmPGjNTQLF26NB1xxBGpadOm6Z///GceN3/WWWet8Pi4MfGXv/wl//7iiy/mbueXX375WmwxAABA/VCr3csjBPbq1StNmDAhHXbYYeUBMR4PHjx4pa/985//nLuNf+tb31pl9be6XazXdXfeeWfacMMNK+0755xzUu/evXN3/XvvvTdtscUWef9FF12UDjrooCrPEzdCYsK60LZtW2O6AQAA1sXQHWK5sIEDB+ZgGN3ER40alavYMZt5GDBgQOrYsWPuJr5s1/II6ptttlkttbzuiS7hV1xxRaV9EZ5vvPHGXL0uBe7Qt2/fWmghAABAw1LrofuYY45Js2fPTsOGDUszZ85MPXv2zLNtlyZXmz59ep7RvKLo8vzII4+kv//977XU6ropZnDfbrvtarsZAAAA1JXQHaIr+Yq6k8fSVcvacccdU1lZ2VpoWf3QtWvXPJY9xmZ36NAh73viiSdW2fU/xOzyAAAArMOhm5oRY9yjt0BF6623Xl73fIcddsjd+C+99NK8bNpPfvKTlZ5r6623zrOWxzjxgw8+OLVo0WK58eIAAADU4yXDqCy65Uclu+L2hS98IXfPv+2229LHH3+cx82fdNJJ6Wc/+9lKzxXj6M8///x09tln567+q5rYDgAAgOWpdNeTheKvu+66vK1IVLoffvjhSvsqdtHv3Lnzcl32zz333LwBAACwZlS6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEEsGQYAdcl5rVKdsc1Wtd0CAFjnqXQDAABAQYRuAAAAKIjQTa157bXXUqNGjdLTTz+9wmMmTpyYj/nggw8+03t17tw5jRo16jOdAwAAYHUZ011NU7t0Xavv13Xa1NU6/vjjj0/XX3/9cvv79++fxo8fX4MtAwAAoLqE7nrkwAMPTNdee22lfc2aNau19gAAADR0upfXIxGw27dvX2nbZJNN8nPRRft3v/tdOvzww1PLli3T9ttvn+64447y177//vvpuOOOS5tvvnlq0aJFfr5
igJ8xY0Y6+uijU+vWrdOmm26aDj300Nw9vGKl/bDDDksXXXRRateuXT7uggsuSJ9++mk688wz82u23HLL5W4KhGnTpqU999wzNW/ePO28887poYceWunnfOSRR9IXv/jF3M5OnTqlH/7wh2n+/Pnlz8+aNSt99atfzc9vs8026aabbvrM3y0AAMCaELobkPPPPz8H53//+9/p4IMPziH7vffey8+de+656YUXXkj33HNPmjp1arriiitSmzZt8nOLFy/O3dQ32mij9PDDD6dHH300bbjhhrmyvmjRovLzP/DAA+mtt95K//jHP9LIkSPT8OHD01e+8pUc/P/5z3+m//mf/0nf+9730htvvFGpXRHKTz/99PTUU0+lvn375sD87rvvVvkZXnnllfy+Rx55ZP4c48aNyyF88ODBlW4AxE2CBx98MN1yyy3pt7/9bQ7iAAAAa5vQXY/ceeedOQxX3KLyXDGMHnvssWm77bbL+z/66KM0adKk/Nz06dPTrrvumnr37p0nHevXr18OvyGC7dKlS3OlvHv37qlr1665Yh2viYnOSqKa/atf/SrtuOOO6Tvf+U7+uWDBgnTOOefkyvnQoUNT06ZNc0iuKAJzhOg4b4T9Vq1apWuuuabKzzhixIh8s+C0007L54wKebznDTfckD755JP0n//8J984uPrqq9PnP//51KtXr3yujz/+uKBvHQAAYMWM6a5H9ttvvxxaK4ogXLLLLruU/77BBhukjTfeuLwC/P3vfz8H3ylTpqQvf/nLuat4BNrwzDPPpJdffjlXuiuKkBuV55KddtopNW78f/dxopt5dBcvadKkSdpss82WqzpHdbtkvfXWy8E/qu1VibZEhbtil/GysrJ8U+DVV1/NoTvOEWG7pEuXLrm7OwAAwNomdNcjEaSjir0i66+/fqXHMc47wmo46KCD0uuvv57uvvvudN9996UvfelL6ZRTTkmXXXZZrohHiK1qbHSMAV/Z+Vf2nmsi2hJd1GMc97K22mqrHLoBAADqCt3LqRSgBw4cmH7/+9/nNa2vuuqqvH+33XZLL730Umrbtm0O9RW36Ar+WT3xxBPlv8fEa5MnT85dzasSbYmx58u2I7bouh5V7dI5Sl588cXPvM43AADAmhC665GFCxemmTNnVtrmzJlTrdcOGzYs3X777bkb+fPPP5/Hh5eCb4yhjknVYsbymEgtunHHWO6oNi87KdqaGD16dLrtttvyLOZRXY+Z1GNMeFXOOuus9Nhjj+Vx4E8//XS+GRDtLk2kFuPIY6K1qIbH5G0Rvk866aQ8kzkAAMDaJnTXI+PHj08dOnSotH3hC1+o1mujShwTncW477333juPv7755pvzc7HEWMxIHt23jzjiiBzGTzzxxDymO8aFf1YXX3xx3nr06JEnWYulzEozpy8r2hdLikU38lg2LCZ/ixsGW2yxRfkxMclbPN5nn31ye7/73e/mKj0AAMDa1qgsZqFqQObNm5e7RM+dO3e5wBghMqq4sbZzrBkNK+N6AQpx3mcftlNTum+zVaoLnh34bG03gXVI9+u7p7rAdcvqcN3Wv2xZkUo3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInRXoYHNLccacp0AAACrInRXsP766+efCxYsqO2msA4oXSel6wYAAGBZ6y23pwGLtalbt26dZs2aVb4+daNGjWq7WdTBCncE7rhO4nqJ6wYAAKAqQvcy2rdvn3+WgjesSATu0vUCAABQFaF7GVHZ7tChQ2rbtm1avHhxbTeHOiq6lKtwAwAAqyJ0r0AEKqEKAACAz8JEagAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEFqPXSPHj06de7cOTVv3jz
tscceadKkSSs9/oMPPkinnHJK6tChQ2rWrFnaYYcd0t13373W2gsAAADVtV6qRePGjUtDhgxJY8aMyYF71KhRqX///unFF19Mbdu2Xe74RYsWpQMOOCA/d8stt6SOHTum119/PbVu3bpW2g8AAAB1NnSPHDkyDRo0KJ1wwgn5cYTvu+66K40dOzadffbZyx0f+99777302GOPpfXXXz/viyo5AAAA1EW11r08qtaTJ09O/fr1+7/GNG6cHz/++ONVvuaOO+5Iffv2zd3L27Vrl3beeed00UUXpSVLlqzFlgMAAEAdr3TPmTMnh+UIzxXF42nTplX5mv/+97/pgQceSMcdd1wex/3yyy+nk08+OS1evDgNHz68ytcsXLgwbyXz5s2r4U8CAAAAdXQitdWxdOnSPJ77qquuSr169UrHHHNM+slPfpK7pa/IiBEjUqtWrcq3Tp06rdU2AwAA0HDVWuhu06ZNatKkSXrnnXcq7Y/H7du3r/I1MWN5zFYeryvp2rVrmjlzZu6uXpWhQ4emuXPnlm8zZsyo4U8CAAAAdSx0N23aNFerJ0yYUKmSHY9j3HZV9tprr9ylPI4r+c9//pPDeJyvKrGs2MYbb1xpAwAAgHrfvTyWC7v66qvT9ddfn6ZOnZq+//3vp/nz55fPZj5gwIBcqS6J52P28lNPPTWH7ZjpPCZSi4nVAAAAoK6p1SXDYkz27Nmz07Bhw3IX8Z49e6bx48eXT642ffr0PKN5SYzHvvfee9OPfvSjtMsuu+R1uiOAn3XWWbX4KQAAAKAOhu4wePDgvFVl4sSJy+2LrudPPPHEWmgZAAAANKDZywEAAGBdInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCrFfUiQEAasLULl1TXdF12tTabgIA6xiVbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACA+hy6R48enTp37pyaN2+e9thjjzRp0qQVHnvdddelRo0aVdridQAAAFDX1HroHjduXBoyZEgaPnx4mjJlSurRo0fq379/mjVr1gpfs/HGG6e33367fHv99dfXapsBAABgnQjdI0eOTIMGDUonnHBC6tatWxozZkxq2bJlGjt27ApfE9Xt9u3bl2/t2rVbq20GAACAOh+6Fy1alCZPnpz69ev3fw1q3Dg/fvzxx1f4uo8++ihtvfXWqVOnTunQQw9Nzz///FpqMQAAAKwjoXvOnDlpyZIly1Wq4/HMmTOrfM2OO+6Yq+C33357+v3vf5+WLl2a9txzz/TGG29UefzChQvTvHnzKm0AAADQILqXr66+ffumAQMGpJ49e6Z99tkn3XrrrWnzzTdPV155ZZXHjxgxIrVq1ap8i+o4AAAA1PvQ3aZNm9SkSZP0zjvvVNofj2OsdnWsv/76adddd00vv/xylc8PHTo0zZ07t3ybMWNGjbQdAAAA6nTobtq0aerVq1eaMGFC+b7oLh6Po6JdHdE9/dlnn00dOnSo8vlmzZrl2c4rbgAAALA2rJdqWSwXNnDgwNS7d+/Up0+fNGrUqDR//vw8m3mIruQdO3bM3cTDBRdckD7/+c+n7bbbLn3wwQfp0ksvzUuGnXTSSbX8SQAAAKCOhe5jjjkmzZ4
9Ow0bNixPnhZjtcePH18+udr06dPzjOYl77//fl5iLI7dZJNNcqX8sccey8uNAQAAQF1S66E7DB48OG9VmThxYqXHv/zlL/MGAAAAdd06N3s5AAAArCuEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAXQrdn376abr//vvTlVdemT788MO876233kofffRRTbcPAAAA1lnrre4LXn/99XTggQem6dOnp4ULF6YDDjggbbTRRumSSy7Jj8eMGVNMSwEAAKC+V7pPPfXU1Lt37/T++++nFi1alO8//PDD04QJE2q6fQAAANBwKt0PP/xweuyxx1LTpk0r7e/cuXN68803a7JtAAAA0LAq3UuXLk1LlixZbv8bb7yRu5kDAAAAaxi6v/zlL6dRo0aVP27UqFGeQG348OHp4IMPXt3TAQAAQL212t3LL7vssjyRWrdu3dInn3ySvvnNb6aXXnoptWnTJv3xj38sppUAALAOmdqla6oruk6bWttNgAZttUN3p06d0jPPPJPGjRuXf0aV+8QTT0zHHXdcpYnVAAAAoKFbrdC9ePHi1KVLl3TnnXfmkB0bAAAAUANjutdff/3cpRwAAAAoYCK1U045JV1yySXp008/Xd2XAgAAQIOy2mO6n3zyyTRhwoT097//PXXv3j1tsMEGlZ6/9dZba7J9AAAA0HBCd+vWrdORRx5ZTGsAAACgIYfua6+9tpiWAAAAQEMP3SWzZ89OL774Yv59xx13TJtvvnlNtgsAAAAa3kRq8+fPT9/5zndShw4d0t577523LbbYIq/VvWDBgmJaCQAAAA0hdA8ZMiQ99NBD6W9/+1v64IMP8nb77bfnfaeffnoxrQQAAICG0L38L3/5S7rlllvSvvvuW77v4IMPTi1atEhHH310uuKKK2q6jQAAANAwKt3Rhbxdu3bL7W/btq3u5QAAAPBZQnffvn3T8OHD0yeffFK+7+OPP07nn39+fg4AAABYw+7ll19+eerfv3/acsstU48ePfK+Z555JjVv3jzde++9q3s6AAAAqLdWO3TvvPPO6aWXXko33XRTmjZtWt537LHHpuOOOy6P6wYAAAA+wzrdLVu2TIMGDVqTl7KuOq9VqjPOm1vbLQAAAChmTPeIESPS2LFjl9sf+y655JLVPR0AAADUW6sduq+88srUpUuX5fbvtNNOacyYMTXVLgAAAGh4oXvmzJmpQ4cOy+3ffPPN09tvv11T7QIAAICGF7o7deqUHn300eX2x74tttiiptoFAAAADW8itZhA7bTTTkuLFy9O+++/f943YcKE9OMf/zidfvrpRbQRAAAAGkboPvPMM9O7776bTj755LRo0aK8L9boPuuss9LQoUOLaCMAAAA0jNDdqFGjPEv5ueeem6ZOnZrX5t5+++1Ts2bNimkhAAB1W11aWnSbrWq7BQCfbUx3yYYbbph23333tNFGG6VXXnklLV26dE1PBQAAAA07dMc63CNHjqy077vf/W7adtttU/fu3dPOO++cZsyYUUQbAQAAoH6H7quuuiptsskm5Y/Hjx+frr322nTDDTekJ598MrVu3Tqdf/75RbUTAAAA6u+Y7pdeein17t27/PHtt9+eDj300HTcccflxxdddFE64YQTimklAAAA1OdK98cff5w23njj8sePPfZY2nvvvcsfRzfzmTNn1nwLAQAAoL5Xurfeeus0efLk/HPOnDnp+eefT3vttVf58xG4W7WqQzNXAgAArIhZ96lroXvgwIHplFNOyWH7gQceSF26dEm9evWqVPmOydQAAACA1QzdP/7xj9OCBQvSrbfemtq3b5/+/Oc/V3r+0UcfTccee2x1TwcAAAD1XrVDd+PGjdMFF1yQt6osG8IBAACgoav2RGoAAADA6hG6AQAAoCBCNwAAABRE6AYAAIC6EroffPDBYloCAAAADT10H3jggelzn/tcuvDCC9OMGTNqpBG
jR49OnTt3Ts2bN0977LFHmjRpUrVed/PNN6dGjRqlww47rEbaAQAAALUaut988800ePDgdMstt6Rtt9029e/fP/3pT39KixYtWqMGjBs3Lg0ZMiQNHz48TZkyJfXo0SOfc9asWSt93WuvvZbOOOOM9MUvfnGN3hcAAADqXOhu06ZN+tGPfpSefvrp9M9//jPtsMMO6eSTT05bbLFF+uEPf5ieeeaZ1TrfyJEj06BBg9IJJ5yQunXrlsaMGZNatmyZxo4du8LXLFmyJB133HHp/PPPz8EfAAAA6t1EarvttlsaOnRornx/9NFHOSj36tUrV5+ff/75Vb4+quOTJ09O/fr1+78GNW6cHz/++OMrfN0FF1yQ2rZtm0488cRVvsfChQvTvHnzKm0AAABQZ0P34sWLc/fygw8+OG299dbp3nvvTb/5zW/SO++8k15++eW876ijjlrleebMmZOr1u3atau0Px7PnDmzytc88sgj6ZprrklXX311tdo6YsSI1KpVq/KtU6dO1fyUAAAAsJZD9w9+8IPUoUOH9L3vfS93LX/qqadyVfqkk05KG2ywQZ4Q7bLLLkvTpk1LNe3DDz9M3/72t3Pgjm7u1RGV+Llz55ZvNTX5GwAAAKzKemk1vfDCC+nXv/51OuKII1KzZs2qPCYCcXWWFovjmjRpkivkFcXj9u3bL3f8K6+8kidQ++pXv1q+b+nSpf//g6y3XnrxxRfzzOoVRRtX1E4AAACoU5XumGU8uo4vG2Q//fTT9I9//KM8AO+zzz6rPFfTpk3zGPAJEyZUCtHxuG/fvssd36VLl/Tss8/mSdxK29e+9rW033775d91HQcAAGCdrnRHwH377bfzRGYVRdfteC7GaK+OWC5s4MCBqXfv3qlPnz5p1KhRaf78+Xk28zBgwIDUsWPHPDY71vHeeeedK72+devW+eey+wEAAGCdC91lZWWpUaNGy+1/991385ju1XXMMcek2bNnp2HDhuXJ03r27JnGjx9fPrna9OnT84zmAAAAUG9Dd4zhDhG4jz/++Erdy6O6/e9//zvtueeea9SIWHIstqpMnDhxpa+97rrr1ug9AQAAoM6E7lhuq1Tp3mijjVKLFi0qjc3+/Oc/nwYNGlRMKwEAACjU1C5dU13RddrU1OBC97XXXpt/xpJgZ5xxxhp1JQcAAICGZL01mb0cAAAAqKHQvdtuu+VlvDbZZJO06667VjmRWsmUKVOqc0oAAACo96oVug899NDyidMOO+ywotsEAAAADSd0V+xSrns5AAAAVM9qL4A9Y8aM9MYbb5Q/njRpUjrttNPSVVddtbqnAgAAgHpttUP3N7/5zfTggw/m32fOnJn69euXg/dPfvKTdMEFFxTRRgAAAGgYofu5555Lffr0yb//6U9/St27d0+PPfZYuummm9J1111XRBsBAACgYYTuxYsXl0+qdv/996evfe1r+fcuXbqkt99+u+ZbCAAAAA0ldO+0005pzJgx6eGHH0733XdfOvDAA/P+t956K2222WZFtBEAAAAaRui+5JJL0pVXXpn23XffdOyxx6YePXrk/XfccUd5t3MAAACgmkuGlZSVlaVtt902TZ8+PX366adpk002KX/uu9/9bmrZsmURbQQAAID6X+mO0L3ddtvlWcsrBu7QuXPn1LZt25puHwAAADSM0N24ceO0/fbbp3fffbe4FgEAAEBDHdN98cUXpzPPPDMvHQYAAADU0JjuMGDAgLRgwYI8gVrTpk1TixYtKj3/3nvvre4pAQAAoF5a7dA9atSoYloCAAAADT10Dxw4sJiWAAAAQEMf0x1eeeWV9L//+795ne5Zs2blfffcc096/vnna7p9AAAA0HBC90MPPZS6d++e/vnPf6Zbb701ffTRR3n/M888k4YPH15EGwEAAKBhhO6zzz47XXjhhem+++7LE6mV7L///umJJ56o6fYBAABAwwndzz77bDr88MOX29+2bds0Z86cmmoXAAAANLz
Q3bp16/T2228vt/+pp55KHTt2rKl2AQAAQMML3d/4xjfSWWedlWbOnJkaNWqUli5dmh599NF0xhln5DW8AQAAgDUM3RdddFHq0qVL6tSpU55ErVu3bmnvvfdOe+65Z57RHAAAAFjDdbpj8rSrr746nXvuuem5557LwXvXXXdN22+//eqeCgAAAOq11Q7djzzySPrCF76Qttpqq7wBAAAANdS9PJYG22abbdI555yTXnjhhdV9OQAAADQYqx2633rrrXT66aenhx56KO28886pZ8+e6dJLL01vvPFGMS0EAACAhhK627RpkwYPHpxnLH/llVfSUUcdla6//vrUuXPnXAUHAAAA1jB0VxTdzM8+++x08cUXp+7du+fqNwAAAPAZQ3dUuk8++eTUoUOH9M1vfjN3Nb/rrrvW9HQAAABQ76z27OVDhw5NN998cx7bfcABB6TLL788HXrooally5bFtBAAAAAaSuj+xz/+kc4888x09NFH5/HdAAAAQA2F7uhWDgAAANRQ6L7jjjvSQQcdlNZff/38+8p87Wtfq84pAQAAoN6rVug+7LDD0syZM1Pbtm3z7yvSqFGjtGTJkppsHwAAANTv0L106dIqfwcAAAAKWqcbAAAAqKGJ1KLKfd1116Vbb701vfbaa7k7+TbbbJO+/vWvp29/+9v5MQAAALCale6ysrI8SdpJJ52U3nzzzdS9e/e00047pddffz0df/zx6fDDD6/uqQAAAKBBqHalOyrcsUb3hAkT0n777VfpuQceeCBPsHbDDTekAQMGFNFOAAAAqL+V7j/+8Y/pnHPOWS5wh/333z+dffbZ6aabbqrp9gEAAED9D93//ve/04EHHrjC52Md72eeeaam2gUAAAANJ3S/9957qV27dit8Pp57//33a6pdAAAA0HBC95IlS9J66614CHiTJk3Sp59+WlPtAgAAgIYzkVrMXh6zlDdr1qzK5xcuXFiT7QIAAICGE7oHDhy4ymPMXA4AAABrELqvvfba6h4KAAAArM6YbgAAAGD1CN0AAABQEKEbAAAA6nPoHj16dOrcuXNq3rx52mOPPdKkSZNWeOytt96aevfunVq3bp022GCD1LNnz3TjjTeu1fYCAADAOhG6x40bl4YMGZKGDx+epkyZknr06JH69++fZs2aVeXxm266afrJT36SHn/88fTvf/87nXDCCXm7995713rbAQAAoE6H7pEjR6ZBgwbl4NytW7c0ZsyY1LJlyzR27Ngqj993333T4Ycfnrp27Zo+97nPpVNPPTXtsssu6ZFHHlnrbQcAAIA6G7oXLVqUJk+enPr16/d/DWrcOD+OSvaqlJWVpQkTJqQXX3wx7b333lUes3DhwjRv3rxKGwAAANT70D1nzpy0ZMmS1K5du0r74/HMmTNX+Lq5c+emDTfcMDVt2jQdcsgh6de//nU64IADqjx2xIgRqVWrVuVbp06davxzAAAAQJ3sXr4mNtpoo/T000+nJ598Mv3sZz/LY8InTpxY5bFDhw7NIb20zZgxY623FwAAgIZpvdp88zZt2qQmTZqkd955p9L+eNy+ffsVvi66oG+33Xb595i9fOrUqbmiHeO9l9WsWbO8AQAAQIOqdEf38F69euVx2SVLly7Nj/v27Vvt88RrYuw2AAAA1CW1WukO0TV84MCBee3tPn36pFGjRqX58+fn2czDgAEDUseOHXMlO8TPODZmLo+gfffdd+d1uq+44opa/iQAAABQx0L3Mccck2bPnp2GDRuWJ0+L7uLjx48vn1xt+vTpuTt5SQTyk08+Ob3xxhupRYsWqUuXLun3v/99Pg8AAADUJbUeusPgwYPzVpVlJ0i78MIL8wawSue1SnXGeXNruwUAANSCdXL2cgAAAFgXCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAD1eckwAKhNnc++K9UVrzWv7RYAADVJpRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugE
AAKAgZi8HWAu6X9891QXPDny2tpsAANCgqHQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUJD1ijoxAHXP1C5dU13RddrU2m4CAEDhVLoBAACgICrdAADroM5n35Xqitea13YLAOoulW4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQn0P36NGjU+fOnVPz5s3THnvskSZNmrTCY6+++ur0xS9+MW2yySZ569ev30qPBwAAgAYbuseNG5eGDBmShg8fnqZMmZJ69OiR+vfvn2bNmlXl8RMnTkzHHntsevDBB9Pjjz+eOnXqlL785S+nN998c623HQAAAOp06B45cmQaNGhQOuGEE1K3bt3SmDFjUsuWLdPYsWOrPP6mm25KJ598curZs2fq0qVL+t3vfpeWLl2aJkyYsNbbDgAAAHU2dC9atChNnjw5dxEvb1DjxvlxVLGrY8GCBWnx4sVp0003rfL5hQsXpnnz5lXaAAAAoN6H7jlz5qQlS5akdu3aVdofj2fOnFmtc5x11llpiy22qBTcKxoxYkRq1apV+Rbd0QEAAKBBdC//LC6++OJ08803p9tuuy1PwlaVoUOHprlz55ZvM2bMWOvtBAAAoGFarzbfvE2bNqlJkybpnXfeqbQ/Hrdv336lr73sssty6L7//vvTLrvsssLjmjVrljcAAABoUJXupk2bpl69elWaBK00KVrfvn1X+Lqf//zn6ac//WkaP3586t2791pqLQAAAKxDle4Qy4UNHDgwh+c+ffqkUaNGpfnz5+fZzMOAAQNSx44d89jscMkll6Rhw4alP/zhD3lt79LY7w033DBvAAAAUFfUeug+5phj0uzZs3OQjgAdS4FFBbs0udr06dPzjOYlV1xxRZ71/Otf/3ql88Q63+edd95abz8AAADU2dAdBg8enLeqTJw4sdLj1157bS21CgAAABrw7OUAAABQlwndAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAqyXlEnhvpuapeuqa7oOm1qbTcBAACogko3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgvobu0aNHp86dO6fmzZunPfbYI02aNGmFxz7//PPpyCOPzMc3atQojRo1aq22FQAAANaZ0D1u3Lg0ZMiQNHz48DRlypTUo0eP1L9//zRr1qwqj1+wYEHadttt08UXX5zat2+/1tsLAAAA60zoHjlyZBo0aFA64YQTUrdu3dKYMWNSy5Yt09ixY6s8fvfdd0+XXnpp+sY3vpGaNWu21tsLAAAA60ToXrRoUZo8eXLq16/f/zWmceP8+PHHH6+tZgEAAECNWa+23njOnDlpyZIlqV27dpX2x+Np06bV2PssXLgwbyXz5s2rsXMDAABAnQz
da8uIESPS+eefX9vNgAaj89l3pbritea13QIAABq6Wute3qZNm9SkSZP0zjvvVNofj2tykrShQ4emuXPnlm8zZsyosXMDAABAnQzdTZs2Tb169UoTJkwo37d06dL8uG/fvjX2PjHh2sYbb1xpAwAAgHrfvTyWCxs4cGDq3bt36tOnT153e/78+Xk28zBgwIDUsWPH3EW8NPnaCy+8UP77m2++mZ5++um04YYbpu222642PwoAAADUrdB9zDHHpNmzZ6dhw4almTNnpp49e6bx48eXT642ffr0PKN5yVtvvZV23XXX8seXXXZZ3vbZZ580ceLEWvkMAAAAUGcnUhs8eHDeqrJskO7cuXMqKytbSy0DAACAdXRMNwAAANR3QjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUJD1ijoxAABARZ3PvivVFa81r+0W0FCodAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAOpz6B49enTq3Llzat68edpjjz3SpEmTVnr8n//859SlS5d8fPfu3dPdd9+91toKAAAA60zoHjduXBoyZEgaPnx4mjJlSurRo0fq379/mjVrVpXHP/bYY+nYY49NJ554YnrqqafSYYcdlrfnnnturbcdAAAA6nToHjlyZBo0aFA64YQTUrdu3dKYMWNSy5Yt09ixY6s8/vLLL08HHnhgOvPMM1PXrl3TT3/607Tbbrul3/zmN2u97QAAAFBnQ/eiRYvS5MmTU79+/f6vQY0b58ePP/54la+J/RWPD1EZX9HxAAAAUFvWq7V3TinNmTMnLVmyJLVr167S/ng8bdq0Kl8zc+bMKo+P/VVZuHBh3krmzp2bf86bN68GPkEDsrAs1Rl15G/30ZIlqa6oS9fz0oULUl0xr1HduW6XfFw3rhfXbdVct1Vz3S7PdVs11+3yXLdVc91WzXVbt6/bVbWxrKys7obutWHEiBHp/PPPX25/p06daqU91ICLW9V2C+qeVr6TqtStb2Vqqgv6pDrEdVuluvWtuG6X47qtUt36Vly3y3HdVqlufSuu23X5uv3www9Tq5W0t1ZDd5s2bVKTJk3SO++8U2l/PG7fvn2Vr4n9q3P80KFD80RtJUuXLk3vvfde2myzzVKjRo1q5HOwdu8mxQ2TGTNmpI033ri2mwPV4rplXeS6ZV3kumVd5Lpdd0WFOwL3FltssdLjajV0N23aNPXq1StNmDAhz0BeCsXxePDgwVW+pm/fvvn50047rXzffffdl/dXpVmzZnmrqHXr1jX6OVj74l9I/qXEusZ1y7rIdcu6yHXLush1u25aWYW7znQvjyr0wIEDU+/evVOfPn3SqFGj0vz58/Ns5mHAgAGpY8eOuZt4OPXUU9M+++yTfvGLX6RDDjkk3Xzzzelf//pXuuqqq2r5kwAAAEAdC93HHHNMmj17dho2bFieDK1nz55p/Pjx5ZOlTZ8+Pc9oXrLnnnumP/zhD+l///d/0znnnJO233779Ne//jXtvPPOtfgpAAAAoA6G7hBdyVfUnXzixInL7TvqqKPyRsMTQwWGDx++3JABqMtct6yLXLesi1y3rItct/Vfo7JVzW8OAAAArJH/67cNAAAA1CihGwAAAAoidAM
AAEBBhG7WGaNHj06dO3dOzZs3T3vssUeaNGlSbTcJVuof//hH+upXv5q22GKL1KhRo7zSAtRlsTzn7rvvnjbaaKPUtm3bdNhhh6UXX3yxtpsFK3XFFVekXXbZpXyN4759+6Z77rmntpsFq+Xiiy/O/61w2mmn1XZTKIDQzTph3LhxeU33mNlxypQpqUePHql///5p1qxZtd00WKH58+fnazVuGMG64KGHHkqnnHJKeuKJJ9J9992XFi9enL785S/naxnqqi233DIHlsmTJ6d//etfaf/990+HHnpoev7552u7aVAtTz75ZLryyivzzSPqJ7OXs06IynZUX37zm9/kx0uXLk2dOnVKP/jBD9LZZ59d282DVYq717fddluuHMK6Yvbs2bniHWF87733ru3mQLVtuumm6dJLL00nnnhibTcFVuqjjz5Ku+22W/rtb3+bLrzwwtSzZ880atSo2m4WNUylmzpv0aJF+e51v379yvc1btw4P3788cdrtW0A9dncuXPLAwysC5YsWZJuvvnm3DsjuplDXRe9iw455JBK/51L/bNebTcAVmXOnDn5/0TbtWtXaX88njZtWq21C6A+ix5FMbZwr732SjvvvHNtNwdW6tlnn80h+5NPPkkbbrhh7lnUrVu32m4WrFTcIIphk9G9nPpN6AYAqqy+PPfcc+mRRx6p7abAKu24447p6aefzr0zbrnlljRw4MA8LELwpq6aMWNGOvXUU/P8GTFJMPWb0E2d16ZNm9SkSZP0zjvvVNofj9u3b19r7QKorwYPHpzuvPPOPAN/TFIFdV3Tpk3Tdtttl3/v1atXrhxefvnleXIqqIti6GRMCBzjuUuiZ2f8ezfmMFq4cGH+71/qB2O6WSf+jzT+D3TChAmVuj3GY+O1AGpOzK0agTu65j7wwANpm222qe0mwRqJ/06I0AJ11Ze+9KU8LCJ6aJS23r17p+OOOy7/LnDXLyrdrBNiubDoKhb/MurTp0+e1TEmSTnhhBNqu2mw0hlJX3755fLHr776av4/0piUaquttqrVtsGKupT/4Q9/SLfffnteq3vmzJl5f6tWrVKLFi1qu3lQpaFDh6aDDjoo/3v1ww8/zNfwxIkT07333lvbTYMVin/HLjtfxgYbbJA222wz82jUQ0I364RjjjkmL10zbNiw/B+BsZzC+PHjl5tcDeqSWC92v/32q3TzKMQNpOuuu64WWwZVu+KKK/LPfffdt9L+a6+9Nh1//PG11CpYueiiO2DAgPT222/nG0Sx1nEE7gMOOKC2mwaQWacbAAAACmJMNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBoA66vjjj0+NGjXK2/rrr5+22Wab9OMf/zh98skntd00AKCa1qvugQDA2nfggQema6+9Ni1evDhNnjw5DRw4MIfwSy65pLabBgBUg0o3ANRhzZo1S+3bt0+dOnVKhx12WOrXr1+677778nNLly5NI0aMyBXwFi1apB49eqRbbrml/LXvv/9+Ou6449Lmm2+en99+++1zgA+vvfZaDu8333xz2nPPPVPz5s3TzjvvnB566KFK7x+P+/Tpk9vRoUOHdPbZZ6dPP/20/Pl99903/fCHP8wV+E033TS39bzzzit/vqysLD/eaqut8jm22GKLfHzJwoUL0xlnnJE6duyYNthgg7THHnukiRMnFvqdAsDaJHQDwDriueeeS4899lhq2rRpfhyB+4YbbkhjxoxJzz//fPrRj36UvvWtb5UH53PPPTe98MIL6Z577klTp05NV1xxRWrTpk2lc5555pnp9NNPT0899VTq27dv+upXv5refffd/Nybb76ZDj744LT77runZ555Jr/+mmuuSRdeeGGlc1x//fU5MP/zn/9MP//5z9MFF1xQfmPgL3/5S/rlL3+ZrrzyyvTSSy+lv/71r6l79+7lrx08eHB6/PHHc/j
/97//nY466qhc3Y9jAaA+aFQWt6ABgDo5pvv3v/99rkJHdTmqwo0bN05/+tOf0le+8pVcWb7//vtzWC456aST0oIFC9If/vCH9LWvfS2H7LFjxy537qh0R4X84osvTmeddVbeF+8R+37wgx/kyvVPfvKTHJojsEdVPPz2t7/Nx8+dOze3JSrdS5YsSQ8//HD5uaMyvv/+++dzjxw5MgfuuGEQ49Irmj59etp2223zz6iAl0Q1P85x0UUXFfK9AsDaZEw3ANRh++23X64wz58/P1eM11tvvXTkkUfmynaE6wMOOKDS8YsWLUq77rpr/v373/9+PnbKlCnpy1/+cu6eHl3JK6oY2OPcvXv3ziE7xM94vhS4w1577ZU++uij9MYbb+Qu42GXXXapdM7ohj5r1qz8e1SuR40alcN1VLCjch7V9HivZ599Ngf2HXbYodLr4+bCZpttVkPfIADULqEbAOqw6La93Xbb5d+jYh3jtqOLd4y/DnfddVceD11RjJ0OBx10UHr99dfT3Xffnbt7f+lLX0qnnHJKuuyyy2q0jctWsCOkx3jzEGPRX3zxxVyRjzacfPLJ6dJLL81d4CO8N2nSJE8QFz8r2nDDDWu0jQBQW4zpBoB1RHTnPuecc9L//u//pm7duuVwHV2zI5RX3CLolsQkajHjeXRTj4rzVVddVemcTzzxRPnv0b08AnDXrl3z4/gZ460rjkR79NFH00YbbZS23HLLarc7JnGL6vavfvWrPElanDOq3FGRj0p3VMWX/QwxIRsA1Acq3QCwDonu2jH5WYyTjlm/Y/K0qCp/4QtfyOOsIxRvvPHGOWgPGzYs9erVK+200065y/add95ZHqhLRo8enWc1j/3RfT1mPP/Od76Tn4uqdAT1GOMdE55FxXr48OFpyJAh+QZAdVx33XU5WMes5C1btszhP0L41ltvnbuQx+zqAwYMSL/4xS9yCJ89e3aaMGFC7rJ+yCGHFPIdAsDaJHQDwDokxkJHAI5Zwl999dVcyY5ZzP/73/+m1q1bp9122y1Xw0PMcj506NA8aVoE3S9+8Yt5lvCKYrKz2J5++ulcYb7jjjvKZziPbuvRNT1CfnRrj4nbTjzxxFxpr65oU5w/gnqE75i5/G9/+1v5mO1YwixmQ48Z1GO29Hjvz3/+83miOACoD8xeDgANUGn28lgqrGfPnrXdHACot4zpBgAAgIII3QAAAFAQ3csBAACgICrdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAACQivH/AKoDC46N7J+pAAAAAElFTkSuQmCC", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA90AAAJOCAYAAACqS2TfAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAUZ5JREFUeJzt3QeYVOX5N+CXIk0RVEREUTRqAEFUEIMaK4olxhY1agSNmqIk9gQ0gtiNJZjYG5ZoNLFXjKLYsLdYwNgQLIgt2Kn7Xc/7XbP/XViqe9h239c12Z0zZ868M3Mi+zvPWxqVlZWVJQAAAKDaNa7+QwIAAABB6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBoA5NGrUKJ144omprrv22mtTly5d0lJLLZXatm1brcc+4IADUufOnVNtNWbMmPw9xs+Gqr6cxwB1ndANwFzeeuut9Otf/zqtueaaqUWLFmnZZZdNm266aTrvvPPSt99+W9PNYyGMHz8+B+Mf/OAH6bLLLkuXXnrpPPeNYBYBrXRr1apVWm211dLOO++cRo4cmaZNm5bqg+uvvz6NGDGi2o9b+twOPvjgKh8//vjjy/f55JNPFvn4Y8eOzd/R//73v2poLQBLWqOysrKyJf6qANRad999d9pzzz1T8+bN04ABA1L37t3T9OnT02OPPZZuvvnmHOTmF+Dqg++++y41bdo03+qqiy++OP32t79Nb7zxRlprrbXmu28EuuHDh6eLLrooLbPMMjlkv//+++m+++7LgW+99dZLd911V+rUqVP5c2bMmJFmz56dz5PaKNoW522zZs1S48b/v8bwk5/8JL3yyitpwoQJ1fpaEabj4lTcPvroo/yaFcXFqw8//DCfVx9//HFq167dIh3/7LPPTscee2x65513Fql3QX04jwHqA/8VBqBc/FH/85//PK2++urpwQcfTCuvvHL5Y4cddlh68803cyivj0ohrRSe6ropU6bkn4vSrfxnP/tZpUA4dOjQdN111+WLL3Eh5sknnyx/LLqs1+R3tCARtJfk97j99tunO+64I917771pl112Kd8eFy3i/1d77LFHvmhVtPp2HgPUB7qXA1Duz3/+c/rqq6/SFVdcUSlwl0TF9PDDDy+/P3PmzHTyySfnLsxR8Ywq3HHHHTdXd+TYHlXGGF/bu3fv1LJly9SjR4/y8ba33HJLvh8hoVevXumFF16o9PyorkcF9u233079+/dPSy+9dOrYsWM66aST0pwdtqIquMkmm6QVVlghv04c76abbqqyOjlo0KAcKtddd93c/lGjRlU5FvbLL79MRxxxRH4fsV/79u3Ttttum55//vlKx/zXv/6VXy9eN8LrL37xi1wxruq9xPZdd901/77iiiumY445Js2aNWuhvqcLL7ywvM3xOcQFkYpdj6Odw4YNy7/Hsb/P2N799tsvd5t+6qmn0v3331/pfZSqrlH1Xn755dOBBx441/O/+OKL/L3G+yuJ8yPaF+dTvIeooP/hD3+Y67yZ33d0ww035M+6devWefhDnD8x/GFeY7q33HLLfMHo3XffLe/qHe2P8z3Op4rndcl7772XmjRpkk4//fQFfk6rrLJK2nzzzXMX9oqi7dG26DFSlfhcI7C3adMmd+vfYost0uOPP17+eHxvUeUOa6yxRnnbS9X6RTmPQ5x3Bx10UD5vYt84ZvSIiKBe+i6j18Paa6+dv7f4/9Fmm21W6bsHYNGodANQ7s4778xdYSO0LowIY1dffXWukB599NE5QERAGTduXLr11lsr7RtV8n333TePFY8wGuE4xgxHN+gI6oceemjeL56/1157pddff728W3CIQBrh5Ec/+lG+OBDBIoJbBP8I3yURvH7605/msBhBIsJZVGmje/ROO+1UqU1Rzf/nP/+ZQ0uE5Hl13f3Nb36Tg3vs161bt/Tpp5/m7vbxPjfccMO8z1V
XXZVD50YbbZTfQ3QzjrZEgIqLCBUrzvFe4uLBxhtvnD+HBx54IJ1zzjn54kUEoIXpCt6vX7+8b3xO0S38mWeeya8VFegYt3zNNdfk76DUZTy6iC+u/fffPw8p+Pe//50vNswpXnO33XbLF08uueSSSt2rb7vtthymowdFqRIb3098fr/61a9S165d08svv5z+8pe/pP/+9795/wV9RxEA99lnn7TNNtukM888M+8X30W8/6rCc2lc9dSpU3OQjtcK8bnELdp+4403pnPPPTeH7JJ//OMf+aJOnEsLI87veP0I8nHcODfjQsxRRx2Vu3rPKd7bDjvskC8exLkc53uMod96663To48+mvr06ZN23333/LlEW6LdpZ4IcTFlfp9RVT744IN8zLhAE599TLIXITzO7W+++SZ/b3F+xfkb/9+OfeOiybPPPpsvMFX13QOwEGJMNwBMnTo1SsZlu+yyy0Lt/+KLL+b9Dz744ErbjznmmLz9wQcfLN+2+uqr521jx44t33bfffflbS1btix79913y7dfcskleftDDz1Uvm3gwIF52+9+97vybbNnzy7baaedypo1a1b28ccfl2//5ptvKrVn+vTpZd27dy/beuutK22P4zVu3Ljs1Vdfneu9xWPDhg0rv9+mTZuyww47bJ6fRbxG+/bt8+t8++235dvvuuuufKyhQ4fO9V5OOumkSsfYYIMNynr16jXP1whTpkzJ73e77bYrmzVrVvn2888/Px/zyiuvLN8W7Y9tFT+beVnQvp9//nl+fLfddqv0PuJ7nfP7vPPOOys9d8cddyxbc801y+9fe+21+XN/9NFHK+138cUX5+c//vjjC/yODj/88LJll122bObMmfN8T3H+zHkexflSsc1ztv3ee++ttH299dYr22KLLeb5GhXbGefHZ599lr+feI/h7rvvLmvUqFHZhAkT5vqM4/xde+21y/r3759/r3j+rrHGGmXbbrtt+bazzjorP/edd96p8rUX9jweMGBA3veZZ56Za99SG3r27Jk/JwCqj+7lAGRR0QrRXXdh3HPPPflnVPEqiop3mHPsd1SI+/btW34/qrwhqnoxU/ac26Mr+ZyikldS6lYb1eyoFJdE1+6Szz//PFc3f/zjH8/VFTxEV95o14JElTqq+FEprEpUAmMMdVTrK46jjcp6VBOrGgcf1fOKoo1VveeK4n3G+42u7hV7ARxyyCG5i3VR4+2jalvqZj8v8T1GlTUqxhU//6hK77333uXbovIb1e34XGIm79Itnh8eeuihBX5H8X18/fXX1dblOXoNRHfr6KJdEhOu/ec//8m9MhbWcsstl3tjRFU6RFfz6DUScyTM6cUXX8yT3EV1PHpOlD6HeF9RwX/kkUdyr4CFsTDncRwrehFE75IY4jGn+P9T6bN99dVXc9sAqB5CNwBZhLYFBauKYmxsBL85Z8bu0KFD/sM9Hq+oYrAOMYY1VJwRu+L2CGwVxWtF1/eK1llnnfyz4mzU0Y08uqBH+I1xxtENN7pYR/ieU4xnXRjRnT1CWLQ1utxGF9yKAbn0Xn/4wx/O9dwIl3N+FtG2it2DS4Ftzvc8p3m9TnQLjs9mztepLtFdekEXZGKG7Jgs7Pbbby8fmx3dzWOMcMXQHWEuQl28/4q30ndZmgBuft9RXNyI/aNr9qqrrpp++ctflo9jXhxxbkUX8gil0c06RACP7ymGJiyKCNFxMWDixIn5eHG/KqVQO3DgwLk+i8svvzx/hlWds1VZmPM4Zk2PC2vzGlteEkM1ovt5fL4xFj3Gk8fFBwAWn9ANQHnojmpfhMtFUaqQLUjFsbILs31xVrSMcbAxXjjCUkw2FtX4CEARfKo6XsWq+PzEGPMI2X/729/yZ3TWWWflSatipurFMa/3XFuVzokFLT0W47bjok3pc4lxxnHRoWfPnpUqrhHm4nup6lYa2z+/7ygmsotKccwWHt93VMcjgEeAXVwxQ3tcXIigHOdKVKlj8r/
SRaCFFe2JCcqiLRGc49ypSqmKHefSvD6LUg+DBVnY83hhxGRwb731VrryyitzQI8LADFvQfwEYPGYSA2AchEyYsKsJ554olJX8KpEl9kIDlGxi+7CJTGBWFTKqupS+33Ea0XwLVVEQ0wwFUoTR8WSTBG4Y33piutHx+RU31fM5h6BMG5RjY0gcuqpp+awV3qvMalZqZt0SWyrrs+i4utUrPpHl/NYliq6SRfh2muvzT9j8rcFBbb4nKKLecx4HRN8xQRmFcVkcS+99FLuQr2wF2yqEtX96Codtzg34nuJSdxOOOGEeV4cmN/rRcDcYIMNcoU7qudRqY6LLIsqAnDMSv/3v/89nxvzWpM7PofSxa4FfW/f53MqiQp6vNbCXFQrzUQft7gQEd9r9O6IydUAWHQq3QCUi2WbYvmk+OM6wvOcogJWWpZpxx13zD9jpuyKYgboMOdM4dXh/PPPL/89qpFxP2bOjgBXqiBHQKm49FZ0PZ9zRuxFEceas5tvVFqj4l3qRh1jZGNbzMRecdmrqPjGrNrV9VlEOIuw+de//rVS5T6WeIs2FvGZR8U3qpxxEab0Oc+vm3bMZB+z4EdQj9m7K3YtD1H5jRmzL7vssrme/+233+YxzQsSY6DnfN3S7OxzLjtWUZzb8+uyHbO0xwztcU7HUlkRmhdHLI8Ws5HHBYB5iRnLI3jH7PWl7vtzdgev2O5QcVm4RRWfUVwMiO8m5iCYU+l8mvOzjWp7XMSY3+cKwPypdANQLkJAhKwISlG9ji63UQGMSurYsWPzJFixPnOILsPRhTYq4xEGYjKnp59+Oi8hFn/cb7XVVtXatqhgx7jdeM2YbC0CbUwcFsuNlcZHR+iM0B+TWUWX8qhIX3DBBTk0LO641OguHZXPCJPxniOExIRmsURXLPMVIvjH0lVRGYzPIZazKi0ZFlX4I488slo+g3ifQ4YMyUuGxXuMrsxR9Y6u9LFU2aJM+lWVWDoq3l983xGMo8dALMMV7zu++4UR505UiCN0Rjfyir0gSsE2up3HRHLRLXzTTTfNFzbGjx+ft8drVjXRV0VxUeizzz7LvQriu4mx7PGa66+//lyvN2fQjSp8TP4Xn1e816iUl8Q5ExeeYqm1WI4tvtfFEZ9XxS718wrBcTEjgn0MVYhzJ9b6js89PpeoSkdALrU7RK+B6MIf7Yp2l8L4wjrttNPyRYU4R0vLtX344Yf5u40l3GIuhpiQLdY0j9eMincE9NJyeQAspmqcCR2AeuK///1v2SGHHFLWuXPnvARS69atyzbddNOyv/3tb2Xfffdd+X4zZswoGz58eF7iaKmllirr1KlT2ZAhQyrtE2KZpqqWISottVRRLIsU22OZpIrLUy299NJlb731Vl4uq1WrVmUrrbRSXg6p4tJZ4YorrshLMTVv3rysS5cuZSNHjixfrmlBr13VUkvTpk0rO/bYY/NSSvE5RDvi9wsvvHCu591444156a947eWXX75sv/32K3vvvfcq7VN6L3Oqqo3zEkuExXuLzzw+h9/+9rd5Wa+qjrcoS4aVbi1atChbddVVy37yk5/kZcjm/D5L76Oq5bdi6ak4D+I4p5xyyjyXWDvzzDPL1l133fxZLbfccnm5tDiXYum6BX1HN910Uz4PYpm2OD9XW221sl//+tdlH3744XyXDPvqq6/K9t1337K2bdvmx6pqfyxxNufydgsyv3NpQd/HCy+8ULb77ruXrbDCCvmziDbttddeZaNHj66038knn1y2yiqr5CW/Ki4ftrDncUkszxdLh6244or59WI5t3h+nOchvrM+ffrkzyiW84vz7NRTT83fGQCLp1H8z+IGdgBYEqK6HtW2qrrhQnXabbfd0ssvv5zefPPNmm4KAPWEMd0AACnlrtYxZCG6wANAdTGmGwBo0GLm9xi7HmOsY7z0r3/965puEgD1iEo3ANCgPfzww7m6HeE7JgLs0KFDTTcJgHrEmG4AAAAoiEo3AAAAFET
oBgAAgII0uInUZs+enT744IPUunXr1KhRo5puDgAAAHVQjNT+8ssvU8eOHVPjxvOuZze40B2Bu1OnTjXdDAAAAOqBSZMmpVVXXXWejze40B0V7tIHs+yyy9Z0cwAAAKiDvvjii1zQLWXMeWlwobvUpTwCt9ANAADA97GgYcsmUgMAAICCCN0AAABQEKEbAAAACtLgxnQDwLyWlJw+fXpNN4NabKmllkpNmjSp6WYAUMcI3QA0eBG233nnnRy8YX7atm2bOnTosMBJcwCgROgGoEErKytLH374Ya5gxrIfjRsbeUXV58k333yTpkyZku+vvPLKNd0kAOoIoRuABm3mzJk5THXs2DG1atWqpptDLdayZcv8M4J3+/btdTUHYKG4nA9AgzZr1qz8s1mzZjXdFOqA0oWZGTNm1HRTAKgjhG4ASMkYXRaK8wSARSV0AwAAQEGEbgCgSldddVWerRsAWHwmUgOAKnQefPcSfb0JZ+y0WM+bPHlyOvXUU9Pdd9+d3n///TzB1/rrr5+OOOKItM0223yvNu29995pxx13TEUbM2ZM2mqrrdLnn38u5ANQ7wjdAFBHTZgwIW266aY5qJ511lmpR48eeYKv++67Lx122GFp/Pjx33u27tKM3QDA4tG9HADqqEMPPTRP7PX000+nPfbYI62zzjpp3XXXTUcddVR68skn8z4TJ05Mu+yyS1pmmWXSsssum/baa6/00UcflR/jpZdeylXm1q1b58d79eqVnn322Sq7l5944om5in7ttdemzp07pzZt2qSf//zn6csvvyzfZ/bs2en0009Pa6yxRg7sPXv2TDfddNMiva/S68bFg65du+a2b7/99nk99fDvf/87tWjRIv3vf/+r9LzDDz88bb311ov5aQJAMYRuAKiDPvvsszRq1Khc0V566aXnejxCawTgCNyx78MPP5zuv//+9Pbbb+du4yX77bdfWnXVVdMzzzyTnnvuuTR48OC01FJLzfN133rrrXTbbbelu+66K9/iuGeccUb54xG4r7nmmnTxxRenV199NR155JHpF7/4Rd5vUcTa6WeffXYO+I888ki+eHDMMcfkx6LbfLy/m2++udLSbzfeeGN+PwBQm+heDgB10JtvvpnKyspSly5d5rnP6NGj08svv5zeeeed1KlTp7wtAnFUwyNkb7TRRjnMHnvsseXHWXvttef7uhHkoxIdlfGw//7759eJceXTpk1Lp512WnrggQdS37598+Nrrrlmeuyxx9Ill1yStthii4V+f9FNPoL7D37wg3x/0KBB6aSTTsq/N2nSJFfYr7/++nTQQQeVv9eofEfFHwBqE5VuAKiDInAvyLhx43LYLgXu0K1bt1wljsdCdEU/+OCDU79+/XLFOirZ8xPdykuBO6y88sppypQp5RcCokK97bbb5i7hpVsE/dJxI/CXtu+www7zfJ1WrVqVB+45XydERTsmYPvggw/y/euuuy7ttNNOJmIDoNap0dAd3cV23nnn1LFjxzwmLbqrLUj8A7vhhhum5s2bp7XWWitfbQeAhiYq0vFv5/edLC3GaUc38AisDz74YA7lt9566zz3n7PrebQhqt/hq6++yj9jJvUXX3yx/Pbaa6+Vj+u+5557yrdffvnli/Q6FS80RJU+QvkNN9yQvv3229xmXcsBqI1qNHR//fXXeYKVCy64YKH2j+5x8UdBTPgS/1jHcihxdT4mWgGAhmT55ZdP/fv3z/+Gxr+nc4qu1jEJ2aRJk/KtJAJwPBbhuiQmYIux1zFB2e67755Gjhy5WG2KY8ZF8eiyHhfGK95K1fbVV1+9fNsqq6ySvo8I2VHhvvPOO1Pjxo3z3wgAUNvU6Jju6FY2v65lc4qxXTEb6jnnnJPvxx8TMU7sL3/5S/7DAwAakgjcsWRYnz598njn9dZbL82cOTNPmHbRRRflgB3LiEU4HTFiRH4sZjyPsdW9e/fOFeIYz/2zn/0s//v63nvv5bHeizsuOrqdx2R
nEeCj+r3ZZpulqVOnpscffzzPjD5w4MBqff/xvqJSH+PJ4z1E4AeA2qZOTaT2xBNP5DFnFUXYjor3vMSkLnEr+eKLLwptIwAsKTFJ2fPPP59D59FHH52X1FpxxRXzsl8RuqNL9u23355+97vfpc033zxXg2Pprb/97W/lE5J9+umnacCAAXkZsXbt2uVK9/Dhwxe7TSeffHJuQ8xiHjOlxxjrGBZ23HHHpeoW1fK44BBLpsVFBQCojRqVLcxMLEtA/GEQ47F23XXXee4T3d8OPPDANGTIkPJtMTYsupPFxC2xHuic4gp4VX88xJX3uOpem3UefHeqLSacocseUD999913efhSVHpj7WeYH+cLABULum3atFlgtqz3s5dHQI8PoXSrOK4NAAAAilSnupd36NAhd3+rKO7HVYWqqtwhxncZ4wUAAEBNqFOV7r59+6bRo0dX2haTxcR2AAAAqG1qNHTHep6ltTpDjJGK32OpkVLX8JjcpeQ3v/lNnpTlD3/4Q16X9MILL0z//Oc/8yypAAAAUNvUaOh+9tln0wYbbJBv4aijjsq/Dx06NN+PWVhLATzEpCV33313rm7H+t6xdNjll19uuTAAAABqpRod073lllum+U2eftVVV1X5nBdeeKHglgEAAEADG9MNAAAAdYnQDQAAAAURugEAAKAgQjcAUC907tw5jRgxoqabAQC1ZyI1AKi1TmyzhF9v6mI9bfLkyenUU0/Nq3u8//77qX379mn99ddPRxxxRNpmm21SQ/LMM8+kpZdeuqabAQCVCN0AUEdNmDAhbbrppqlt27bprLPOSj169EgzZsxI9913XzrssMPS+PHjU20SbVtqqaUKO/6KK65Y2LEBYHHpXg4AddShhx6aGjVqlJ5++um0xx57pHXWWSetu+666aijjkpPPvlk3mfixIlpl112Scsss0xadtll01577ZU++uij8mOceOKJuTJ+5ZVXptVWWy3vF8edNWtW+vOf/5w6dOiQq+dRTa8oXveiiy5KO+ywQ2rZsmVac80100033VTpgkDsc+ONN6YtttgitWjRIl133XX5scsvvzx17do1b+vSpUu68MILy583ffr0NGjQoLTyyivnx1dfffV0+umn58dimdFob7SzefPmqWPHjun3v//9PLuXL+x7v/baa/Nz27Rpk37+85+nL7/8spq/KQAaMpVuAKiDPvvsszRq1KgchqvqUh3V79mzZ5eHzocffjjNnDkzV8D33nvvNGbMmPJ933rrrXTvvffm48XvP/vZz9Lbb7+dQ3w8b+zYsemXv/xl6tevX9p4443Ln3fCCSekM844I5133nk5uEZgffnll3OgLhk8eHA655xz0gYbbFAevIcOHZrOP//8vO2FF15IhxxySH4PAwcOTH/961/THXfckf75z3/mcD1p0qR8CzfffHP6y1/+km644YZ8cSG61r/00ktVfj6L8t5vu+22dNddd6XPP/88B/N4T3NeZACAxSV0A0Ad9Oabb+bKb1SK52X06NE5BL/zzjupU6dOeds111yTA2uMf95oo43KA2pUulu3bp26deuWttpqq/T666+ne+65JzVu3Dj98Ic/TGeeeWZ66KGHKoXuPffcMx188MH595NPPjndf//96W9/+1ulynWMLd99993L7w8bNiyH8NK2NdZYI7322mvpkksuyaE7qtNrr7122myzzXKlPCrdJfFYVN4j/Ec39Qjlffr0+d7v/aqrrsrvPey///75uUI3ANVF93IAqIMicC/IuHHjcuAshc4QoTqq4PFYSXStLoXOsNJKK+X9InBX3DZlypRKx+/bt+9c9yseN/Tu3bv896+//jpXlg866KBcgS7dTjnllLw9HHDAAenFF1/MQT+6jv/73/+uFPK//fbb3JU9quO33nprrmBX53uPbu1zvk8A+D6EbgCog6IaHJXg6pgsbc7JzeK4VW2LqvCiqtj1/auvvso/L7vsshysS7dXXnmlfAz6hht
umKvTUTmPgB3dvaO7e4gAHRX4qKTHOPIYe7755pvnCdoWV3W9TwCYF6EbAOqg5ZdfPvXv3z9dcMEFuYI8p//97395bHXFMdEhunLHY1H1/b5KQbni/YrjuecU1fKY/CzGi6+11lqVbtHNvCQmPYux1xHOYyK2GMsdY9hDhO2dd945j/2OsdlPPPFE7kY+p6LfOwAsLGO6AaCOisAdS4bFuOaTTjoprbfeerm7dYytjpnFI2TGMmL77bdfntU7HovqcMwmXrHb9+L617/+lY8T469jgrSYRf2KK66Y73OGDx+eu43HTOHbb799mjZtWnr22WfzJGYx6/q5556bu3jHJGvRvT1eI8ZxR7fwGHsds6rHuPJWrVqlv//97zmEVxz3XRLjvot87wCwsFS6AaCOirHNzz//fJ747Oijj07du3dP2267bZ4ILEJ3dJW+/fbb03LLLZe7YUcQjedE9bg6RICOmcQj7MckZf/4xz8WWEWOiddiybCRI0fmUBwhOMJ0qdId46tjqbIIxjHZWSw9VprQLYJ3VL/jQkO85gMPPJDuvPPOtMIKK8z1OkW/dwBYWI3KFmYmlnrkiy++yFfXp06dmruv1WadB9+daosJZ+xU000AKMR3332XxxBH6IslrVg4EWpjIrNdd901NSTOFwAWNVvqXg4AACwRiko0RLqXAwAAQEFUugGARdbARqcBwGJT6QYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBoIFo1KhRuu222/LvEyZMyPdffPHFmm4WANRr1ukGgCr0uLrHEn29lwe+vEj7H3DAAenqq6+ea3v//v3TqFGjFvj8Tp06pQ8//DC1a9cu3x8zZkzaaqut0ueff57atm27SG0BAOZN6AaAOmr77bdPI0eOrLStefPmC/XcJk2apA4dOhTUMgCgRPdyAKijImBHcK54W2655fJjb7zxRtp8881TixYtUrdu3dL9999f6bkVu5fH71HlDvH82B6VdADg+1PpBoB6Zvbs2Wn33XdPK620UnrqqafS1KlT0xFHHDHfruY333xz2mOPPdLrr7+ell122dSyZcsl2mYAqK+EbgCoo+666660zDLLVNp23HHHpd69e6fx48en++67L3Xs2DFvP+2009IOO+wwz67myy+/fP69ffv2xnQDQDUSugGgjoou4RdddFGlbRGer7322ly9LgXu0Ldv3xpoIQAgdANAHbX00kuntdZaq6abAQDMh4nUAKCe6dq1a5o0aVJeEqzkySefnO9zmjVrln/OmjWr8PYBQEOi0g0AddS0adPS5MmTK21r2rRp6tevX1pnnXXSwIED01lnnZW++OKLdPzxx8/3WKuvvnqetTzGie+44455IrU5x4sDAItOpRsA6qhRo0allVdeudJts802S40bN0633npr+vbbb1OfPn3SwQcfnE499dT5HmuVVVZJw4cPT4MHD86zng8aNGiJvQ8AqM8alZWVlaUGJK72t2nTJi+fEkui1GadB9+daosJZ+xU000AKMR3332X3nnnnbTGGmvkNa1hfpwv8P34+5aGmC1VugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwCwREyYMCE1atQovfjii/PcZ8yYMXmf//3vf9/rtTp37pxGjBjxvY4BANWhabUcBQDqmXFdui7R1+s6ftwi7X/AAQekq6++eq7t/fv3T6NGjarGlgEA34fQDQB11Pbbb59GjhxZaVvz5s1rrD0AwNx0LweAOioCdocOHSrdlltuufxYdNG+/PLL02677ZZatWqV1l577XTHHXeUP/fzzz9P++23X1pxxRVTy5Yt8+MVA/ykSZPSXnvtldq2bZuWX375tMsuu+Tu4RUr7bvuums67bTT0korrZT3O+mkk9LMmTPTsccem5+z6qqrznVRIIwfPz5tsskmqUWLFql
79+7p4Ycfnu/7fOyxx9KPf/zj3M5OnTql3//+9+nrr78uf3zKlClp5513zo+vscYa6brrrvveny0AVBehGwDqqeHDh+fg/J///CftuOOOOWR/9tln+bETTjghvfbaa+nee+9N48aNSxdddFFq165dfmzGjBm5m3rr1q3To48+mh5//PG0zDLL5Mr69OnTy4//4IMPpg8++CA98sgj6dxzz03Dhg1LP/nJT3Lwf+qpp9JvfvOb9Otf/zq99957ldoVofzoo49OL7zwQurbt28OzJ9++mmV7+Gtt97Kr7vHHnvk93HjjTfmED5o0KBKFwDiIsFDDz2UbrrppnThhRfmIA4AtYHQDQB11F133ZXDcMVbVJ4rhtF99tknrbXWWnn7V199lZ5++un82MSJE9MGG2yQevfunScd69evXw6/IYLt7Nmzc6W8R48eqWvXrrliHc+Jic5Kopr917/+Nf3whz9Mv/zlL/PPb775Jh133HG5cj5kyJDUrFmzHJIrisAcITqOG2G/TZs26YorrqjyPZ5++un5YsERRxyRjxkV8njNa665Jn333Xfpv//9b75wcNlll6Uf/ehHqVevXvlY3377bUGfOgAsGmO6AaCO2mqrrXJorSiCcMl6661X/vvSSy+dll122fIK8G9/+9scfJ9//vm03Xbb5a7iEWjDSy+9lN58881c6a4oQm5UnkvWXXfd1Ljx/12/j27m0V28pEmTJmmFFVaYq+oc1e2Spk2b5uAf1faqRFuiwl2xy3hZWVm+KPDOO+/k0B3HiLBd0qVLl9zdHQBqA6EbAOqoCNJRxZ6XpZZaqtL9GOcdYTXssMMO6d1330333HNPuv/++9M222yTDjvssHT22WfniniE2KrGRscY8Pkdf36vuTiiLdFFPcZxz2m11VbLoRsAajPdywGggYoAPXDgwPT3v/89r2l96aWX5u0bbrhheuONN1L79u1zqK94i67g39eTTz5Z/ntMvPbcc8/lruZVibbE2PM52xG36LoeVe3SMUpef/31773ONwBUF6EbAOqoadOmpcmTJ1e6ffLJJwv13KFDh6bbb789dyN/9dVX8/jwUvCNMdQxqVrMWB4TqUU37hjLHdXmOSdFWxwXXHBBuvXWW/Ms5lFdj5nUY0x4Vf74xz+msWPH5nHgL774Yr4YEO0uTaQW48hjorWohsfkbRG+Dz744DyTOQDUBkI3ANRRo0aNSiuvvHKl22abbbZQz40qcUx0FuO+N9988zz++oYbbsiPxRJjMSN5dN/efffdcxg/6KCD8pjuGBf+fZ1xxhn51rNnzzzJWixlVpo5fU7RvlhSLLqRx7JhMflbXDDo2LFj+T4xyVvc32KLLXJ7f/WrX+UqPQDUBo3KYjaSBuSLL77IXeOmTp1aLX84FKnz4LtTbTHhjJ1qugkAhYggGZXcWN851o2G+XG+wPfj71saYrZU6QYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgBIKTWweUVZTM4TABaV0A1AgxZLZYXp06fXdFOoA7755pv8c6mllqrppgBQRzSt6QYAQE1q2rRpXpf6448/zkGqcWPXo6m6wh2Be8qUKalt27blF2sAYEGEbgAatEaNGqWVV145r7387rvv1nRzqOUicHfo0KGmmwFAHSJ0A9DgNWvWLK299tq6mDNf0RNChRuARSV0A0BMctK4cWrRokVNNwMAqGcMXAMAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKEjTog4MNEydB9+daosJZ+xU000AAKCBU+kGAACAggjdAAAAUBChGwAAAAoidAMAAEB9Dd0XXHBB6ty5c2rRokXaeOON09NPPz3f/UeMGJF++MMfppYtW6ZOnTqlI488Mn333XdLrL0AAACwsGo0dN94443pqKOOSsOGDUvPP/9
86tmzZ+rfv3+aMmVKlftff/31afDgwXn/cePGpSuuuCIf47jjjlvibQcAAIBaHbrPPffcdMghh6QDDzwwdevWLV188cWpVatW6corr6xy/7Fjx6ZNN9007bvvvrk6vt1226V99tlngdVxAAAAaFChe/r06em5555L/fr1+7/GNG6c7z/xxBNVPmeTTTbJzymF7Lfffjvdc889accdd5zn60ybNi198cUXlW4AAACwJDRNNeSTTz5Js2bNSiuttFKl7XF//PjxVT4nKtzxvM022yyVlZWlmTNnpt/85jfz7V5++umnp+HDh1d7+wEAAKDWT6S2KMaMGZNOO+20dOGFF+Yx4Lfccku6++6708knnzzP5wwZMiRNnTq1/DZp0qQl2mYAAAAarhqrdLdr1y41adIkffTRR5W2x/0OHTpU+ZwTTjgh7b///unggw/O93v06JG+/vrr9Ktf/Sodf/zxuXv6nJo3b55vAAAA0GAq3c2aNUu9evVKo0ePLt82e/bsfL9v375VPuebb76ZK1hHcA/R3RwAAABqkxqrdIdYLmzgwIGpd+/eqU+fPnkN7qhcx2zmYcCAAWmVVVbJ47LDzjvvnGc832CDDfKa3m+++Waufsf2UvgGAACA2qJGQ/fee++dPv744zR06NA0efLktP7666dRo0aVT642ceLESpXtP/3pT6lRo0b55/vvv59WXHHFHLhPPfXUGnwXAAAAUAtDdxg0aFC+zWvitIqaNm2ahg0blm8AAABQ29Wp2csBAACgLhG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQX2cvBwBg0XUefHeqLSacsVNNNwGg1lLpBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAg1ukGAKDe6HF1j1QbvDzw5ZpuAlBLqHQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoSNOiDgwAdUXnwXen2mLCGTvVdBMAgGqk0g0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAArStKgDAwB1W4+re6Ta4OWBL9d0EwBgsal0AwAAQEFUugEAAGqQnkX1m0o3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFKRpUQcGABbDiW1SrbHGajXdAgCo81S6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQG0K3TNnzkwPPPBAuuSSS9KXX36Zt33wwQfpq6++qu72AQAAQMNZp/vdd99N22+/fZo4cWKaNm1a2nbbbVPr1q3TmWeeme9ffPHFxbQUAAAA6nul+/DDD0+9e/dOn3/+eWrZsmX59t122y2NHj26utsHAAAADafS/eijj6axY8emZs2aVdreuXPn9P7771dn2wAAAKBhVbpnz56dZs2aNdf29957L3czBwAAABYzdG+33XZpxIgR5fcbNWqUJ1AbNmxY2nHHHRf1cAAAAFBvLXL38rPPPjtPpNatW7f03XffpX333Te98cYbqV27dukf//hHMa0EAACAhhC6O3XqlF566aV044035p9R5T7ooIPSfvvtV2liNQAAAGjoFil0z5gxI3Xp0iXdddddOWTHDQAAAKiGMd1LLbVU7lIOAAAAFDCR2mGHHZbOPPPMNHPmzEV9KgAAADQoixy6n3nmmXTLLbe
k1VZbLfXv3z/tvvvulW6L6oILLshrfLdo0SJtvPHG6emnn57v/v/73/9y8F955ZVT8+bN0zrrrJPuueeeRX5dAAAAqHUTqbVt2zbtscce1fLiMRnbUUcdlS6++OIcuGMpsgjyr7/+emrfvv1c+0+fPj1tu+22+bGbbroprbLKKundd9/NbQIAAIA6H7pHjhxZbS9+7rnnpkMOOSQdeOCB+X6E77vvvjtdeeWVafDgwXPtH9s/++yzNHbs2Dy+PESVHAAAAOpF9/KSjz/+OD322GP5Fr8vqqhaP/fcc6lfv37/15jGjfP9J554osrn3HHHHalv3765e/lKK62Uunfvnk477bQ0a9aseb7OtGnT0hdffFHpBgAAALUydH/99dfpl7/8ZR5Tvfnmm+dbx44d81rd33zzzUIf55NPPslhOcJzRXF/8uTJVT7n7bffzt3K43kxjvuEE05I55xzTjrllFPm+Tqnn356atOmTfkt1hkHAACAWhm6Ywz2ww8/nO688848qVncbr/99rzt6KOPTkWaPXt2Hs996aWXpl69eqW99947HX/88blb+rwMGTIkTZ06tfw2adKkQtsIAAAAiz2m++abb87V5i233LJ824477phatmyZ9tprr3TRRRct1HHatWuXmjRpkj766KNK2+N+hw4dqnxOVNdjLHc8r6Rr1665Mh7d1Zs1azbXc2KG87gBAABAra90RxfyObuEh6hAL0r38gjIUa0ePXp0pUp23I9x21XZdNNN05tvvpn3K/nvf/+bw3hVgRsAAADqVOiOQDxs2LD03XfflW/79ttv0/Dhw+cZlufXVf2yyy5LV199dRo3blz67W9/m8eMl2YzHzBgQO4eXhKPx+zlhx9+eA7bMdN5TKQWE6sBAABAne9eft555+W1tFddddXUs2fPvO2ll15KLVq0SPfdd98iHSvGZMfM50OHDs1dxNdff/00atSo8kr6xIkT84zmJTEJWrzGkUcemdZbb728TncE8D/+8Y+L+jYAAACg9oXuWKbrjTfeSNddd10aP3583rbPPvuk/fbbL4/rXlSDBg3Kt6qMGTNmrm1RTX/yyScX+XUAAACg1ofu0KpVq3TIIYdUf2sAAACgIY/pjnWvr7zyyrm2x7YzzzyzutoFAAAADS90X3LJJalLly5zbV933XXnu142AAAANDSLHLpjwrNYomtOK664Yvrwww+rq10AAADQ8EJ3zCD++OOPz7U9tnXs2LG62gUAAAANbyK1mEDtiCOOSDNmzEhbb7113jZ69Oj0hz/8IR199NFFtBEAAAAaRug+9thj06effpoOPfTQNH369Lwt1uiOtbKHDBlSRBsBAACgYYTuRo0a5VnKTzjhhDRu3Li8Nvfaa6+dmjdvXkwLAQAAoKGM6S5ZZpll0kYbbZRat26d3nrrrTR79uzqbRkAAAA0lNAd63Cfe+65lbb96le/SmuuuWbq0aNH6t69e5o0aVIRbQQAAID6HbovvfTStNxyy5XfHzVqVBo5cmS65ppr0jPPPJPatm2bhg8fXlQ7AQAAoP6O6X7jjTdS7969y+/ffvvtaZdddkn77bdfvn/aaaelAw88sJhWAgAAQH2udH/77bdp2WWXLb8/duzYtPnmm5ffj27mkydPrv4WAgAAQH0P3auvvnp67rnn8u+ffPJJevXVV9Omm25a/ngE7jZt2hTTSgAAAKjP3csHDhyYDjvssBy2H3zwwdSlS5fUq1evSpXvmEwNAAAAWMTQ/Yc//CF988036ZZbbkkdOnRI//rXvyo9/vjjj6d99tlnYQ8HAAAA9d5Ch+7GjRunk046Kd+qMmcIBwAAgIZuocd0AwAAAItG6AYAAICCCN0AAABQEKEbAAAAakvofuihh4ppCQAAADT00L399tunH/zgB+mUU05JkyZNKqZVAAAA0JCWDCt5//3307XXXpuuvvrqNHz48LT11lungw46KO26666pWbNmxbQSAGiwxnXpmmqLruP
H1XQTAKjvle527dqlI488Mr344ovpqaeeSuuss0469NBDU8eOHdPvf//79NJLLxXTUgAAAGhIE6ltuOGGaciQIWnQoEHpq6++SldeeWXq1atX+vGPf5xeffXV6mslAAAANJTQPWPGjHTTTTelHXfcMa2++urpvvvuS+eff3766KOP0ptvvpm37bnnntXfWgAAAKjPY7p/97vfpX/84x+prKws7b///unPf/5z6t69e/njSy+9dDr77LNzd3MAAABoyBY5dL/22mvpb3/7W9p9991T8+bN5znu29JiAAAANHSL3L182LBhuev4nIF75syZ6ZFHHsm/N23aNG2xxRbV10oAAABoCKF7q622Sp999tlc26dOnZofAwAAABaze3mM5W7UqNFc2z/99NM8nhuAufW4ukeqDV4e+HJNNwEAoEFZ6NAdY7hDBO4DDjigUvfyWbNmpf/85z9pk002KaaVAAAAUJ9Dd5s2bcor3a1bt04tW7Ysf6xZs2bpRz/6UTrkkEOKaSUAAADU59A9cuTI/LNz587pmGOO0ZUcAAAAqntMd8xeDgAAAFRT6N5www3T6NGj03LLLZc22GCDKidSK3n++ecX5pAAAABQ7y1U6N5ll13KJ07bddddi24TAAAANJzQXbFLue7lAAAAsHAap0U0adKk9N5775Xff/rpp9MRRxyRLr300kU9FAAAANRrixy699133/TQQw/l3ydPnpz69euXg/fxxx+fTjrppCLaCAAAAA0jdL/yyiupT58++fd//vOfqUePHmns2LHpuuuuS1dddVURbQQAAICGEbpnzJhRPqnaAw88kH7605/m37t06ZI+/PDD6m8hAAAANJTQve6666aLL744Pfroo+n+++9P22+/fd7+wQcfpBVWWKGINgIAAEDDCN1nnnlmuuSSS9KWW26Z9tlnn9SzZ8+8/Y477ijvdg4AAAAs5JJhJWVlZWnNNddMEydOTDNnzkzLLbdc+WO/+tWvUqtWrYpoIwAAANT/SneE7rXWWivPWl4xcIfOnTun9u3bV3f7AAAAoGGE7saNG6e11147ffrpp8W1CAAAABrqmO4zzjgjHXvssXnpMAAAAKCaxnSHAQMGpG+++SZPoNasWbPUsmXLSo9/9tlni3pIAAAAqJcWOXSPGDGimJYAAABAQw/dAwcOLKYlAAAA0NDHdIe33nor/elPf8rrdE+ZMiVvu/fee9Orr75a3e0DAACAhhO6H3744dSjR4/01FNPpVtuuSV99dVXeftLL72Uhg0bVkQbAQAAoGF0Lx88eHA65ZRT0lFHHZVat25dvn3rrbdO559/fnW3DwAA6pxxXbqm2qLr+HE13QRo0Ba50v3yyy+n3Xbbba7t7du3T5988kl1tQsAAAAaXuhu27Zt+vDDD+fa/sILL6RVVlmlutoFAAAADS90//znP09//OMf0+TJk1OjRo3S7Nmz0+OPP56OOeaYvIY3AAAAsJih+7TTTktdunRJnTp1ypOodevWLW2++eZpk002yTOaAwAAAIs5kVqzZs3SZZddlk444YT0yiuv5OC9wQYbpLXXXntRDwUAAAD12iKH7sceeyxtttlmabXVVss3AAAAoJq6l8fSYGussUY67rjj0muvvbaoTwcAAIAGY5FD9wcffJCOPvro9PDDD6fu3bun9ddfP5111lnpvffeK6aFAAAA0FBCd7t27dKgQYPyjOVvvfVW2nPPPdPVV1+dOnfunKvgAAAAwGKG7oqim/ngwYPTGWeckXr06JGr3wAAAMD3DN1R6T700EPTyiuvnPbdd9/c1fzuu+9e3MMBAABAvbPIs5cPGTIk3XDDDXls97bbbpvOO++8tMsuu6RWrVoV00IAAABoKKH7kUceSccee2zaa6+98vhuAAAAoJpCd3QrBwAAAKopdN9xxx1phx12SEsttVT+fX5++tOfLswhAQAAoN5bqNC96667psmTJ6f27dvn3+elUaNGadasWdXZPmqLE9u
kWuPEqTXdAgAAgOoL3bNnz67ydwAAAKAax3QDAECt7RG3xmo13QKAxQ/dUeW+6qqr0i233JImTJiQu5OvscYa6Wc/+1naf//9830AAADg/2ucFlJZWVmeJO3ggw9O77//furRo0dad91107vvvpsOOOCAtNtuuy3soQAAAKBBWOhKd1S4Y43u0aNHp6222qrSYw8++GCeYO2aa65JAwYMKKKdAAAAUH8r3f/4xz/ScccdN1fgDltvvXUaPHhwuu6666q7fQAAAFD/Q/d//vOftP3228/z8VjH+6WXXqqudgEAAEDDCd2fffZZWmmlleb5eDz2+eefV1e7AAAAoOGM6Z41a1Zq2nTeuzdp0iTNnDmzutoFAABQHEvdUdtCd8xeHrOUN2/evMrHp02bVp3tAgAAgIYTugcOHLjAfcxcDgAAAIsRukeOHLmwuwIAAACLMpEaAAAAsGiEbgAAAKjPofuCCy5InTt3Ti1atEgbb7xxevrppxfqeTfccENq1KhR2nXXXQtvIwAAANS50H3jjTemo446Kg0bNiw9//zzqWfPnql///5pypQp833ehAkT0jHHHJN+/OMfL7G2AgAAQJ0K3eeee2465JBD0oEHHpi6deuWLr744tSqVat05ZVXznfN8P322y8NHz48rbnmmku0vQAAAFAnQvf06dPTc889l/r16/d/DWrcON9/4okn5vm8k046KbVv3z4ddNBBS6ilAAAAUOCSYUX45JNPctV6pZVWqrQ97o8fP77K5zz22GPpiiuuSC+++OJCvca0adPyreSLL774nq0GAACAOtK9fFF8+eWXaf/990+XXXZZateu3UI95/TTT09t2rQpv3Xq1KnwdgIAAECNV7ojODdp0iR99NFHlbbH/Q4dOsy1/1tvvZUnUNt5553Lt82ePTv/bNq0aXr99dfTD37wg0rPGTJkSJ6orWKlW/AGAACg3ofuZs2apV69eqXRo0eXL/sVITruDxo0aK79u3Tpkl5++eVK2/70pz/lCvh5551XZZhu3rx5vgEAAECDCt0hqtADBw5MvXv3Tn369EkjRoxIX3/9dZ7NPAwYMCCtssoquZt4rOPdvXv3Ss9v27Zt/jnndgAAAEgNPXTvvffe6eOPP05Dhw5NkydPTuuvv34aNWpU+eRqEydOzDOaAwAAQF1T46E7RFfyqrqThzFjxsz3uVdddVVBrQIAAGg4xnXpmmqLruPHpfpCCRkAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBmhZ1YIAad2KbVGussVpNtwAAgBqg0g0AAAAFEboBAACgIEI3AAAAFEToBgAAgIKYSA2gARnXpWuqLbqOH1fTTQAAKJxKNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBC
hGwAAAAoidAMAAEB9Dt0XXHBB6ty5c2rRokXaeOON09NPPz3PfS+77LL04x//OC233HL51q9fv/nuDwAAAA02dN94443pqKOOSsOGDUvPP/986tmzZ+rfv3+aMmVKlfuPGTMm7bPPPumhhx5KTzzxROrUqVPabrvt0vvvv7/E2w4AAAC1OnSfe+656ZBDDkkHHnhg6tatW7r44otTq1at0pVXXlnl/tddd1069NBD0/rrr5+6dOmSLr/88jR79uw0evToJd52AAAAqLWhe/r06em5557LXcTLG9S4cb4fVeyF8c0336QZM2ak5ZdfvsCWAgAAwKJrmmrQJ598kmbNmpVWWmmlStvj/vjx4xfqGH/84x9Tx44dKwX3iqZNm5ZvJV988cX3bDUAAADUke7l38cZZ5yRbrjhhnTrrbfmSdiqcvrpp6c2bdqU32IMOAAAANT70N2uXbvUpEmT9NFHH1XaHvc7dOgw3+eeffbZOXT/+9//Tuutt9489xsyZEiaOnVq+W3SpEnV1n4AAACotaG7WbNmqVevXpUmQStNita3b995Pu/Pf/5zOvnkk9OoUaNS79695/sazZs3T8suu2ylGwAAANT7Md0hlgsbOHBgDs99+vRJI0aMSF9//XWezTwMGDAgrbLKKrmbeDjzzDPT0KFD0/XXX5/X9p48eXLevswyy+QbAAAA1BY1Hrr33nvv9PHHH+cgHQE6lgKLCnZpcrWJEyfmGc1LLrroojzr+c9+9rNKx4l1vk888cQl3n4AAACotaE7DBo0KN+qMmbMmEr3J0yYsIRaBQAAAA149nIAAACozYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFaVrUgaG+G9ela6otuo4fV9NNAAAAqqDSDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQAAAAURugEAAKAgQjcAAAAUROgGAACAggjdAAAAUBChGwAAAAoidAMAAEBBhG4AAAAoiNANAAAABRG6AQAAoCBCNwAAABRE6AYAAICCCN0AAABQEKEbAAAACiJ0AwAAQEGEbgAAACiI0A0AAAD1OXRfcMEFqXPnzqlFixZp4403Tk8//fR89//Xv/6VunTpkvfv0aNHuueee5ZYWwEAAKDOhO4bb7wxHXXUUWnYsGHp+eefTz179kz9+/dPU6ZMqXL/sWPHpn322ScddNBB6YUXXki77rprvr3yyitLvO0AAABQq0P3ueeemw455JB04IEHpm7duqWLL744tWrVKl155ZVV7n/eeeel7bffPh177LGpa9eu6eSTT04bbrhhOv/885d42wEAAKDWhu7p06en5557LvXr1+//GtS4cb7/xBNPVPmc2F5x/xCV8XntDwAAADWlaY29ckrpk08+SbNmzUorrbRSpe1xf/z48VU+Z/LkyVXuH9urMm3atHwrmTp1av75xRdfpNpu9rRvUm3xRaOyVGvUku/uq1mzUm1Rm85n523
VZn1bO84X523VnLdVc97OzXlbNeft3Jy3VXPeVs15W7vP2wW1saysrPaG7iXh9NNPT8OHD59re6dOnWqkPXVVm1SLnFGrWlM7tPGZVKV2fSrjUm3QJ9Uiztsq1a5PxXk7F+dtlWrXp+K8nYvztkq161Nx3tbl8/bLL79MbebT3hoN3e3atUtNmjRJH330UaXtcb9Dhw5VPie2L8r+Q4YMyRO1lcyePTt99tlnaYUVVkiNGjWqlvfBkr2aFBdMJk2alJZddtmabg4sFOctdZHzlrrIeUtd5Lytu6LCHYG7Y8eO892vRkN3s2bNUq9evdLo0aPzDOSlUBz3Bw0aVOVz+vbtmx8/4ogjyrfdf//9eXtVmjdvnm8VtW3btlrfB0te/AfJf5Soa5y31EXOW+oi5y11kfO2bppfhbvWdC+PKvTAgQNT7969U58+fdKIESPS119/nWczDwMGDEirrLJK7iYeDj/88LTFFlukc845J+20007phhtuSM8++2y69NJLa/idAAAAQC0L3XvvvXf6+OOP09ChQ/NkaOuvv34aNWpU+WRpEydOzDOal2yyySbp+uuvT3/605/Scccdl9Zee+102223pe7du9fguwAAAIBaGLpDdCWfV3fyMWPGzLVtzz33zDcanhgqMGzYsLmGDEBt5rylLnLeUhc5b6mLnLf1X6OyBc1vDgAAACyW/+u3DQAAAFQroRsAAAAKInQDAABAQYRu6owLLrggde7cObVo0SJtvPHG6emnn67pJsF8PfLII2nnnXdOHTt2TI0aNcorLUBtFstzbrTRRql169apffv2adddd02vv/56TTcL5uuiiy5K6623Xvkax3379k333ntvTTcLFskZZ5yR/1Y44ogjaropFEDopk648cYb85ruMbPj888/n3r27Jn69++fpkyZUtNNg3n6+uuv87kaF4ygLnj44YfTYYcdlp588sl0//33pxkzZqTtttsun8tQW6266qo5sDz33HPp2WefTVtvvXXaZZdd0quvvlrTTYOF8swzz6RLLrkkXzyifjJ7OXVCVLaj+nL++efn+7Nnz06dOnVKv/vd79LgwYNrunmwQHH1+tZbb82VQ6grPv7441zxjjC++eab13RzYKEtv/zy6ayzzkoHHXRQTTcF5uurr75KG264YbrwwgvTKaecktZff/00YsSImm4W1Uylm1pv+vTp+ep1v379yrc1btw433/iiSdqtG0A9dnUqVPLAwzUBbNmzUo33HBD7p0R3cyhtoveRTvttFOlv3Opf5rWdANgQT755JP8j+hKK61UaXvcHz9+fI21C6A+ix5FMbZw0003Td27d6/p5sB8vfzyyzlkf/fdd2mZZZbJPYu6detW082C+YoLRDFsMrqXU78J3QBAldWXV155JT322GM13RRYoB/+8IfpxRdfzL0zbrrppjRw4MA8LELwpraaNGlSOvzww/P8GTFJMPWb0E2t165du9SkSZP00UcfVdoe9zt06FBj7QKorwYNGpTuuuuuPAN/TFIFtV2zZs3SWmutlX/v1atXrhyed955eXIqqI1i6GRMCBzjuUuiZ2f8dzfmMJo2bVr++5f6wZhu6sQ/pPEP6OjRoyt1e4z7xmsBVJ+YWzUCd3TNffDBB9Maa6xR002CxRJ/J0Rogdpqm222ycMioodG6da7d++033775d8F7vpFpZs6IZYLi65i8R+jPn365FkdY5KUAw88sKabBvOdkfTNN98sv//OO+/kf0hjUqrVVlutRtsG8+pSfv3116fbb789r9U9efLkvL1NmzapZcuWNd08qNKQIUPSDjvskP+7+uWXX+ZzeMyYMem+++6r6abBPMV/Y+ecL2PppZdOK6ywgnk06iGhmzph7733zkvXDB06NP8RGMspjBo1aq7J1aA2ifVit9pqq0oXj0JcQLrqqqtqsGVQtYsuuij/3HLLLSttHzlyZDrggANqqFUwf9FFd8CAAenDDz/MF4hireMI3Nt
uu21NNw0gs043AAAAFMSYbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgIII3QAAAFAQoRsAAAAKInQDAABAQYRuAAAAKIjQDQC11AEHHJAaNWqUb0sttVRaY4010h/+8If03Xff1XTTAICF1HRhdwQAlrztt98+jRw5Ms2YMSM999xzaeDAgTmEn3nmmTXdNABgIah0A0At1rx589ShQ4fUqVOntOuuu6Z+/fql+++/Pz82e/bsdPrpp+cKeMuWLVPPnj3TTTfdVP7czz//PO23335pxRVXzI+vvfbaOcCHCRMm5PB+ww03pE022SS1aNEide/ePT388MOVXj/u9+nTJ7dj5ZVXToMHD04zZ84sf3zLLbdMv//973MFfvnll89tPfHEE8sfLysry/dXW221fIyOHTvm/UumTZuWjjnmmLTKKqukpZdeOm288cZpzJgxhX6mALAkCd0AUEe88soraezYsalZs2b5fgTua665Jl188cXp1VdfTUceeWT6xS9+UR6cTzjhhPTaa6+le++9N40bNy5ddNFFqV27dpWOeeyxx6ajjz46vfDCC6lv375p5513Tp9++ml+7P3330877rhj2mijjdJLL72Un3/FFVekU045pdIxrr766hyYn3rqqfTnP/85nXTSSeUXBm6++eb0l7/8JV1yySXpjTfeSLfddlvq0aNH+XMHDRqUnnjiiRz+//Of/6Q999wzV/djXwCoDxqVxSVoAKBWjun++9//nqvQUV2OqnDjxo3TP//5z/STn/wkV5YfeOCBHJZLDj744PTNN9+k66+/Pv30pz/NIfvKK6+c69hR6Y4K+RlnnJH++Mc/5m3xGrHtd7/7Xa5cH3/88Tk0R2CPqni48MIL8/5Tp07NbYlK96xZs9Kjjz5afuyojG+99db52Oeee24O3HHBIMalVzRx4sS05ppr5p9RAS+Jan4c47TTTivkcwWAJcmYbgCoxbbaaqtcYf76669zxbhp06Zpjz32yJXtCNfbbrttpf2nT5+eNthgg/z7b3/727zv888/n7bbbrvcPT26kldUMbDHsXv37p1Ddoif8XgpcIdNN900ffXVV+m9997LXcbDeuutV+mY0Q19ypQp+feoXI8YMSKH66hgR+U8qunxWi+//HIO7Ouss06l58fFhRVWWKGaPkEAqFlCNwDUYtFte6211sq/R8U6xm1HF+8Yfx3uvvvuPB66ohg7HXbYYYf07rvvpnvuuSd3995mm23SYYcdls4+++xqbeOcFewI6THePMRY9Ndffz1X5KMNhx56aDrrrLNyF/gI702aNMkTxMXPipZZZplqbSMA1BRjugGgjoju3Mcdd1z605/+lLp165bDdXTNjlBe8RZBtyQmUYsZz6ObelScL7300krHfPLJJ8t/j+7lEYC7du2a78fPGG9dcSTa448/nlq3bp1WXXXVhW53TOIW1e2//vWveZK0OGZUuaMiH5XuqIrP+R5iQjYAqA9UugGgDonu2jH5WYyTjlm/Y/K0qCpvttlmeZx1hOJll102B+2hQ4emXr16pXXXXTd32b7rrrvKA3XJBRdckGc1j+3RfT1mPP/lL3+ZH4uqdAT1GOMdE55FxXrYsGHpqKOOyhcAFsZVV12Vg3XMSt6qVasc/iOEr7766rkLecyuPmDAgHTOOefkEP7xxx+n0aNH5y7rO+20UyGfIQAsSUI3ANQhMRY6AnDMEv7OO+/kSnbMYv7222+ntm3bpg033DBXw0PMcj5kyJA8aVoE3R//+Md5lvCKYrKzuL344ou5wnzHHXeUz3Ae3daja3qE/OjWHhO3HXTQQbnSvrCiTXH8COoRvmPm8jvvvLN8zHYsYRazoccM6jFberz2j370ozxRHADUB2YvB4AGqDR7eSwVtv7669d0cwCg3jKmGwAAAAoidAMAAEBBdC8HAACAgqh0AwAAQEGEbgAAACiI0A0AAAAFEboBAACgIEI3AAAAFEToBgAAgII
I3QAAAFAQoRsAAAAKInQDAABAKsb/A1WztvIewC6JAAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -764,7 +749,7 @@ } ], "source": [ - "scores = [ensemble_diversity(s_arr) for s_arr in input_strs]\n", + "scores = [ensemble_diversity(s_arr) for s_arr in tqdm.tqdm(input_strs)]\n", "labels = [str(number) for number in range(1, len(input_strs) + 1)]\n", "\n", "df = pd.DataFrame(\n", From 607320f77f9bc81c4ba0aef74d445d64e4dc23b1 Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Tue, 8 Apr 2025 23:17:19 -0700 Subject: [PATCH 14/14] renamed references to ember_logging --- src/ember/__init__.py | 2 +- src/ember/examples/advanced/example_architectures.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ember/__init__.py b/src/ember/__init__.py index b7f18d34..3cdea817 100644 --- a/src/ember/__init__.py +++ b/src/ember/__init__.py @@ -145,7 +145,7 @@ def initialize_ember( model = registry.get_model("openai:gpt-4") """ # Import here to avoid circular imports - from ember.core.utils.logging import configure_logging + from ember.core.utils.ember_logging import configure_logging from ember.core.config.manager import create_config_manager # 0. Configure logging first diff --git a/src/ember/examples/advanced/example_architectures.py b/src/ember/examples/advanced/example_architectures.py index cc7f1aa8..2b8b735f 100644 --- a/src/ember/examples/advanced/example_architectures.py +++ b/src/ember/examples/advanced/example_architectures.py @@ -219,7 +219,7 @@ def create_pipeline(*, model_name: str = "gpt-4o") -> non.Sequential: if __name__ == "__main__": # Use the centralized logging configuration with reduced verbosity - from ember.core.utils.logging import configure_logging + from ember.core.utils.ember_logging import configure_logging from ember.xcs.engine.execution_options import set_execution_options configure_logging(verbose=False)