From 21a71417943cdb5e26b4a73235eb173da879ebd2 Mon Sep 17 00:00:00 2001
From: Nightwing-77
Date: Sat, 31 Jan 2026 08:20:39 +0530
Subject: [PATCH] Update llm_gemini.py

---
 Voice-Driven_banking-Lam/Backend/services/llm_gemini.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Voice-Driven_banking-Lam/Backend/services/llm_gemini.py b/Voice-Driven_banking-Lam/Backend/services/llm_gemini.py
index 726cf3de..8f905b9a 100644
--- a/Voice-Driven_banking-Lam/Backend/services/llm_gemini.py
+++ b/Voice-Driven_banking-Lam/Backend/services/llm_gemini.py
@@ -86,7 +86,7 @@ async def get_llm_nlu_response(prompt: str) -> str | None:
     if not model:
         return None
     try:
         # Create a separate model instance for NLU without the system prompt
-        nlu_model = genai.GenerativeModel(model_name="gemini-1.5-flash")
+        nlu_model = genai.GenerativeModel(model_name="gemini-2.5-flash")
         config = {"temperature": 0.1, "response_mime_type": "application/json"}
         response = await nlu_model.generate_content_async(prompt, generation_config=config, safety_settings=safety_settings)
         return response.text