Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion src/inputs/input.txt
Original file line number Diff line number Diff line change
@@ -1 +1,10 @@
Officer Voldemort here, at an incident reported at 456 Oak Street. Two victims, Mark Smith and Jane Doe. Medical aid rendered for minor lacerations. Handed off to Sheriff's Deputy Alvarez. End of transmission.
UC Vaccine Declination Statement

Name/SID: Sarah Johnson, SID 4527891
Job Title: Research Scientist
Department: Microbiology
Phone Number: 831-555-0142
Email: sjohnson@ucsc.edu
Date: 03/15/2026

Signature: ________________________
27 changes: 22 additions & 5 deletions src/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,19 +60,36 @@ def main_loop(self):
}

try:
response = requests.post(ollama_url, json=payload)
response = requests.post(ollama_url, json=payload, timeout=30)
response.raise_for_status()
json_data = response.json()
except requests.exceptions.ConnectionError:
raise ConnectionError(
f"Could not connect to Ollama at {ollama_url}. "
"Please ensure Ollama is running and accessible."
)
Comment on lines 66 to 70
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The connection error path raises a built-in ConnectionError without including the field context, and it differs from the PR description which states transport-layer failures should raise a contextual RuntimeError. Consider including the field name and chaining the original exception (and/or using a consistent exception type across transport/HTTP failures) so callers can handle failures uniformly.

Copilot uses AI. Check for mistakes.
except requests.exceptions.Timeout as e:
raise TimeoutError(
f"Request to Ollama timed out for field '{field}' at {ollama_url}: {e}"
) from e
except requests.exceptions.HTTPError as e:
raise RuntimeError(f"Ollama returned an error: {e}")
raise RuntimeError(
f"Ollama returned an HTTP error for field '{field}': {e}"
)
except requests.exceptions.RequestException as e:
raise RuntimeError(
f"Ollama request failed for field '{field}': {e}"
)
except ValueError as e:
raise ValueError(
f"Invalid JSON response from Ollama for field '{field}': {e}"
)

# parse response
json_data = response.json()
parsed_response = json_data["response"]
parsed_response = json_data.get("response")
if parsed_response is None:
raise ValueError(
f"Ollama response missing 'response' key for field '{field}'."
)
# print(parsed_response)
self.add_response_to_json(field, parsed_response)

Expand Down
66 changes: 66 additions & 0 deletions src/test/test_llm_reliability.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import pytest
import requests

from src.llm import LLM


class DummyResponse:
    """Minimal stand-in for ``requests.Response`` used by the fake ``post``.

    Each constructor argument configures one simulated failure mode:
    ``status_error`` is raised by :meth:`raise_for_status`, ``json_error``
    is raised by :meth:`json`, and ``json_data`` is the payload returned
    by :meth:`json` on success.
    """

    def __init__(self, status_error=None, json_data=None, json_error=None):
        # Stash the configured behaviors; None means "succeed".
        self._status_error = status_error
        self._json_data = json_data
        self._json_error = json_error

    def raise_for_status(self):
        """Raise the configured HTTP error, if any; otherwise do nothing."""
        if self._status_error is not None:
            raise self._status_error

    def json(self):
        """Raise the configured JSON error, if any; otherwise return the payload."""
        if self._json_error is not None:
            raise self._json_error
        return self._json_data


def make_llm():
    """Build an LLM over a tiny transcript with a single string target field."""
    fields = {"name": "string"}
    return LLM(transcript_text="incident text", target_fields=fields)


def test_llm_timeout_raises_timeouterror(monkeypatch):
    """A transport-level timeout from requests must surface as TimeoutError."""
    def raise_timeout(*_args, **_kwargs):
        raise requests.exceptions.Timeout("timed out")

    monkeypatch.setattr(requests, "post", raise_timeout)
    with pytest.raises(TimeoutError, match="timed out"):
        make_llm().main_loop()


def test_llm_http_error_raises_runtimeerror(monkeypatch):
    """An HTTP status failure (raise_for_status) must surface as RuntimeError."""
    def return_failing_response(*_args, **_kwargs):
        return DummyResponse(
            status_error=requests.exceptions.HTTPError("500 Server Error")
        )

    monkeypatch.setattr(requests, "post", return_failing_response)
    with pytest.raises(RuntimeError, match="HTTP error"):
        make_llm().main_loop()


def test_llm_invalid_json_raises_valueerror(monkeypatch):
    """An unparseable response body must surface as ValueError."""
    def return_bad_json(*_args, **_kwargs):
        return DummyResponse(json_error=ValueError("bad json"))

    monkeypatch.setattr(requests, "post", return_bad_json)
    with pytest.raises(ValueError, match="Invalid JSON response"):
        make_llm().main_loop()


def test_llm_missing_response_key_raises_valueerror(monkeypatch):
    """Valid JSON that lacks the 'response' key must surface as ValueError."""
    def return_wrong_payload(*_args, **_kwargs):
        return DummyResponse(json_data={"other": "value"})

    monkeypatch.setattr(requests, "post", return_wrong_payload)
    with pytest.raises(ValueError, match="missing 'response' key"):
        make_llm().main_loop()