Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
OPENPIPE_API_KEY=<APIKEY>
OPENPIPE_MODEL=openpipe:oncollama-v3-beta1
API_KEY=<APIKEY>
MODEL=openpipe:oncollama-v3-beta1
BASE_URL=<BASEURL>
7 changes: 4 additions & 3 deletions gui.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ class OpenPipeGUI:
def __init__(self):
    """Start in a disconnected state; configuration is filled in later by initialise()."""
    # Credentials/config come from the .env file during initialise(); None until then.
    self.api_key = self.model = self.base_url = None
    # Set to True only after a successful API connection test.
    self.connected = False

def parse_escape_sequences(self, text):
Expand All @@ -26,15 +27,15 @@ def initialise(self):
Initialise GUI, test API connection
"""
# for api keys
self.api_key, self.model = load_env_vars()
self.api_key, self.model, self.base_url = load_env_vars()

if not self.api_key or not self.model:
dpg.set_value("status_text", "Error: Missing API key or model in .env")
dpg.configure_item("status_indicator", default_value=False)
return

dpg.set_value("status_text", "Testing connection...")
success, message = test_connection(self.api_key, self.model)
success, message = test_connection(self.api_key, self.model, self.base_url)

self.connected = success
dpg.set_value("status_text", message)
Expand Down Expand Up @@ -67,7 +68,7 @@ def on_infer_clicked(self):
dpg.configure_item("infer_button", enabled=False)

# call API
success, result = call_openpipe_api(self.api_key, self.model, input_text)
success, result = call_openpipe_api(self.api_key, self.model, input_text, self.base_url)

dpg.set_value("output_text", result)
dpg.configure_item("infer_button", enabled=True)
Expand Down
20 changes: 10 additions & 10 deletions utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,21 +7,21 @@

"""
utils.py - supporting functions for backend operations (API, data processing)

Currently hard-coded to openpipe serverless deployment
Can refactor for additional LLM client compatibility (e.g. Bedrock)
"""

def load_env_vars():
    """Load API configuration from the environment (and a local .env file).

    Returns:
        tuple: (api_key, model, base_url). Each element is None when the
        corresponding environment variable is unset; base_url is optional
        and stays None when the default (OpenPipe) client should be used.
    """
    load_dotenv()
    api_key = os.getenv("API_KEY")
    model = os.getenv("MODEL")
    # BASE_URL is optional; os.getenv already returns None when it is unset,
    # so no explicit default argument is needed.
    base_url = os.getenv("BASE_URL")
    return api_key, model, base_url

def get_client(api_key, base_url=None):
    """Build an API client for the configured endpoint.

    When base_url is provided, target that OpenAI-compatible endpoint
    directly; otherwise fall back to the OpenPipe-wrapped client.
    """
    if base_url:
        return OpenAI(api_key=api_key, base_url=base_url)
    return OpenAI(api_key=api_key, openpipe={"api_key": api_key})

def test_connection(api_key, model):
def test_connection(api_key, model, base_url=None):
try:
client = OpenAI(openpipe={"api_key": api_key})
client = get_client(api_key, base_url)
client.chat.completions.create(
model=model,
messages=[{"role": "user", "content": "test"}],
Expand Down Expand Up @@ -50,9 +50,9 @@ def extract_output_json(text):
return text # or return original


def call_openpipe_api(api_key, model, user_text):
def call_openpipe_api(api_key, model, user_text, base_url=None):
try:
client = OpenAI(openpipe={"api_key": api_key})
client = get_client(api_key, base_url)
system_prompt = create_system_prompt('infer_prompt.txt')

completion = client.chat.completions.create(
Expand Down