From 68d73b62e8354a03c1eefd8f6cc904cce59e4b05 Mon Sep 17 00:00:00 2001
From: Ryan Chen
Date: Fri, 24 Oct 2025 08:44:08 -0400
Subject: [PATCH] Instituting LLM fallback to OpenAI if the gaming PC is not on

---
 llm.py | 57 +++++++++++++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 24 deletions(-)

diff --git a/llm.py b/llm.py
index e700fce..897cd7e 100644
--- a/llm.py
+++ b/llm.py
@@ -4,9 +4,14 @@
 from ollama import Client
 from openai import OpenAI
 import logging
+from dotenv import load_dotenv
+
+load_dotenv()
 
 logging.basicConfig(level=logging.INFO)
 
+TRY_OLLAMA = os.getenv("TRY_OLLAMA", False)
+
 
 class LLMClient:
     def __init__(self):
@@ -30,31 +35,35 @@ class LLMClient:
         prompt: str,
         system_prompt: str,
     ):
+        # Instituting a fallback if my gaming PC is not on
         if self.PROVIDER == "ollama":
-            response = self.ollama_client.chat(
-                model="gemma3:4b",
-                messages=[
-                    {
-                        "role": "system",
-                        "content": system_prompt,
-                    },
-                    {"role": "user", "content": prompt},
-                ],
-            )
-            print(response)
-            output = response.message.content
-        elif self.PROVIDER == "openai":
-            response = self.openai_client.responses.create(
-                model="gpt-4o-mini",
-                input=[
-                    {
-                        "role": "system",
-                        "content": system_prompt,
-                    },
-                    {"role": "user", "content": prompt},
-                ],
-            )
-            output = response.output_text
+            try:
+                response = self.ollama_client.chat(
+                    model="gemma3:4b",
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": system_prompt,
+                        },
+                        {"role": "user", "content": prompt},
+                    ],
+                )
+                output = response.message.content
+                return output
+            except Exception as e:
+                logging.error(f"Could not connect to OLLAMA: {str(e)}")
+
+        response = self.openai_client.responses.create(
+            model="gpt-4o-mini",
+            input=[
+                {
+                    "role": "system",
+                    "content": system_prompt,
+                },
+                {"role": "user", "content": prompt},
+            ],
+        )
+        output = response.output_text
         return output
 
 
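
A side note on the new TRY_OLLAMA flag: os.getenv returns the raw string whenever the variable is set, so a value such as "false" is still truthy, and the bool default False only applies when the variable is missing. The flag's consumer is outside these hunks, but if it is meant to gate the Ollama attempt, here is a minimal sketch of an explicit parse (standard library only; the helper name env_flag is hypothetical, not something defined in llm.py):

import os

def env_flag(name: str, default: bool = False) -> bool:
    # Normalize the environment value to a real boolean instead of
    # relying on string truthiness.
    value = os.getenv(name)
    if value is None:
        return default
    return value.strip().lower() in ("1", "true", "yes", "on")

# e.g. TRY_OLLAMA=true in the .env file picked up by load_dotenv()
TRY_OLLAMA = env_flag("TRY_OLLAMA", default=False)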