Instituting LLM fallback to OpenAI if gaming PC is not on

2025-10-24 08:44:08 -04:00
parent 6b616137d3
commit 68d73b62e8

llm.py

@@ -4,9 +4,14 @@ from ollama import Client
 from openai import OpenAI
 import logging
+from dotenv import load_dotenv
+
+load_dotenv()
 
 logging.basicConfig(level=logging.INFO)
 
+TRY_OLLAMA = os.getenv("TRY_OLLAMA", False)
+
 
 class LLMClient:
     def __init__(self):
@@ -30,31 +35,35 @@ class LLMClient:
         prompt: str,
         system_prompt: str,
     ):
+        # Instituting a fallback if my gaming PC is not on
         if self.PROVIDER == "ollama":
-            response = self.ollama_client.chat(
-                model="gemma3:4b",
-                messages=[
-                    {
-                        "role": "system",
-                        "content": system_prompt,
-                    },
-                    {"role": "user", "content": prompt},
-                ],
-            )
-            print(response)
-            output = response.message.content
-        elif self.PROVIDER == "openai":
-            response = self.openai_client.responses.create(
-                model="gpt-4o-mini",
-                input=[
-                    {
-                        "role": "system",
-                        "content": system_prompt,
-                    },
-                    {"role": "user", "content": prompt},
-                ],
-            )
-            output = response.output_text
+            try:
+                response = self.ollama_client.chat(
+                    model="gemma3:4b",
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": system_prompt,
+                        },
+                        {"role": "user", "content": prompt},
+                    ],
+                )
+                output = response.message.content
+                return output
+            except Exception as e:
+                logging.error(f"Could not connect to OLLAMA: {str(e)}")
+
+        response = self.openai_client.responses.create(
+            model="gpt-4o-mini",
+            input=[
+                {
+                    "role": "system",
+                    "content": system_prompt,
+                },
+                {"role": "user", "content": prompt},
+            ],
+        )
+        output = response.output_text
         return output
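
Pieced together, the method after this commit behaves roughly like the minimal sketch below. It is a reconstruction, not the actual file: the method name generate, the __init__ body, and the LLM_PROVIDER / OLLAMA_HOST environment variables are assumptions filled in so the example is self-contained; only PROVIDER, ollama_client, openai_client, the model names, and the try/except fallback come from the diff.

import logging
import os

from dotenv import load_dotenv
from ollama import Client
from openai import OpenAI

load_dotenv()
logging.basicConfig(level=logging.INFO)


class LLMClient:
    def __init__(self):
        # Assumed initialization; the real __init__ is not shown in this diff.
        self.PROVIDER = os.getenv("LLM_PROVIDER", "ollama")
        self.ollama_client = Client(host=os.getenv("OLLAMA_HOST", "http://localhost:11434"))
        self.openai_client = OpenAI()  # needs OPENAI_API_KEY in the environment

    def generate(self, prompt: str, system_prompt: str):  # method name assumed
        # Try the local Ollama server (the gaming PC) first.
        if self.PROVIDER == "ollama":
            try:
                response = self.ollama_client.chat(
                    model="gemma3:4b",
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": prompt},
                    ],
                )
                return response.message.content
            except Exception as e:
                logging.error(f"Could not connect to OLLAMA: {str(e)}")

        # Fall back to the OpenAI Responses API when Ollama is unreachable
        # (or when the provider is not "ollama" in the first place).
        response = self.openai_client.responses.create(
            model="gpt-4o-mini",
            input=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt},
            ],
        )
        return response.output_text

Note the design choice visible in the diff: the OpenAI call is dedented out of the old elif branch, so the same code path serves both as the explicit "openai" provider and as the fallback whenever the Ollama request raises, which is exactly the behavior the commit title describes.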