sdf
@@ -7,10 +7,16 @@ import asyncio
 from tortoise import Tortoise
 from blueprints.users.models import User
 
+from dotenv import load_dotenv
+
+load_dotenv()
+
 # Database configuration with environment variable support
 DATABASE_PATH = os.getenv("DATABASE_PATH", "database/raggr.db")
 DATABASE_URL = os.getenv("DATABASE_URL", f"sqlite://{DATABASE_PATH}")
+
+print(DATABASE_URL)
 
 
 async def add_user(username: str, email: str, password: str):
     """Add a new user to the database"""
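Review note: this hunk wires DATABASE_URL to the environment via python-dotenv but does not show where the URL is consumed. Below is a minimal sketch of how it might feed Tortoise ORM's init, assuming the models module is blueprints.users.models; the init call itself is not part of this commit and the names here are illustrative.

# Sketch only: how DATABASE_URL could be passed to Tortoise ORM.
# The actual init code is not shown in this hunk, so treat this as an assumption.
import asyncio
import os

from dotenv import load_dotenv
from tortoise import Tortoise

load_dotenv()
DATABASE_PATH = os.getenv("DATABASE_PATH", "database/raggr.db")
DATABASE_URL = os.getenv("DATABASE_URL", f"sqlite://{DATABASE_PATH}")


async def init_db() -> None:
    # Register the models module so Tortoise can discover the User model.
    await Tortoise.init(
        db_url=DATABASE_URL,
        modules={"models": ["blueprints.users.models"]},
    )
    await Tortoise.generate_schemas()


if __name__ == "__main__":
    asyncio.run(init_db())

The sqlite:// default keeps local development zero-config, while DATABASE_URL can point the same code at a different backend in deployment.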
main.py (2 changed lines)
@@ -186,7 +186,7 @@ def consult_oracle(
 def llm_chat(input: str, transcript: str = "") -> str:
     system_prompt = "You are a helpful assistant that understands veterinary terms."
     transcript_prompt = f"Here is the message transcript thus far {transcript}."
-    prompt = f"""Answer the user in a humorous way as if you were a cat named Simba. Be very coy.
+    prompt = f"""Answer the user as if you were a cat named Simba. Don't act too catlike. Be assertive.
 {transcript_prompt if len(transcript) > 0 else ""}
 Respond to this prompt: {input}"""
     output = llm_client.chat(prompt=prompt, system_prompt=system_prompt)
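Review note: the rewritten prompt can be eyeballed without calling the LLM. A standalone sketch that mirrors the template assembly in llm_chat; build_prompt is an illustrative helper, not a function in this commit.

# Illustrative only: reproduces the prompt construction inside llm_chat so the
# rendered template can be inspected without an llm_client call.
def build_prompt(input: str, transcript: str = "") -> str:
    transcript_prompt = f"Here is the message transcript thus far {transcript}."
    return f"""Answer the user as if you were a cat named Simba. Don't act too catlike. Be assertive.
{transcript_prompt if len(transcript) > 0 else ""}
Respond to this prompt: {input}"""


if __name__ == "__main__":
    # Without a transcript the middle line renders empty; with one it is included.
    print(build_prompt("Why is my cat sneezing?"))
    print(build_prompt("Should I see a vet?", transcript="user: Why is my cat sneezing?"))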