linter
This commit is contained in:
@@ -1,6 +1,6 @@
|
||||
import datetime
|
||||
|
||||
from quart import Blueprint, jsonify
|
||||
from quart import Blueprint, jsonify, request
|
||||
from quart_jwt_extended import (
|
||||
get_jwt_identity,
|
||||
jwt_refresh_token_required,
|
||||
@@ -8,7 +8,13 @@ from quart_jwt_extended import (
|
||||
|
||||
import blueprints.users.models
|
||||
|
||||
from .logic import rename_conversation
|
||||
from .agents import main_agent
|
||||
from .logic import (
|
||||
add_message_to_conversation,
|
||||
get_conversation_by_id,
|
||||
get_conversation_transcript,
|
||||
rename_conversation,
|
||||
)
|
||||
from .models import (
|
||||
Conversation,
|
||||
PydConversation,
|
||||
@@ -20,6 +26,51 @@ conversation_blueprint = Blueprint(
|
||||
)
|
||||
|
||||
|
||||
@conversation_blueprint.post("/query")
@jwt_refresh_token_required
async def query():
    """Handle a user query against the Simba agent.

    Expects a JSON body with:
        query: the user's message text.
        conversation_id: id of the conversation to append to.

    Persists the user's message, builds a prompt that includes the
    conversation transcript (when non-empty), invokes the main agent,
    persists the agent's reply, and returns it as
    ``{"response": <agent message>}``.
    """
    current_user_uuid = get_jwt_identity()
    user = await blueprints.users.models.User.get(id=current_user_uuid)

    data = await request.get_json()
    # Renamed from `query` so the local does not shadow this view function.
    user_query = data.get("query")
    conversation_id = data.get("conversation_id")

    conversation = await get_conversation_by_id(conversation_id)
    await conversation.fetch_related("messages")

    # Record the user's message before invoking the agent so the
    # transcript reflects the full exchange.
    await add_message_to_conversation(
        conversation=conversation,
        message=user_query,
        speaker="user",
        user=user,
    )

    transcript = await get_conversation_transcript(user=user, conversation=conversation)

    transcript_prompt = f"Here is the message transcript thus far {transcript}."
    prompt = f"""Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.
{transcript_prompt if len(transcript) > 0 else ""}
Respond to this prompt: {user_query}"""

    payload = {
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful cat assistant named Simba that understands veterinary terms. When there are questions to you specifically, they are referring to Simba the cat. Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.\n\nIMPORTANT: When users ask factual questions about Simba's health, medical history, veterinary visits, medications, weight, or any information that would be in documents, you MUST use the simba_search tool to retrieve accurate information before answering. Do not rely on general knowledge - always search the documents for factual questions.",
            },
            {"role": "user", "content": prompt},
        ]
    }

    response = await main_agent.ainvoke(payload)
    # The agent's reply is the last message in the returned list.
    message = response.get("messages", [])[-1].content

    await add_message_to_conversation(
        conversation=conversation,
        message=message,
        speaker="simba",
        user=user,
    )
    return jsonify({"response": message})
|
||||
|
||||
|
||||
@conversation_blueprint.route("/<conversation_id>")
|
||||
@jwt_refresh_token_required
|
||||
async def get_conversation(conversation_id: str):
|
||||
|
||||
36
services/raggr/blueprints/conversation/agents.py
Normal file
36
services/raggr/blueprints/conversation/agents.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from langchain.agents import create_agent
|
||||
from langchain.tools import tool
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
from blueprints.rag.logic import query_vector_store
|
||||
|
||||
# Shared chat-model client used by the agent below; the model name is
# fixed here at module import time.
openai_gpt_5_mini = ChatOpenAI(model="gpt-5-mini")
|
||||
|
||||
|
||||
@tool(response_format="content_and_artifact")
async def simba_search(query: str):
    """Search through Simba's medical records, veterinary documents, and personal information.

    Use this tool whenever the user asks questions about:
    - Simba's health history, medical records, or veterinary visits
    - Medications, treatments, or diagnoses
    - Weight, diet, or physical characteristics over time
    - Veterinary recommendations or advice
    - Ryan's (the owner's) information related to Simba
    - Any factual information that would be found in documents

    Args:
        query: The user's question or information need about Simba

    Returns:
        Relevant information from Simba's documents
    """
    # Use logging instead of print() so debug output respects the app's
    # logging configuration; local import keeps this block self-contained.
    import logging

    logger = logging.getLogger(__name__)
    logger.debug("[SIMBA SEARCH] Tool called with query: %s", query)
    serialized, docs = await query_vector_store(query=query)
    logger.debug("[SIMBA SEARCH] Found %d documents", len(docs))
    logger.debug("[SIMBA SEARCH] Serialized result length: %d", len(serialized))
    logger.debug("[SIMBA SEARCH] First 200 chars: %s", serialized[:200])
    # NOTE: the docstring above doubles as the tool description shown to the
    # LLM, so it is kept verbatim.
    return serialized, docs


# Agent wired with the document-search tool; consumed by the /query endpoint.
main_agent = create_agent(model=openai_gpt_5_mini, tools=[simba_search])
|
||||
@@ -74,7 +74,7 @@ async def rename_conversation(
|
||||
|
||||
prompt = f"Summarize the following conversation into a sassy one-liner title:\n\n{messages}"
|
||||
response = structured_llm.invoke(prompt)
|
||||
new_name: str = response.get("title")
|
||||
new_name: str = response.get("title", "")
|
||||
conversation.name = new_name
|
||||
await conversation.save()
|
||||
return new_name
|
||||
|
||||
Reference in New Issue
Block a user