linter
This commit is contained in:
@@ -1,6 +1,6 @@
|
||||
import datetime
|
||||
|
||||
from quart import Blueprint, jsonify
|
||||
from quart import Blueprint, jsonify, request
|
||||
from quart_jwt_extended import (
|
||||
get_jwt_identity,
|
||||
jwt_refresh_token_required,
|
||||
@@ -8,7 +8,13 @@ from quart_jwt_extended import (
|
||||
|
||||
import blueprints.users.models
|
||||
|
||||
from .logic import rename_conversation
|
||||
from .agents import main_agent
|
||||
from .logic import (
|
||||
add_message_to_conversation,
|
||||
get_conversation_by_id,
|
||||
get_conversation_transcript,
|
||||
rename_conversation,
|
||||
)
|
||||
from .models import (
|
||||
Conversation,
|
||||
PydConversation,
|
||||
@@ -20,6 +26,51 @@ conversation_blueprint = Blueprint(
|
||||
)
|
||||
|
||||
|
||||
@conversation_blueprint.post("/query")
@jwt_refresh_token_required
async def query():
    """Answer a user's message in a conversation using the Simba agent.

    Expects a JSON body with:
        query: the user's message text.
        conversation_id: id of the conversation to append to.

    Persists both the user message and the agent's reply, then returns
    ``{"response": <agent reply>}``. Responds 400 on a missing/invalid
    body and 502 if the agent produces no messages.
    """
    current_user_uuid = get_jwt_identity()
    user = await blueprints.users.models.User.get(id=current_user_uuid)

    # Guard against a missing/invalid JSON body or missing fields so we
    # fail with a clear 400 instead of an opaque 500 further down.
    data = await request.get_json()
    if not data:
        return jsonify({"error": "JSON body required"}), 400
    # Renamed from `query` to avoid shadowing the view function's name.
    user_query = data.get("query")
    conversation_id = data.get("conversation_id")
    if not user_query or not conversation_id:
        return jsonify({"error": "'query' and 'conversation_id' are required"}), 400

    conversation = await get_conversation_by_id(conversation_id)
    await conversation.fetch_related("messages")
    await add_message_to_conversation(
        conversation=conversation,
        message=user_query,
        speaker="user",
        user=user,
    )

    transcript = await get_conversation_transcript(user=user, conversation=conversation)

    # Only mention the transcript in the prompt when one exists.
    transcript_prompt = f"Here is the message transcript thus far {transcript}."
    prompt = f"""Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.
{transcript_prompt if len(transcript) > 0 else ""}
Respond to this prompt: {user_query}"""

    payload = {
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful cat assistant named Simba that understands veterinary terms. When there are questions to you specifically, they are referring to Simba the cat. Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.\n\nIMPORTANT: When users ask factual questions about Simba's health, medical history, veterinary visits, medications, weight, or any information that would be in documents, you MUST use the simba_search tool to retrieve accurate information before answering. Do not rely on general knowledge - always search the documents for factual questions.",
            },
            {"role": "user", "content": prompt},
        ]
    }

    response = await main_agent.ainvoke(payload)
    messages = response.get("messages", [])
    if not messages:
        # Surface an empty agent response instead of raising IndexError.
        return jsonify({"error": "agent returned no messages"}), 502
    message = messages[-1].content
    await add_message_to_conversation(
        conversation=conversation,
        message=message,
        speaker="simba",
        user=user,
    )
    return jsonify({"response": message})
|
||||
|
||||
|
||||
@conversation_blueprint.route("/<conversation_id>")
|
||||
@jwt_refresh_token_required
|
||||
async def get_conversation(conversation_id: str):
|
||||
|
||||
36
services/raggr/blueprints/conversation/agents.py
Normal file
36
services/raggr/blueprints/conversation/agents.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from langchain.agents import create_agent
|
||||
from langchain.tools import tool
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
from blueprints.rag.logic import query_vector_store
|
||||
|
||||
# Chat model backing the agent (GPT-5 mini via LangChain's OpenAI wrapper).
openai_gpt_5_mini = ChatOpenAI(model="gpt-5-mini")
|
||||
|
||||
|
||||
@tool(response_format="content_and_artifact")
async def simba_search(query: str):
    """Search through Simba's medical records, veterinary documents, and personal information.

    Use this tool whenever the user asks questions about:
    - Simba's health history, medical records, or veterinary visits
    - Medications, treatments, or diagnoses
    - Weight, diet, or physical characteristics over time
    - Veterinary recommendations or advice
    - Ryan's (the owner's) information related to Simba
    - Any factual information that would be found in documents

    Args:
        query: The user's question or information need about Simba

    Returns:
        Relevant information from Simba's documents
    """
    # NOTE: the docstring above doubles as the tool description shown to
    # the LLM — keep its wording intact when editing.
    # Debug prints replaced with logging so production stdout stays clean;
    # local import keeps this block self-contained.
    import logging

    logger = logging.getLogger(__name__)
    logger.debug("[SIMBA SEARCH] Tool called with query: %s", query)
    serialized, docs = await query_vector_store(query=query)
    logger.debug(
        "[SIMBA SEARCH] Found %d documents; serialized length %d",
        len(docs),
        len(serialized),
    )
    return serialized, docs
|
||||
|
||||
|
||||
# LangChain agent wired to the Simba document-search tool.
main_agent = create_agent(model=openai_gpt_5_mini, tools=[simba_search])
|
||||
@@ -74,7 +74,7 @@ async def rename_conversation(
|
||||
|
||||
prompt = f"Summarize the following conversation into a sassy one-liner title:\n\n{messages}"
|
||||
response = structured_llm.invoke(prompt)
|
||||
new_name: str = response.get("title")
|
||||
new_name: str = response.get("title", "")
|
||||
conversation.name = new_name
|
||||
await conversation.save()
|
||||
return new_name
|
||||
|
||||
46
services/raggr/blueprints/rag/__init__.py
Normal file
46
services/raggr/blueprints/rag/__init__.py
Normal file
@@ -0,0 +1,46 @@
|
||||
from quart import Blueprint, jsonify
|
||||
from quart_jwt_extended import jwt_refresh_token_required
|
||||
|
||||
from .logic import get_vector_store_stats, index_documents, vector_store
|
||||
|
||||
# Blueprint exposing RAG maintenance endpoints under /api/rag.
rag_blueprint = Blueprint("rag_api", __name__, url_prefix="/api/rag")
|
||||
|
||||
|
||||
@rag_blueprint.get("/stats")
@jwt_refresh_token_required
async def get_stats():
    """Report vector store statistics as JSON."""
    return jsonify(get_vector_store_stats())
|
||||
|
||||
|
||||
@rag_blueprint.post("/index")
@jwt_refresh_token_required
async def trigger_index():
    """Trigger indexing of documents from Paperless-NGX."""
    try:
        await index_documents()
        stats = get_vector_store_stats()
    except Exception as exc:
        # Report the failure to the caller rather than a bare 500 page.
        return jsonify({"status": "error", "message": str(exc)}), 500
    return jsonify({"status": "success", "stats": stats})
|
||||
|
||||
|
||||
@rag_blueprint.post("/reindex")
@jwt_refresh_token_required
async def trigger_reindex():
    """Clear the vector store and reindex all documents from Paperless-NGX.

    Returns {"status": "success", "stats": ...} on success, or a 500 with
    {"status": "error", "message": ...} if clearing or indexing fails.
    """
    try:
        # Use the public Chroma wrapper API (get/delete) instead of
        # reaching into the private _collection attribute.
        existing = vector_store.get()
        if existing["ids"]:
            vector_store.delete(ids=existing["ids"])

        # Reindex from scratch.
        await index_documents()
        stats = get_vector_store_stats()
        return jsonify({"status": "success", "stats": stats})
    except Exception as e:
        return jsonify({"status": "error", "message": str(e)}), 500
|
||||
75
services/raggr/blueprints/rag/fetchers.py
Normal file
75
services/raggr/blueprints/rag/fetchers.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import httpx
|
||||
|
||||
|
||||
class PaperlessNGXService:
    """Thin HTTP client for a Paperless-NGX instance.

    Configuration comes from the environment:
        BASE_URL        - host (and optional port) of the Paperless server.
        PAPERLESS_TOKEN - API token sent with every request.
    """

    # Default per-request timeout in seconds (matches the download path,
    # which already used 30s explicitly).
    TIMEOUT = 30

    def __init__(self):
        self.base_url = os.getenv("BASE_URL")
        self.token = os.getenv("PAPERLESS_TOKEN")
        # Use the cached attributes instead of re-reading the environment
        # in every method, as the original code did.
        # Documents tagged 8 are the ones this service lists; see get_data().
        self.url = f"http://{self.base_url}/api/documents/?tags__id=8"
        self.headers = {"Authorization": f"Token {self.token}"}

    def get_data(self):
        """Return all documents tagged for indexing, following pagination."""
        print(f"Getting data from: {self.url}")
        results = []
        next_link = self.url
        while next_link:
            r = httpx.get(next_link, headers=self.headers, timeout=self.TIMEOUT)
            r.raise_for_status()
            payload = r.json()  # parse once per page, not once per access
            results += payload["results"]
            next_link = payload.get("next")
        return results

    def get_doc_by_id(self, doc_id: int):
        """Fetch the JSON metadata for a single document."""
        url = f"http://{self.base_url}/api/documents/{doc_id}/"
        r = httpx.get(url, headers=self.headers, timeout=self.TIMEOUT)
        r.raise_for_status()
        return r.json()

    def download_pdf_from_id(self, id: int) -> str:
        """Download a document's PDF to a temporary file and return its path.

        The caller is responsible for deleting the temporary file.
        """
        download_url = f"http://{self.base_url}/api/documents/{id}/download/"
        response = httpx.get(
            download_url, headers=self.headers, follow_redirects=True, timeout=30
        )
        response.raise_for_status()
        # delete=False so the path remains valid after the handle closes.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
            temp_file.write(response.content)
            return temp_file.name

    def upload_cleaned_content(self, document_id, data):
        """PUT updated fields (e.g. cleaned OCR content) onto a document."""
        puts_url = f"http://{self.base_url}/api/documents/{document_id}/"
        r = httpx.put(puts_url, headers=self.headers, data=data, timeout=self.TIMEOUT)
        r.raise_for_status()

    def upload_description(self, description_filepath, file, title, exif_date: str):
        """Upload a description file as a new Paperless document.

        NOTE(review): the "create" key looks like it may be a typo for
        Paperless' "created" field — confirm against the API.
        """
        post_url = f"http://{self.base_url}/api/documents/post_document/"
        files = {"document": ("description_filepath", file, "application/txt")}
        data = {
            "title": title,
            "create": exif_date,
            "document_type": 3,  # hard-coded doc type for descriptions
            "tags": [7],  # hard-coded tag for generated descriptions
        }

        r = httpx.post(
            post_url, headers=self.headers, data=data, files=files, timeout=self.TIMEOUT
        )
        r.raise_for_status()

    def get_tags(self):
        """Return {tag_id: tag_name} for all tags (first page only)."""
        get_url = f"http://{self.base_url}/api/tags/"
        r = httpx.get(get_url, headers=self.headers, timeout=self.TIMEOUT)
        r.raise_for_status()
        return {tag["id"]: tag["name"] for tag in r.json()["results"]}

    def get_doctypes(self):
        """Return {doctype_id: doctype_name} for all document types (first page only)."""
        get_url = f"http://{self.base_url}/api/document_types/"
        r = httpx.get(get_url, headers=self.headers, timeout=self.TIMEOUT)
        r.raise_for_status()
        return {doctype["id"]: doctype["name"] for doctype in r.json()["results"]}
|
||||
101
services/raggr/blueprints/rag/logic.py
Normal file
101
services/raggr/blueprints/rag/logic.py
Normal file
@@ -0,0 +1,101 @@
|
||||
import datetime
|
||||
import os
|
||||
|
||||
from langchain_chroma import Chroma
|
||||
from langchain_core.documents import Document
|
||||
from langchain_openai import OpenAIEmbeddings
|
||||
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
||||
|
||||
from .fetchers import PaperlessNGXService
|
||||
|
||||
# Embedding model used for both indexing and querying; the two must match
# for similarity search to be meaningful.
embeddings = OpenAIEmbeddings(model="text-embedding-3-large")

# Persistent Chroma collection holding Simba's document chunks.
# NOTE(review): an empty CHROMADB_PATH falls back to "" — confirm that is
# the intended (in-memory/default) behavior.
vector_store = Chroma(
    collection_name="simba_docs",
    embedding_function=embeddings,
    persist_directory=os.getenv("CHROMADB_PATH", ""),
)

# Splitter applied to fetched documents before they are embedded.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,  # chunk size (characters)
    chunk_overlap=200,  # chunk overlap (characters)
    add_start_index=True,  # track index in original document
)
|
||||
|
||||
|
||||
def date_to_epoch(date_str: str) -> float:
    """Convert a "YYYY-MM-DD" date string to a POSIX timestamp.

    The time component is midnight (00:00:00) as a naive local datetime,
    matching the original manual construction.

    Args:
        date_str: date in "YYYY-MM-DD" form (Paperless' created_date).

    Returns:
        Seconds since the epoch as a float.
    """
    # strptime accepts both zero-padded and unpadded month/day, exactly
    # like the previous split-and-int parsing.
    return datetime.datetime.strptime(date_str, "%Y-%m-%d").timestamp()
|
||||
|
||||
|
||||
async def fetch_documents_from_paperless_ngx() -> list[Document]:
    """Pull tagged documents from Paperless-NGX as LangChain Documents.

    Each Document carries created_date (epoch seconds), filename, and the
    resolved document-type name in its metadata.
    """
    service = PaperlessNGXService()
    records = service.get_data()
    doctype_names = service.get_doctypes()
    return [
        Document(
            page_content=record["content"],
            metadata={
                "created_date": date_to_epoch(record["created_date"]),
                "filename": record["original_file_name"],
                "document_type": doctype_names.get(record["document_type"], ""),
            },
        )
        for record in records
    ]
|
||||
|
||||
|
||||
async def index_documents():
    """Fetch all Paperless documents, split them, and add them to Chroma.

    Skips the vector-store add entirely when there is nothing to index,
    avoiding an empty-batch call.
    """
    documents = await fetch_documents_from_paperless_ngx()

    splits = text_splitter.split_documents(documents)
    if splits:
        await vector_store.aadd_documents(documents=splits)
|
||||
|
||||
|
||||
async def query_vector_store(query: str):
    """Retrieve the top-matching document chunks for a query.

    Args:
        query: free-text search string.

    Returns:
        (serialized, docs): a human-readable "Source/Content" string and
        the raw retrieved Document objects.
    """
    # Use the async search variant so an async route/tool does not block
    # the event loop on the embedding + search call.
    retrieved_docs = await vector_store.asimilarity_search(query, k=2)
    serialized = "\n\n".join(
        (f"Source: {doc.metadata}\nContent: {doc.page_content}")
        for doc in retrieved_docs
    )
    return serialized, retrieved_docs
|
||||
|
||||
|
||||
def get_vector_store_stats():
    """Get statistics about the vector store."""
    # NOTE(review): reaches into Chroma's private _collection attribute,
    # matching the existing code elsewhere in this module.
    chroma_collection = vector_store._collection
    return {
        "total_documents": chroma_collection.count(),
        "collection_name": chroma_collection.name,
    }
|
||||
|
||||
|
||||
def list_all_documents(limit: int = 10):
    """List documents in the vector store with their metadata.

    Args:
        limit: maximum number of entries to return.

    Returns:
        A list of dicts with id, metadata, and a 200-character content
        preview (metadata/preview are None when Chroma omits them).
    """
    raw = vector_store._collection.get(limit=limit, include=["metadatas", "documents"])
    metadatas = raw.get("metadatas")
    contents = raw.get("documents")

    listing = []
    for idx, doc_id in enumerate(raw["ids"]):
        listing.append(
            {
                "id": doc_id,
                "metadata": metadatas[idx] if metadatas else None,
                "content_preview": contents[idx][:200] if contents else None,
            }
        )

    return listing
|
||||
0
services/raggr/blueprints/rag/models.py
Normal file
0
services/raggr/blueprints/rag/models.py
Normal file
Reference in New Issue
Block a user