2 Commits

Author SHA1 Message Date
Ryan Chen
b3793d2d32 Adding web search infra 2026-01-11 17:35:05 -05:00
Ryan Chen
733ffae8cf RAG optimizations 2026-01-11 09:36:36 -05:00
6 changed files with 100 additions and 18 deletions

View File

@@ -40,6 +40,7 @@ services:
- FLASK_ENV=development - FLASK_ENV=development
- PYTHONUNBUFFERED=1 - PYTHONUNBUFFERED=1
- NODE_ENV=development - NODE_ENV=development
- TAVILY_KEY=${TAVILY_KEY}
depends_on: depends_on:
postgres: postgres:
condition: service_healthy condition: service_healthy

View File

@@ -12,7 +12,6 @@ from .agents import main_agent
from .logic import ( from .logic import (
add_message_to_conversation, add_message_to_conversation,
get_conversation_by_id, get_conversation_by_id,
get_conversation_transcript,
rename_conversation, rename_conversation,
) )
from .models import ( from .models import (
@@ -43,22 +42,29 @@ async def query():
user=user, user=user,
) )
transcript = await get_conversation_transcript(user=user, conversation=conversation) # Build conversation history from recent messages (last 10 for context)
recent_messages = (
conversation.messages[-10:]
if len(conversation.messages) > 10
else conversation.messages
)
transcript_prompt = f"Here is the message transcript thus far {transcript}." messages_payload = [
prompt = f"""Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive. {
{transcript_prompt if len(transcript) > 0 else ""} "role": "system",
Respond to this prompt: {query}""" "content": "You are a helpful cat assistant named Simba that understands veterinary terms. When there are questions to you specifically, they are referring to Simba the cat. Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.\n\nIMPORTANT: When users ask factual questions about Simba's health, medical history, veterinary visits, medications, weight, or any information that would be in documents, you MUST use the simba_search tool to retrieve accurate information before answering. Do not rely on general knowledge - always search the documents for factual questions.",
}
]
payload = { # Add recent conversation history
"messages": [ for msg in recent_messages[:-1]: # Exclude the message we just added
{ role = "user" if msg.speaker == "user" else "assistant"
"role": "system", messages_payload.append({"role": role, "content": msg.text})
"content": "You are a helpful cat assistant named Simba that understands veterinary terms. When there are questions to you specifically, they are referring to Simba the cat. Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.\n\nIMPORTANT: When users ask factual questions about Simba's health, medical history, veterinary visits, medications, weight, or any information that would be in documents, you MUST use the simba_search tool to retrieve accurate information before answering. Do not rely on general knowledge - always search the documents for factual questions.",
}, # Add current query
{"role": "user", "content": prompt}, messages_payload.append({"role": "user", "content": query})
]
} payload = {"messages": messages_payload}
response = await main_agent.ainvoke(payload) response = await main_agent.ainvoke(payload)
message = response.get("messages", [])[-1].content message = response.get("messages", [])[-1].content

View File

@@ -1,10 +1,52 @@
import os
from typing import cast
from langchain.agents import create_agent from langchain.agents import create_agent
from langchain.chat_models import BaseChatModel
from langchain.tools import tool from langchain.tools import tool
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
from tavily import AsyncTavilyClient
from blueprints.rag.logic import query_vector_store from blueprints.rag.logic import query_vector_store
openai_gpt_5_mini = ChatOpenAI(model="gpt-5-mini") openai_gpt_5_mini = ChatOpenAI(model="gpt-5-mini")
ollama_deepseek = ChatOllama(model="llama3.1:8b", base_url=os.getenv("OLLAMA_URL"))
model_with_fallback = cast(
BaseChatModel, ollama_deepseek.with_fallbacks([openai_gpt_5_mini])
)
client = AsyncTavilyClient(os.getenv("TAVILY_KEY"), "")
@tool
async def web_search(query: str) -> str:
"""Search the web for current information using Tavily.
Use this tool when you need to:
- Find current information not in the knowledge base
- Look up recent events, news, or updates
- Verify facts or get additional context
- Search for information outside of Simba's documents
Args:
query: The search query to look up on the web
Returns:
Search results from the web with titles, content, and source URLs
"""
response = await client.search(query=query, search_depth="basic")
results = response.get("results", [])
if not results:
return "No results found for the query."
formatted = "\n\n".join(
[
f"**{result['title']}**\n{result['content']}\nSource: {result['url']}"
for result in results[:5]
]
)
return formatted
@tool(response_format="content_and_artifact") @tool(response_format="content_and_artifact")
@@ -33,4 +75,4 @@ async def simba_search(query: str):
return serialized, docs return serialized, docs
main_agent = create_agent(model=openai_gpt_5_mini, tools=[simba_search]) main_agent = create_agent(model=model_with_fallback, tools=[simba_search, web_search])

View File

@@ -8,7 +8,7 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
from .fetchers import PaperlessNGXService from .fetchers import PaperlessNGXService
embeddings = OpenAIEmbeddings(model="text-embedding-3-large") embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
vector_store = Chroma( vector_store = Chroma(
collection_name="simba_docs", collection_name="simba_docs",
@@ -61,7 +61,7 @@ async def index_documents():
async def query_vector_store(query: str): async def query_vector_store(query: str):
retrieved_docs = vector_store.similarity_search(query, k=2) retrieved_docs = await vector_store.asimilarity_search(query, k=2)
serialized = "\n\n".join( serialized = "\n\n".join(
(f"Source: {doc.metadata}\nContent: {doc.page_content}") (f"Source: {doc.metadata}\nContent: {doc.page_content}")
for doc in retrieved_docs for doc in retrieved_docs

View File

@@ -34,6 +34,8 @@ dependencies = [
"langchain-chroma>=1.0.0", "langchain-chroma>=1.0.0",
"langchain-community>=0.4.1", "langchain-community>=0.4.1",
"jq>=1.10.0", "jq>=1.10.0",
"langchain-ollama>=1.0.1",
"tavily-python>=0.7.17",
] ]
[tool.aerich] [tool.aerich]

31
services/raggr/uv.lock generated
View File

@@ -1281,6 +1281,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/83/bd/9df897cbc98290bf71140104ee5b9777cf5291afb80333aa7da5a497339b/langchain_core-1.2.5-py3-none-any.whl", hash = "sha256:3255944ef4e21b2551facb319bfc426057a40247c0a05de5bd6f2fc021fbfa34", size = 484851, upload-time = "2025-12-22T23:45:30.525Z" }, { url = "https://files.pythonhosted.org/packages/83/bd/9df897cbc98290bf71140104ee5b9777cf5291afb80333aa7da5a497339b/langchain_core-1.2.5-py3-none-any.whl", hash = "sha256:3255944ef4e21b2551facb319bfc426057a40247c0a05de5bd6f2fc021fbfa34", size = 484851, upload-time = "2025-12-22T23:45:30.525Z" },
] ]
[[package]]
name = "langchain-ollama"
version = "1.0.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "langchain-core" },
{ name = "ollama" },
]
sdist = { url = "https://files.pythonhosted.org/packages/73/51/72cd04d74278f3575f921084f34280e2f837211dc008c9671c268c578afe/langchain_ollama-1.0.1.tar.gz", hash = "sha256:e37880c2f41cdb0895e863b1cfd0c2c840a117868b3f32e44fef42569e367443", size = 153850, upload-time = "2025-12-12T21:48:28.68Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e3/46/f2907da16dc5a5a6c679f83b7de21176178afad8d2ca635a581429580ef6/langchain_ollama-1.0.1-py3-none-any.whl", hash = "sha256:37eb939a4718a0255fe31e19fbb0def044746c717b01b97d397606ebc3e9b440", size = 29207, upload-time = "2025-12-12T21:48:27.832Z" },
]
[[package]] [[package]]
name = "langchain-openai" name = "langchain-openai"
version = "1.1.6" version = "1.1.6"
@@ -2521,6 +2534,7 @@ dependencies = [
{ name = "langchain" }, { name = "langchain" },
{ name = "langchain-chroma" }, { name = "langchain-chroma" },
{ name = "langchain-community" }, { name = "langchain-community" },
{ name = "langchain-ollama" },
{ name = "langchain-openai" }, { name = "langchain-openai" },
{ name = "ollama" }, { name = "ollama" },
{ name = "openai" }, { name = "openai" },
@@ -2533,6 +2547,7 @@ dependencies = [
{ name = "python-dotenv" }, { name = "python-dotenv" },
{ name = "quart" }, { name = "quart" },
{ name = "quart-jwt-extended" }, { name = "quart-jwt-extended" },
{ name = "tavily-python" },
{ name = "tomlkit" }, { name = "tomlkit" },
{ name = "tortoise-orm" }, { name = "tortoise-orm" },
{ name = "tortoise-orm-stubs" }, { name = "tortoise-orm-stubs" },
@@ -2554,6 +2569,7 @@ requires-dist = [
{ name = "langchain", specifier = ">=1.2.0" }, { name = "langchain", specifier = ">=1.2.0" },
{ name = "langchain-chroma", specifier = ">=1.0.0" }, { name = "langchain-chroma", specifier = ">=1.0.0" },
{ name = "langchain-community", specifier = ">=0.4.1" }, { name = "langchain-community", specifier = ">=0.4.1" },
{ name = "langchain-ollama", specifier = ">=1.0.1" },
{ name = "langchain-openai", specifier = ">=1.1.6" }, { name = "langchain-openai", specifier = ">=1.1.6" },
{ name = "ollama", specifier = ">=0.6.0" }, { name = "ollama", specifier = ">=0.6.0" },
{ name = "openai", specifier = ">=2.0.1" }, { name = "openai", specifier = ">=2.0.1" },
@@ -2566,6 +2582,7 @@ requires-dist = [
{ name = "python-dotenv", specifier = ">=1.0.0" }, { name = "python-dotenv", specifier = ">=1.0.0" },
{ name = "quart", specifier = ">=0.20.0" }, { name = "quart", specifier = ">=0.20.0" },
{ name = "quart-jwt-extended", specifier = ">=0.1.0" }, { name = "quart-jwt-extended", specifier = ">=0.1.0" },
{ name = "tavily-python", specifier = ">=0.7.17" },
{ name = "tomlkit", specifier = ">=0.13.3" }, { name = "tomlkit", specifier = ">=0.13.3" },
{ name = "tortoise-orm", specifier = ">=0.25.1" }, { name = "tortoise-orm", specifier = ">=0.25.1" },
{ name = "tortoise-orm-stubs", specifier = ">=1.0.2" }, { name = "tortoise-orm-stubs", specifier = ">=1.0.2" },
@@ -2847,6 +2864,20 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" },
] ]
[[package]]
name = "tavily-python"
version = "0.7.17"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "httpx" },
{ name = "requests" },
{ name = "tiktoken" },
]
sdist = { url = "https://files.pythonhosted.org/packages/5f/eb/d7371ee68119380ab6561c6998eacf3031327ba89c6081d36128ab4a2184/tavily_python-0.7.17.tar.gz", hash = "sha256:437ba064639dfdce1acdbc37cbb73246abe500ab735e988a4b8698a8d5fb7df7", size = 21321, upload-time = "2025-12-17T17:08:39.3Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c7/ce/88565f0c9f7654bc90e19f1e76b3bffee7ff9c1741a2124ec2f2900fb080/tavily_python-0.7.17-py3-none-any.whl", hash = "sha256:a2725b9cba71e404e73d19ff277df916283c10100137c336e07f8e1bd7789fcf", size = 18214, upload-time = "2025-12-17T17:08:38.442Z" },
]
[[package]] [[package]]
name = "tenacity" name = "tenacity"
version = "9.1.2" version = "9.1.2"