6 Commits

Author SHA1 Message Date
ryan
97be5262a8 new feature 2026-03-03 08:23:31 -05:00
ryan
86cc269b3a yeet 2026-03-03 08:23:31 -05:00
ryan
0e3684031b Merge pull request 'Replace blue/indigo sidebar colors with warm stone neutrals' (#15) from worktree-crispy-whistling-snowglobe into main
Reviewed-on: #15
2026-03-03 08:19:32 -05:00
ryan
6d7d713532 Replace blue/indigo sidebar colors with warm stone neutrals
Align ConversationList colors with Anthropic design guidelines,
replacing indigo-300/blue-400 with stone-200/stone-300.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-03 08:18:08 -05:00
Ryan Chen
6ae36b51a0 ynab update 2026-01-31 22:47:43 -05:00
ryan
f0f72cce36 Merge pull request 'Replace Ollama with llama-server (OpenAI-compatible API)' (#14) from feature/llama-cpp-integration into main
Reviewed-on: #14
2026-01-31 21:41:19 -05:00
33 changed files with 2649 additions and 257 deletions

View File

@@ -27,6 +27,9 @@ CHROMADB_PATH=./data/chromadb
# OpenAI Configuration # OpenAI Configuration
OPENAI_API_KEY=your-openai-api-key OPENAI_API_KEY=your-openai-api-key
# Tavily Configuration (for web search)
TAVILY_API_KEY=your-tavily-api-key
# Immich Configuration # Immich Configuration
IMMICH_URL=http://192.168.1.5:2283 IMMICH_URL=http://192.168.1.5:2283
IMMICH_API_KEY=your-immich-api-key IMMICH_API_KEY=your-immich-api-key
@@ -45,3 +48,38 @@ OIDC_USE_DISCOVERY=true
# OIDC_TOKEN_ENDPOINT=https://auth.example.com/api/oidc/token # OIDC_TOKEN_ENDPOINT=https://auth.example.com/api/oidc/token
# OIDC_USERINFO_ENDPOINT=https://auth.example.com/api/oidc/userinfo # OIDC_USERINFO_ENDPOINT=https://auth.example.com/api/oidc/userinfo
# OIDC_JWKS_URI=https://auth.example.com/api/oidc/jwks # OIDC_JWKS_URI=https://auth.example.com/api/oidc/jwks
# YNAB Configuration
# Get your Personal Access Token from https://app.ynab.com/settings/developer
YNAB_ACCESS_TOKEN=your-ynab-personal-access-token
# Optional: Specify a budget ID, or leave empty to use the default/first budget
YNAB_BUDGET_ID=
# Twilio Configuration (WhatsApp)
TWILIO_ACCOUNT_SID=your-twilio-account-sid
TWILIO_AUTH_TOKEN=your-twilio-auth-token
TWILIO_WHATSAPP_NUMBER=whatsapp:+14155238886
# Comma-separated list of WhatsApp numbers allowed to use the service (e.g., whatsapp:+1234567890)
# Use * to allow any number
ALLOWED_WHATSAPP_NUMBERS=
# Set to false to disable Twilio signature validation in development
TWILIO_SIGNATURE_VALIDATION=true
# If behind a reverse proxy, set this to your public webhook URL so signature validation works
# TWILIO_WEBHOOK_URL=https://your-domain.com/api/whatsapp/webhook
# Rate limiting: max messages per window (default: 10 messages per 60 seconds)
# WHATSAPP_RATE_LIMIT_MAX=10
# WHATSAPP_RATE_LIMIT_WINDOW=60
# Obsidian Configuration (headless sync)
# Auth token from Obsidian account (Settings → Account → API token)
OBSIDIAN_AUTH_TOKEN=your-obsidian-auth-token
# Vault ID to sync (found in Obsidian sync settings)
OBSIDIAN_VAULT_ID=your-vault-id
# End-to-end encryption password (if vault uses E2E encryption)
OBSIDIAN_E2E_PASSWORD=
# Device name shown in Obsidian sync activity
OBSIDIAN_DEVICE_NAME=simbarag
# Set to true to run continuous sync in the background
OBSIDIAN_CONTINUOUS_SYNC=false
# Local path to Obsidian vault (where files are synced)
OBSIDIAN_VAULT_PATH=/app/data/obsidian

1
.envrc Normal file
View File

@@ -0,0 +1 @@
dotenv_if_exists

3
.gitignore vendored
View File

@@ -18,3 +18,6 @@ chromadb_openai/
chroma_db/ chroma_db/
database/ database/
*.db *.db
obvault/
.claude

View File

@@ -11,21 +11,21 @@ SimbaRAG is a RAG (Retrieval-Augmented Generation) conversational AI system for
### Development ### Development
```bash ```bash
# Start dev environment with hot reload # Start environment
docker compose -f docker-compose.dev.yml up --build docker compose up --build
# View logs # View logs
docker compose -f docker-compose.dev.yml logs -f raggr docker compose logs -f raggr
``` ```
### Database Migrations (Aerich/Tortoise ORM) ### Database Migrations (Aerich/Tortoise ORM)
```bash ```bash
# Generate migration (must run in Docker with DB access) # Generate migration (must run in Docker with DB access)
docker compose -f docker-compose.dev.yml exec raggr aerich migrate --name describe_change docker compose exec raggr aerich migrate --name describe_change
# Apply migrations (auto-runs on startup, manual if needed) # Apply migrations (auto-runs on startup, manual if needed)
docker compose -f docker-compose.dev.yml exec raggr aerich upgrade docker compose exec raggr aerich upgrade
# View migration history # View migration history
docker compose exec raggr aerich history docker compose exec raggr aerich history

View File

@@ -6,9 +6,9 @@ WORKDIR /app
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
build-essential \ build-essential \
curl \ curl \
&& curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \ && curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
&& apt-get install -y nodejs \ && apt-get install -y nodejs \
&& npm install -g yarn \ && npm install -g yarn obsidian-headless \
&& rm -rf /var/lib/apt/lists/* \ && rm -rf /var/lib/apt/lists/* \
&& curl -LsSf https://astral.sh/uv/install.sh | sh && curl -LsSf https://astral.sh/uv/install.sh | sh

View File

@@ -1,4 +1,8 @@
import os import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Database configuration with environment variable support # Database configuration with environment variable support
# Use DATABASE_PATH for relative paths or DATABASE_URL for full connection strings # Use DATABASE_PATH for relative paths or DATABASE_URL for full connection strings

27
app.py
View File

@@ -1,16 +1,25 @@
import logging
import os import os
from dotenv import load_dotenv
from quart import Quart, jsonify, render_template, request, send_from_directory from quart import Quart, jsonify, render_template, request, send_from_directory
from quart_jwt_extended import JWTManager, get_jwt_identity, jwt_refresh_token_required from quart_jwt_extended import JWTManager, get_jwt_identity, jwt_refresh_token_required
from tortoise.contrib.quart import register_tortoise from tortoise import Tortoise
import blueprints.conversation import blueprints.conversation
import blueprints.conversation.logic import blueprints.conversation.logic
import blueprints.rag import blueprints.rag
import blueprints.users import blueprints.users
import blueprints.whatsapp
import blueprints.users.models import blueprints.users.models
from main import consult_simba_oracle from main import consult_simba_oracle
# Load environment variables
load_dotenv()
# Configure logging
logging.basicConfig(level=logging.INFO)
app = Quart( app = Quart(
__name__, __name__,
static_folder="raggr-frontend/dist/static", static_folder="raggr-frontend/dist/static",
@@ -24,6 +33,7 @@ jwt = JWTManager(app)
app.register_blueprint(blueprints.users.user_blueprint) app.register_blueprint(blueprints.users.user_blueprint)
app.register_blueprint(blueprints.conversation.conversation_blueprint) app.register_blueprint(blueprints.conversation.conversation_blueprint)
app.register_blueprint(blueprints.rag.rag_blueprint) app.register_blueprint(blueprints.rag.rag_blueprint)
app.register_blueprint(blueprints.whatsapp.whatsapp_blueprint)
# Database configuration with environment variable support # Database configuration with environment variable support
@@ -44,12 +54,15 @@ TORTOISE_CONFIG = {
}, },
} }
# Initialize Tortoise ORM # Initialize Tortoise ORM with lifecycle hooks
register_tortoise( @app.while_serving
app, async def lifespan():
config=TORTOISE_CONFIG, logging.info("Initializing Tortoise ORM...")
generate_schemas=False, # Disabled - using Aerich for migrations await Tortoise.init(config=TORTOISE_CONFIG)
) logging.info("Tortoise ORM initialized successfully")
yield
logging.info("Closing Tortoise ORM connections...")
await Tortoise.close_connections()
# Serve React static files # Serve React static files

View File

@@ -1,6 +1,7 @@
import datetime import datetime
import json
from quart import Blueprint, jsonify, request from quart import Blueprint, jsonify, make_response, request
from quart_jwt_extended import ( from quart_jwt_extended import (
get_jwt_identity, get_jwt_identity,
jwt_refresh_token_required, jwt_refresh_token_required,
@@ -24,35 +25,7 @@ conversation_blueprint = Blueprint(
"conversation_api", __name__, url_prefix="/api/conversation" "conversation_api", __name__, url_prefix="/api/conversation"
) )
_SYSTEM_PROMPT = """You are a helpful cat assistant named Simba that understands veterinary terms. When there are questions to you specifically, they are referring to Simba the cat. Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.
@conversation_blueprint.post("/query")
@jwt_refresh_token_required
async def query():
current_user_uuid = get_jwt_identity()
user = await blueprints.users.models.User.get(id=current_user_uuid)
data = await request.get_json()
query = data.get("query")
conversation_id = data.get("conversation_id")
conversation = await get_conversation_by_id(conversation_id)
await conversation.fetch_related("messages")
await add_message_to_conversation(
conversation=conversation,
message=query,
speaker="user",
user=user,
)
# Build conversation history from recent messages (last 10 for context)
recent_messages = (
conversation.messages[-10:]
if len(conversation.messages) > 10
else conversation.messages
)
messages_payload = [
{
"role": "system",
"content": """You are a helpful cat assistant named Simba that understands veterinary terms. When there are questions to you specifically, they are referring to Simba the cat. Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.
SIMBA FACTS (as of January 2026): SIMBA FACTS (as of January 2026):
- Name: Simba - Name: Simba
@@ -84,18 +57,65 @@ Upcoming Appointments:
- Routine Examination: Due 6/1/2026 - Routine Examination: Due 6/1/2026
- FVRCP-3yr Vaccine: Due 10/2/2026 - FVRCP-3yr Vaccine: Due 10/2/2026
IMPORTANT: When users ask factual questions about Simba's health, medical history, veterinary visits, medications, weight, or any information that would be in documents, you MUST use the simba_search tool to retrieve accurate information before answering. Do not rely on general knowledge - always search the documents for factual questions.""", IMPORTANT: When users ask factual questions about Simba's health, medical history, veterinary visits, medications, weight, or any information that would be in documents, you MUST use the simba_search tool to retrieve accurate information before answering. Do not rely on general knowledge - always search the documents for factual questions.
}
]
# Add recent conversation history BUDGET & FINANCE (YNAB Integration):
You have access to Ryan's budget data through YNAB (You Need A Budget). When users ask about financial matters, use the appropriate YNAB tools:
- Use ynab_budget_summary for overall budget health and status questions
- Use ynab_search_transactions to find specific purchases or spending at particular stores
- Use ynab_category_spending to analyze spending by category for a month
- Use ynab_insights to provide spending trends, patterns, and recommendations
Always use these tools when asked about budgets, spending, transactions, or financial health.
NOTES & RESEARCH (Obsidian Integration):
You have access to Ryan's Obsidian vault through the Obsidian integration. When users ask about research, personal notes, or information that might be stored in markdown files, use the appropriate Obsidian tools:
- Use obsidian_search_notes to search through your vault for relevant information
- Use obsidian_read_note to read the full content of a specific note by path
- Use obsidian_create_note to save new findings, ideas, or research to your vault
- Use obsidian_create_task to create task notes with due dates
Always use these tools when users ask about notes, research, ideas, tasks, or when you want to save information for future reference.
DAILY JOURNAL (Task Tracking):
You have access to Ryan's daily journal notes. Each note lives at journal/YYYY/YYYY-MM-DD.md and has two sections: tasks and log.
- Use journal_get_today to read today's full daily note (tasks + log)
- Use journal_get_tasks to list tasks (done/pending) for today or a specific date
- Use journal_add_task to add a new task to today's (or a given date's) note
- Use journal_complete_task to check off a task as done
Use these tools when Ryan asks about today's tasks, wants to add something to his list, or wants to mark a task complete."""
def _build_messages_payload(conversation, query_text: str) -> list:
recent_messages = (
conversation.messages[-10:]
if len(conversation.messages) > 10
else conversation.messages
)
messages_payload = [{"role": "system", "content": _SYSTEM_PROMPT}]
for msg in recent_messages[:-1]: # Exclude the message we just added for msg in recent_messages[:-1]: # Exclude the message we just added
role = "user" if msg.speaker == "user" else "assistant" role = "user" if msg.speaker == "user" else "assistant"
messages_payload.append({"role": role, "content": msg.text}) messages_payload.append({"role": role, "content": msg.text})
messages_payload.append({"role": "user", "content": query_text})
return messages_payload
# Add current query
messages_payload.append({"role": "user", "content": query})
@conversation_blueprint.post("/query")
@jwt_refresh_token_required
async def query():
current_user_uuid = get_jwt_identity()
user = await blueprints.users.models.User.get(id=current_user_uuid)
data = await request.get_json()
query = data.get("query")
conversation_id = data.get("conversation_id")
conversation = await get_conversation_by_id(conversation_id)
await conversation.fetch_related("messages")
await add_message_to_conversation(
conversation=conversation,
message=query,
speaker="user",
user=user,
)
messages_payload = _build_messages_payload(conversation, query)
payload = {"messages": messages_payload} payload = {"messages": messages_payload}
response = await main_agent.ainvoke(payload) response = await main_agent.ainvoke(payload)
@@ -109,6 +129,75 @@ IMPORTANT: When users ask factual questions about Simba's health, medical histor
return jsonify({"response": message}) return jsonify({"response": message})
@conversation_blueprint.post("/stream-query")
@jwt_refresh_token_required
async def stream_query():
current_user_uuid = get_jwt_identity()
user = await blueprints.users.models.User.get(id=current_user_uuid)
data = await request.get_json()
query_text = data.get("query")
conversation_id = data.get("conversation_id")
conversation = await get_conversation_by_id(conversation_id)
await conversation.fetch_related("messages")
await add_message_to_conversation(
conversation=conversation,
message=query_text,
speaker="user",
user=user,
)
messages_payload = _build_messages_payload(conversation, query_text)
payload = {"messages": messages_payload}
async def event_generator():
final_message = None
try:
async for event in main_agent.astream_events(payload, version="v2"):
event_type = event.get("event")
if event_type == "on_tool_start":
yield f"data: {json.dumps({'type': 'tool_start', 'tool': event['name']})}\n\n"
elif event_type == "on_tool_end":
yield f"data: {json.dumps({'type': 'tool_end', 'tool': event['name']})}\n\n"
elif event_type == "on_chain_end":
output = event.get("data", {}).get("output")
if isinstance(output, dict):
msgs = output.get("messages", [])
if msgs:
last_msg = msgs[-1]
content = getattr(last_msg, "content", None)
if isinstance(content, str) and content:
final_message = content
except Exception as e:
yield f"data: {json.dumps({'type': 'error', 'message': str(e)})}\n\n"
if final_message:
await add_message_to_conversation(
conversation=conversation,
message=final_message,
speaker="simba",
user=user,
)
yield f"data: {json.dumps({'type': 'response', 'message': final_message})}\n\n"
else:
yield f"data: {json.dumps({'type': 'error', 'message': 'No response generated'})}\n\n"
yield "data: [DONE]\n\n"
return await make_response(
event_generator(),
200,
{
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
"X-Accel-Buffering": "no",
},
)
@conversation_blueprint.route("/<conversation_id>") @conversation_blueprint.route("/<conversation_id>")
@jwt_refresh_token_required @jwt_refresh_token_required
async def get_conversation(conversation_id: str): async def get_conversation(conversation_id: str):

View File

@@ -1,6 +1,7 @@
import os import os
from typing import cast from typing import cast
from dotenv import load_dotenv
from langchain.agents import create_agent from langchain.agents import create_agent
from langchain.chat_models import BaseChatModel from langchain.chat_models import BaseChatModel
from langchain.tools import tool from langchain.tools import tool
@@ -8,6 +9,11 @@ from langchain_openai import ChatOpenAI
from tavily import AsyncTavilyClient from tavily import AsyncTavilyClient
from blueprints.rag.logic import query_vector_store from blueprints.rag.logic import query_vector_store
from utils.obsidian_service import ObsidianService
from utils.ynab_service import YNABService
# Load environment variables
load_dotenv()
# Configure LLM with llama-server or OpenAI fallback # Configure LLM with llama-server or OpenAI fallback
llama_url = os.getenv("LLAMA_SERVER_URL") llama_url = os.getenv("LLAMA_SERVER_URL")
@@ -25,7 +31,41 @@ model_with_fallback = cast(
BaseChatModel, BaseChatModel,
llama_chat.with_fallbacks([openai_fallback]) if llama_chat else openai_fallback, llama_chat.with_fallbacks([openai_fallback]) if llama_chat else openai_fallback,
) )
client = AsyncTavilyClient(os.getenv("TAVILY_KEY"), "") client = AsyncTavilyClient(api_key=os.getenv("TAVILY_API_KEY", ""))
# Initialize YNAB service (will only work if YNAB_ACCESS_TOKEN is set)
try:
ynab_service = YNABService()
ynab_enabled = True
except (ValueError, Exception) as e:
print(f"YNAB service not initialized: {e}")
ynab_enabled = False
# Initialize Obsidian service (will only work if OBSIDIAN_VAULT_PATH is set)
try:
obsidian_service = ObsidianService()
obsidian_enabled = True
except (ValueError, Exception) as e:
print(f"Obsidian service not initialized: {e}")
obsidian_enabled = False
@tool
def get_current_date() -> str:
"""Get today's date in a human-readable format.
Use this tool when you need to:
- Reference today's date in your response
- Answer questions like "what is today's date"
- Format dates in messages or documents
- Calculate time periods relative to today
Returns:
Today's date in YYYY-MM-DD format
"""
from datetime import date
return date.today().isoformat()
@tool @tool
@@ -85,4 +125,494 @@ async def simba_search(query: str):
return serialized, docs return serialized, docs
main_agent = create_agent(model=model_with_fallback, tools=[simba_search, web_search]) @tool
def ynab_budget_summary() -> str:
"""Get overall budget summary and health status from YNAB.
Use this tool when the user asks about:
- Overall budget health or status
- How much money is to be budgeted
- Total budget amounts or spending
- General budget overview questions
Returns:
Summary of budget health, to-be-budgeted amount, total budgeted,
total activity, and available amounts.
"""
if not ynab_enabled:
return "YNAB integration is not configured. Please set YNAB_ACCESS_TOKEN environment variable."
try:
summary = ynab_service.get_budget_summary()
return summary["summary"]
except Exception as e:
return f"Error fetching budget summary: {str(e)}"
@tool
def ynab_search_transactions(
start_date: str = "",
end_date: str = "",
category_name: str = "",
payee_name: str = "",
) -> str:
"""Search YNAB transactions by date range, category, or payee.
Use this tool when the user asks about:
- Specific transactions or purchases
- Spending at a particular store or payee
- Transactions in a specific category
- What was spent during a time period
Args:
start_date: Start date in YYYY-MM-DD format (optional, defaults to 30 days ago)
end_date: End date in YYYY-MM-DD format (optional, defaults to today)
category_name: Filter by category name (optional, partial match)
payee_name: Filter by payee/store name (optional, partial match)
Returns:
List of matching transactions with dates, amounts, categories, and payees.
"""
if not ynab_enabled:
return "YNAB integration is not configured. Please set YNAB_ACCESS_TOKEN environment variable."
try:
result = ynab_service.get_transactions(
start_date=start_date or None,
end_date=end_date or None,
category_name=category_name or None,
payee_name=payee_name or None,
)
if result["count"] == 0:
return "No transactions found matching the specified criteria."
# Format transactions for readability
txn_list = []
for txn in result["transactions"][:10]: # Limit to 10 for readability
txn_list.append(
f"- {txn['date']}: {txn['payee']} - ${abs(txn['amount']):.2f} ({txn['category'] or 'Uncategorized'})"
)
return (
f"Found {result['count']} transactions from {result['start_date']} to {result['end_date']}. "
f"Total: ${abs(result['total_amount']):.2f}\n\n"
+ "\n".join(txn_list)
+ (
f"\n\n(Showing first 10 of {result['count']} transactions)"
if result["count"] > 10
else ""
)
)
except Exception as e:
return f"Error searching transactions: {str(e)}"
@tool
def ynab_category_spending(month: str = "") -> str:
"""Get spending breakdown by category for a specific month.
Use this tool when the user asks about:
- Spending by category
- What categories were overspent
- Monthly spending breakdown
- Budget vs actual spending for a month
Args:
month: Month in YYYY-MM format (optional, defaults to current month)
Returns:
Spending breakdown by category with budgeted, spent, and available amounts.
"""
if not ynab_enabled:
return "YNAB integration is not configured. Please set YNAB_ACCESS_TOKEN environment variable."
try:
result = ynab_service.get_category_spending(month=month or None)
summary = (
f"Budget spending for {result['month']}:\n"
f"Total budgeted: ${result['total_budgeted']:.2f}\n"
f"Total spent: ${result['total_spent']:.2f}\n"
f"Total available: ${result['total_available']:.2f}\n"
)
if result["overspent_categories"]:
summary += (
f"\nOverspent categories ({len(result['overspent_categories'])}):\n"
)
for cat in result["overspent_categories"][:5]:
summary += f"- {cat['name']}: Budgeted ${cat['budgeted']:.2f}, Spent ${cat['spent']:.2f}, Over by ${cat['overspent_by']:.2f}\n"
# Add top spending categories
summary += "\nTop spending categories:\n"
for cat in result["categories"][:10]:
if cat["activity"] < 0: # Only show spending (negative activity)
summary += f"- {cat['category']}: ${abs(cat['activity']):.2f} (budgeted: ${cat['budgeted']:.2f}, available: ${cat['available']:.2f})\n"
return summary
except Exception as e:
return f"Error fetching category spending: {str(e)}"
@tool
def ynab_insights(months_back: int = 3) -> str:
"""Generate insights about spending patterns and budget health over time.
Use this tool when the user asks about:
- Spending trends or patterns
- Budget recommendations
- Which categories are frequently overspent
- How current spending compares to past months
- Overall budget health analysis
Args:
months_back: Number of months to analyze (default 3, max 6)
Returns:
Insights about spending trends, frequently overspent categories,
and personalized recommendations.
"""
if not ynab_enabled:
return "YNAB integration is not configured. Please set YNAB_ACCESS_TOKEN environment variable."
try:
# Limit to reasonable range
months_back = min(max(1, months_back), 6)
result = ynab_service.get_spending_insights(months_back=months_back)
if "error" in result:
return result["error"]
summary = (
f"Spending insights for the last {months_back} months:\n\n"
f"Average monthly spending: ${result['average_monthly_spending']:.2f}\n"
f"Current month spending: ${result['current_month_spending']:.2f}\n"
f"Spending trend: {result['spending_trend']}\n"
)
if result["frequently_overspent_categories"]:
summary += "\nFrequently overspent categories:\n"
for cat in result["frequently_overspent_categories"][:5]:
summary += f"- {cat['category']}: overspent in {cat['months_overspent']} of {months_back} months\n"
if result["recommendations"]:
summary += "\nRecommendations:\n"
for rec in result["recommendations"]:
summary += f"- {rec}\n"
return summary
except Exception as e:
return f"Error generating insights: {str(e)}"
@tool
async def obsidian_search_notes(query: str) -> str:
"""Search through Obsidian vault notes for information.
Use this tool when you need to:
- Find information in personal notes
- Research past ideas or thoughts from your vault
- Look up information stored in markdown files
- Search for content that would be in your notes
Args:
query: The search query to look up in your Obsidian vault
Returns:
Relevant notes with their content and metadata
"""
if not obsidian_enabled:
return "Obsidian integration is not configured. Please set OBSIDIAN_VAULT_PATH environment variable."
try:
# Query ChromaDB for obsidian documents
serialized, docs = await query_vector_store(query=query)
return serialized
except Exception as e:
return f"Error searching Obsidian notes: {str(e)}"
@tool
async def obsidian_read_note(relative_path: str) -> str:
"""Read a specific note from your Obsidian vault.
Use this tool when you want to:
- Read the full content of a specific note
- Get detailed information from a particular markdown file
- Access content from a known note path
Args:
relative_path: Path to note relative to vault root (e.g., "notes/my-note.md")
Returns:
Full content and metadata of the requested note
"""
if not obsidian_enabled:
return "Obsidian integration is not configured. Please set OBSIDIAN_VAULT_PATH environment variable."
try:
note = obsidian_service.read_note(relative_path)
content_data = note["content"]
result = f"File: {note['path']}\n\n"
result += f"Frontmatter:\n{content_data['metadata']}\n\n"
result += f"Content:\n{content_data['content']}\n\n"
result += f"Tags: {', '.join(content_data['tags'])}\n"
result += f"Contains {len(content_data['wikilinks'])} wikilinks and {len(content_data['embeds'])} embeds"
return result
except FileNotFoundError:
return f"Note not found at '{relative_path}'. Please check the path is correct."
except Exception as e:
return f"Error reading note: {str(e)}"
@tool
async def obsidian_create_note(
title: str,
content: str,
folder: str = "notes",
tags: str = "",
) -> str:
"""Create a new note in your Obsidian vault.
Use this tool when you want to:
- Save research findings or ideas to your vault
- Create a new document with a specific title
- Write notes for future reference
Args:
title: The title of the note (will be used as filename)
content: The body content of the note
folder: The folder where to create the note (default: "notes")
tags: Comma-separated list of tags to add (default: "")
Returns:
Path to the created note
"""
if not obsidian_enabled:
return "Obsidian integration is not configured. Please set OBSIDIAN_VAULT_PATH environment variable."
try:
# Parse tags from comma-separated string
tag_list = [tag.strip() for tag in tags.split(",") if tag.strip()]
relative_path = obsidian_service.create_note(
title=title,
content=content,
folder=folder,
tags=tag_list,
)
return f"Successfully created note: {relative_path}"
except Exception as e:
return f"Error creating note: {str(e)}"
@tool
def journal_get_today() -> str:
"""Get today's daily journal note, including all tasks and log entries.
Use this tool when the user asks about:
- What's on their plate today
- Today's tasks or to-do list
- Today's journal entry
- What they've logged today
Returns:
The full content of today's daily note, or a message if it doesn't exist.
"""
if not obsidian_enabled:
return "Obsidian integration is not configured."
try:
note = obsidian_service.get_daily_note()
if not note["found"]:
return f"No daily note found for {note['date']}. Use journal_add_task to create one."
return f"Daily note for {note['date']}:\n\n{note['content']}"
except Exception as e:
return f"Error reading daily note: {str(e)}"
@tool
def journal_get_tasks(date: str = "") -> str:
"""Get tasks from a daily journal note.
Use this tool when the user asks about:
- Open or pending tasks for a day
- What tasks are done or not done
- Task status for today or a specific date
Args:
date: Date in YYYY-MM-DD format (optional, defaults to today)
Returns:
List of tasks with their completion status.
"""
if not obsidian_enabled:
return "Obsidian integration is not configured."
try:
from datetime import datetime as dt
parsed_date = dt.strptime(date, "%Y-%m-%d") if date else None
result = obsidian_service.get_daily_tasks(parsed_date)
if not result["found"]:
return f"No daily note found for {result['date']}."
if not result["tasks"]:
return f"No tasks found in the {result['date']} note."
lines = [f"Tasks for {result['date']}:"]
for task in result["tasks"]:
status = "[x]" if task["done"] else "[ ]"
lines.append(f"- {status} {task['text']}")
return "\n".join(lines)
except Exception as e:
return f"Error reading tasks: {str(e)}"
@tool
def journal_add_task(task: str, date: str = "") -> str:
"""Add a task to a daily journal note.
Use this tool when the user wants to:
- Add a task or to-do to today's note
- Remind themselves to do something
- Track a new item in their daily note
Args:
task: The task description to add
date: Date in YYYY-MM-DD format (optional, defaults to today)
Returns:
Confirmation of the added task.
"""
if not obsidian_enabled:
return "Obsidian integration is not configured."
try:
from datetime import datetime as dt
parsed_date = dt.strptime(date, "%Y-%m-%d") if date else None
result = obsidian_service.add_task_to_daily_note(task, parsed_date)
if result["success"]:
note_date = date or dt.now().strftime("%Y-%m-%d")
extra = " (created new note)" if result["created_note"] else ""
return f"Added task '{task}' to {note_date}{extra}."
return "Failed to add task."
except Exception as e:
return f"Error adding task: {str(e)}"
@tool
def journal_complete_task(task: str, date: str = "") -> str:
"""Mark a task as complete in a daily journal note.
Use this tool when the user wants to:
- Check off a task as done
- Mark something as completed
- Update task status in their daily note
Args:
task: The task text to mark complete (exact or partial match)
date: Date in YYYY-MM-DD format (optional, defaults to today)
Returns:
Confirmation that the task was marked complete.
"""
if not obsidian_enabled:
return "Obsidian integration is not configured."
try:
from datetime import datetime as dt
parsed_date = dt.strptime(date, "%Y-%m-%d") if date else None
result = obsidian_service.complete_task_in_daily_note(task, parsed_date)
if result["success"]:
return f"Marked '{result['completed_task']}' as complete."
return f"Could not complete task: {result.get('error', 'unknown error')}"
except Exception as e:
return f"Error completing task: {str(e)}"
@tool
async def obsidian_create_task(
title: str,
content: str = "",
folder: str = "tasks",
due_date: str = "",
tags: str = "",
) -> str:
"""Create a new task note in your Obsidian vault.
Use this tool when you want to:
- Create a task to remember to do something
- Add a task with a due date
- Track tasks in your vault
Args:
title: The title of the task
content: The description of the task (optional)
folder: The folder to place the task (default: "tasks")
due_date: Due date in YYYY-MM-DD format (optional)
tags: Comma-separated list of tags to add (optional)
Returns:
Path to the created task note
"""
if not obsidian_enabled:
return "Obsidian integration is not configured. Please set OBSIDIAN_VAULT_PATH environment variable."
try:
# Parse tags from comma-separated string
tag_list = [tag.strip() for tag in tags.split(",") if tag.strip()]
relative_path = obsidian_service.create_task(
title=title,
content=content,
folder=folder,
due_date=due_date or None,
tags=tag_list,
)
return f"Successfully created task: {relative_path}"
except Exception as e:
return f"Error creating task: {str(e)}"
# Create tools list based on what's available
tools = [get_current_date, simba_search, web_search]
if ynab_enabled:
tools.extend(
[
ynab_budget_summary,
ynab_search_transactions,
ynab_category_spending,
ynab_insights,
]
)
if obsidian_enabled:
tools.extend(
[
obsidian_search_notes,
obsidian_read_note,
obsidian_create_note,
obsidian_create_task,
journal_get_today,
journal_get_tasks,
journal_add_task,
journal_complete_task,
]
)
# Llama 3.1 supports native function calling via OpenAI-compatible API
main_agent = create_agent(model=model_with_fallback, tools=tools)

View File

@@ -1,7 +1,7 @@
from quart import Blueprint, jsonify from quart import Blueprint, jsonify
from quart_jwt_extended import jwt_refresh_token_required from quart_jwt_extended import jwt_refresh_token_required
from .logic import get_vector_store_stats, index_documents, vector_store from .logic import fetch_obsidian_documents, get_vector_store_stats, index_documents, index_obsidian_documents, vector_store
from blueprints.users.decorators import admin_required from blueprints.users.decorators import admin_required
rag_blueprint = Blueprint("rag_api", __name__, url_prefix="/api/rag") rag_blueprint = Blueprint("rag_api", __name__, url_prefix="/api/rag")
@@ -45,3 +45,15 @@ async def trigger_reindex():
return jsonify({"status": "success", "stats": stats}) return jsonify({"status": "success", "stats": stats})
except Exception as e: except Exception as e:
return jsonify({"status": "error", "message": str(e)}), 500 return jsonify({"status": "error", "message": str(e)}), 500
@rag_blueprint.post("/index-obsidian")
@admin_required
async def trigger_obsidian_index():
    """Re-index every Obsidian markdown document into the vector store.

    Admin only. Responds with the indexing result plus fresh store stats,
    or a 500 payload describing the failure.
    """
    try:
        outcome = await index_obsidian_documents()
        store_stats = get_vector_store_stats()
        return jsonify({"status": "success", "result": outcome, "stats": store_stats})
    except Exception as e:
        return jsonify({"status": "error", "message": str(e)}), 500

View File

@@ -1,8 +1,12 @@
import os import os
import tempfile import tempfile
from dotenv import load_dotenv
import httpx import httpx
# Load environment variables
load_dotenv()
class PaperlessNGXService: class PaperlessNGXService:
def __init__(self): def __init__(self):

View File

@@ -1,12 +1,17 @@
import datetime import datetime
import os import os
from dotenv import load_dotenv
from langchain_chroma import Chroma from langchain_chroma import Chroma
from langchain_core.documents import Document from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter from langchain_text_splitters import RecursiveCharacterTextSplitter
from .fetchers import PaperlessNGXService from .fetchers import PaperlessNGXService
from utils.obsidian_service import ObsidianService
# Load environment variables
load_dotenv()
embeddings = OpenAIEmbeddings(model="text-embedding-3-small") embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
@@ -54,12 +59,75 @@ async def fetch_documents_from_paperless_ngx() -> list[Document]:
async def index_documents(): async def index_documents():
"""Index Paperless-NGX documents into vector store."""
documents = await fetch_documents_from_paperless_ngx() documents = await fetch_documents_from_paperless_ngx()
splits = text_splitter.split_documents(documents) splits = text_splitter.split_documents(documents)
await vector_store.aadd_documents(documents=splits) await vector_store.aadd_documents(documents=splits)
async def fetch_obsidian_documents() -> list[Document]:
    """Walk the Obsidian vault and load every markdown file as a Document.

    Files that fail to read or parse are skipped (logged to stdout) rather
    than aborting the whole fetch.

    Returns:
        List of LangChain Document objects with source='obsidian' metadata.
    """
    service = ObsidianService()
    docs: list[Document] = []
    for md_path in service.walk_vault():
        try:
            with open(md_path, "r", encoding="utf-8") as fh:
                raw = fh.read()
            parsed = service.parse_markdown(raw, md_path)
            # Carry over any extra frontmatter fields, except the ones we
            # place explicitly (created_at) or deliberately drop (created_by).
            extra = {
                key: value
                for key, value in parsed["metadata"].items()
                if key not in ["created_at", "created_by"]
            }
            docs.append(
                Document(
                    page_content=parsed["content"],
                    metadata={
                        "source": "obsidian",
                        "filepath": parsed["filepath"],
                        "tags": parsed["tags"],
                        "created_at": parsed["metadata"].get("created_at"),
                        **extra,
                    },
                )
            )
        except Exception as e:
            print(f"Error reading {md_path}: {e}")
            continue
    return docs
async def index_obsidian_documents():
    """Index all Obsidian markdown documents into vector store.

    Deletes existing obsidian source chunks before re-indexing so that
    removed or renamed notes do not leave stale chunks behind.

    Returns:
        dict with the number of source documents indexed, e.g. {"indexed": 12}.
    """
    # FIX: removed an unused `ObsidianService()` instantiation here —
    # fetch_obsidian_documents() constructs its own service instance.
    documents = await fetch_obsidian_documents()
    if not documents:
        print("No Obsidian documents found to index")
        return {"indexed": 0}
    # Delete existing obsidian chunks (Chroma .get is synchronous).
    existing_results = vector_store.get(where={"source": "obsidian"})
    if existing_results.get("ids"):
        await vector_store.adelete(existing_results["ids"])
    # Split and index documents
    splits = text_splitter.split_documents(documents)
    await vector_store.aadd_documents(documents=splits)
    return {"indexed": len(documents)}
async def query_vector_store(query: str): async def query_vector_store(query: str):
retrieved_docs = await vector_store.asimilarity_search(query, k=2) retrieved_docs = await vector_store.asimilarity_search(query, k=2)
serialized = "\n\n".join( serialized = "\n\n".join(

View File

@@ -10,6 +10,7 @@ class User(Model):
username = fields.CharField(max_length=255) username = fields.CharField(max_length=255)
password = fields.BinaryField(null=True) # Hashed - nullable for OIDC users password = fields.BinaryField(null=True) # Hashed - nullable for OIDC users
email = fields.CharField(max_length=100, unique=True) email = fields.CharField(max_length=100, unique=True)
whatsapp_number = fields.CharField(max_length=30, unique=True, null=True, index=True)
# OIDC fields # OIDC fields
oidc_subject = fields.CharField( oidc_subject = fields.CharField(

View File

@@ -0,0 +1,254 @@
import os
import logging
import asyncio
import functools
import time
from collections import defaultdict
from quart import Blueprint, request, jsonify, abort
from twilio.request_validator import RequestValidator
from twilio.twiml.messaging_response import MessagingResponse
from blueprints.users.models import User
from blueprints.conversation.logic import (
get_conversation_for_user,
add_message_to_conversation,
get_conversation_transcript,
)
from blueprints.conversation.agents import main_agent
whatsapp_blueprint = Blueprint("whatsapp_api", __name__, url_prefix="/api/whatsapp")
# Configure logging
logger = logging.getLogger(__name__)
# Rate limiting: per-number message timestamps
# Format: {phone_number: [timestamp1, timestamp2, ...]}
# Timestamps are time.monotonic() values, so wall-clock changes can't skew the window.
# NOTE(review): in-process only — resets on restart and is not shared across workers.
_rate_limit_store: dict[str, list[float]] = defaultdict(list)
# Configurable via env: max messages per window (default: 10 per 60s)
RATE_LIMIT_MAX = int(os.getenv("WHATSAPP_RATE_LIMIT_MAX", "10"))
# Window length in seconds.
RATE_LIMIT_WINDOW = int(os.getenv("WHATSAPP_RATE_LIMIT_WINDOW", "60"))
# Max message length to process (WhatsApp max is 4096, but we cap for LLM sanity)
MAX_MESSAGE_LENGTH = 2000
def _twiml_response(text: str) -> tuple[str, int]:
    """Wrap *text* in a TwiML MessagingResponse and return (xml_body, 200)."""
    reply = MessagingResponse()
    reply.message(text)
    return str(reply), 200
def _check_rate_limit(phone_number: str) -> bool:
    """Sliding-window rate limiter keyed by phone number.

    Prunes timestamps that fell out of the RATE_LIMIT_WINDOW, then admits
    the message (recording its timestamp) unless RATE_LIMIT_MAX messages
    are already inside the window.

    Returns:
        True if the request is allowed, False if rate-limited.
    """
    now = time.monotonic()
    window_start = now - RATE_LIMIT_WINDOW
    # Keep only timestamps still inside the window; store the pruned list back.
    recent = [stamp for stamp in _rate_limit_store[phone_number] if stamp > window_start]
    _rate_limit_store[phone_number] = recent
    if len(recent) >= RATE_LIMIT_MAX:
        return False
    # `recent` is the stored list object, so this records the message.
    recent.append(now)
    return True
def validate_twilio_request(f):
    """Decorator to validate that the request comes from Twilio.

    Validates the X-Twilio-Signature header using the TWILIO_AUTH_TOKEN.
    Set TWILIO_WEBHOOK_URL if behind a reverse proxy (e.g., ngrok, Caddy)
    so the validated URL matches what Twilio signed against.
    Set TWILIO_SIGNATURE_VALIDATION=false to disable in development.

    Responds 403 (via abort) on any validation failure.
    """
    @functools.wraps(f)
    async def decorated_function(*args, **kwargs):
        # Explicit opt-out intended for local development only.
        if os.getenv("TWILIO_SIGNATURE_VALIDATION", "true").lower() == "false":
            return await f(*args, **kwargs)
        # Fail closed: without the auth token nothing can be verified.
        auth_token = os.getenv("TWILIO_AUTH_TOKEN")
        if not auth_token:
            logger.error("TWILIO_AUTH_TOKEN not set — rejecting request")
            abort(403)
        twilio_signature = request.headers.get("X-Twilio-Signature")
        if not twilio_signature:
            logger.warning("Missing X-Twilio-Signature header")
            abort(403)
        # Use configured webhook URL if behind a proxy, otherwise use request URL
        url = os.getenv("TWILIO_WEBHOOK_URL") or request.url
        form_data = await request.form
        # Twilio signs the exact URL plus the POSTed form fields.
        validator = RequestValidator(auth_token)
        if not validator.validate(url, form_data, twilio_signature):
            logger.warning(f"Invalid Twilio signature for URL: {url}")
            abort(403)
        return await f(*args, **kwargs)
    return decorated_function
# System prompt with Simba's facts and medical information. Hoisted to a
# module-level constant so the request handler stays readable.
_SYSTEM_PROMPT = """You are a helpful cat assistant named Simba that understands veterinary terms. When there are questions to you specifically, they are referring to Simba the cat. Answer the user in as if you were a cat named Simba. Don't act too catlike. Be assertive.
SIMBA FACTS (as of January 2026):
- Name: Simba
- Species: Feline (Domestic Short Hair / American Short Hair)
- Sex: Male, Neutered
- Date of Birth: August 8, 2016 (approximately 9 years 5 months old)
- Color: Orange
- Current Weight: 16 lbs (as of 1/8/2026)
- Owner: Ryan Chen
- Location: Long Island City, NY
- Veterinarian: Court Square Animal Hospital
Medical Conditions:
- Hypertrophic Cardiomyopathy (HCM): Diagnosed 12/11/2025. Concentric left ventricular hypertrophy with no left atrial dilation. Grade II-III/VI systolic heart murmur. No cardiac medications currently needed. Must avoid Domitor, acepromazine, and ketamine during anesthesia.
- Dental Issues: Prior extraction of teeth 307 and 407 due to resorption. Tooth 107 extracted on 1/8/2026. Early resorption lesions present on teeth 207, 309, and 409.
Recent Medical Events:
- 1/8/2026: Dental cleaning and tooth 107 extraction. Prescribed Onsior for 3 days. Oravet sealant applied.
- 12/11/2025: Echocardiogram confirming HCM diagnosis. Pre-op bloodwork was normal.
- 12/1/2025: Visited for decreased appetite/nausea. Received subcutaneous fluids and Cerenia.
Diet & Lifestyle:
- Diet: Hill's I/D wet and dry food
- Supplements: Plaque Off
- Indoor only cat, only pet in the household
Upcoming Appointments:
- Rabies Vaccine: Due 2/19/2026
- Routine Examination: Due 6/1/2026
- FVRCP-3yr Vaccine: Due 10/2/2026
IMPORTANT: When users ask factual questions about Simba's health, medical history, veterinary visits, medications, weight, or any information that would be in documents, you MUST use the simba_search tool to retrieve accurate information before answering. Do not rely on general knowledge - always search the documents for factual questions.
BUDGET & FINANCE (YNAB Integration):
You have access to Ryan's budget data through YNAB (You Need A Budget). When users ask about financial matters, use the appropriate YNAB tools:
- Use ynab_budget_summary for overall budget health and status questions
- Use ynab_search_transactions to find specific purchases or spending at particular stores
- Use ynab_category_spending to analyze spending by category for a month
- Use ynab_insights to provide spending trends, patterns, and recommendations
Always use these tools when asked about budgets, spending, transactions, or financial health."""


@whatsapp_blueprint.route("/webhook", methods=["POST"])
@validate_twilio_request
async def webhook():
    """Handle incoming WhatsApp messages from Twilio.

    Flow: validate/normalize the payload, rate-limit the sender, resolve or
    auto-provision the user, persist the inbound message, run the agent over
    recent history, persist and return Simba's reply as TwiML.
    """
    form_data = await request.form
    from_number = form_data.get("From")  # e.g., "whatsapp:+1234567890"
    body = form_data.get("Body")
    if not from_number or not body:
        # No sender -> plain 400; known sender with no body -> polite TwiML.
        return _twiml_response("Invalid message received.") if from_number else ("Missing From or Body", 400)

    # Strip whitespace and check for empty body
    body = body.strip()
    if not body:
        return _twiml_response("I received an empty message. Please send some text!")

    # Rate limiting
    if not _check_rate_limit(from_number):
        logger.warning(f"Rate limit exceeded for {from_number}")
        return _twiml_response("You're sending messages too quickly. Please wait a moment and try again.")

    # Truncate overly long messages
    if len(body) > MAX_MESSAGE_LENGTH:
        body = body[:MAX_MESSAGE_LENGTH]
        logger.info(f"Truncated long message from {from_number} to {MAX_MESSAGE_LENGTH} chars")

    logger.info(f"Received WhatsApp message from {from_number}: {body[:100]}")

    # Identify or create user
    user = await User.filter(whatsapp_number=from_number).first()
    if not user:
        # FIX: strip whitespace so env values like "+1..., +1..." still match,
        # and drop empty segments so an unset env var never allowlists "".
        allowed_numbers = [
            n.strip() for n in os.getenv("ALLOWED_WHATSAPP_NUMBERS", "").split(",") if n.strip()
        ]
        if from_number not in allowed_numbers and "*" not in allowed_numbers:
            return _twiml_response("Sorry, you are not authorized to use this service.")
        # Create a new user for this WhatsApp number
        username = f"wa_{from_number.split(':')[-1]}"
        try:
            user = await User.create(
                username=username,
                email=f"{username}@whatsapp.simbarag.local",
                whatsapp_number=from_number,
                auth_provider="whatsapp",
            )
            logger.info(f"Created new user for WhatsApp: {username}")
        except Exception as e:
            logger.error(f"Failed to create user for {from_number}: {e}")
            return _twiml_response("Sorry, something went wrong setting up your account. Please try again later.")

    # Get or create a conversation for this user
    try:
        conversation = await get_conversation_for_user(user=user)
        await conversation.fetch_related("messages")
    except Exception as e:
        logger.error(f"Failed to get conversation for user {user.username}: {e}")
        return _twiml_response("Sorry, something went wrong. Please try again later.")

    # Add user message to conversation
    await add_message_to_conversation(
        conversation=conversation,
        message=body,
        speaker="user",
        user=user,
    )

    # NOTE(review): the previous version also awaited get_conversation_transcript()
    # here but never used the result; that dead database call was removed.

    # Build messages payload for LangChain agent with system prompt and history
    try:
        # Get last 10 messages for conversation history
        messages = await conversation.messages.all()
        recent_messages = list(messages)[-10:]

        messages_payload = [{"role": "system", "content": _SYSTEM_PROMPT}]
        # Add recent conversation history (exclude the message we just added)
        for msg in recent_messages[:-1]:
            role = "user" if msg.speaker == "user" else "assistant"
            messages_payload.append({"role": role, "content": msg.text})
        # Add current query
        messages_payload.append({"role": "user", "content": body})

        # Invoke LangChain agent
        logger.info(f"Invoking LangChain agent with {len(messages_payload)} messages")
        response = await main_agent.ainvoke({"messages": messages_payload})
        # FIX: guard against an empty message list instead of a bare [-1]
        # IndexError; the except below turns this into the fallback reply.
        agent_messages = response.get("messages", [])
        if not agent_messages:
            raise ValueError("Agent returned no messages")
        response_text = agent_messages[-1].content

        # Log YNAB availability
        if os.getenv("YNAB_ACCESS_TOKEN"):
            logger.info("YNAB integration is available for this conversation")
        else:
            logger.info("YNAB integration is not configured")
    except Exception as e:
        logger.error(f"Error invoking agent: {e}")
        response_text = "Sorry, I'm having trouble thinking right now. 😿"

    # Add Simba's response to conversation
    await add_message_to_conversation(
        conversation=conversation,
        message=response_text,
        speaker="simba",
        user=user,
    )

    return _twiml_response(response_text)

View File

@@ -7,6 +7,10 @@ from typing import Dict, Any
from authlib.jose import jwt from authlib.jose import jwt
from authlib.jose.errors import JoseError from authlib.jose.errors import JoseError
import httpx import httpx
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
class OIDCConfig: class OIDCConfig:

View File

@@ -1,71 +0,0 @@
services:
postgres:
image: postgres:16-alpine
environment:
- POSTGRES_USER=raggr
- POSTGRES_PASSWORD=raggr_dev_password
- POSTGRES_DB=raggr
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U raggr"]
interval: 5s
timeout: 5s
retries: 5
# raggr service disabled - run locally for development
# raggr:
# build:
# context: .
# dockerfile: Dockerfile.dev
# image: torrtle/simbarag:dev
# ports:
# - "8080:8080"
# env_file:
# - .env
# environment:
# - PAPERLESS_TOKEN=${PAPERLESS_TOKEN}
# - BASE_URL=${BASE_URL}
# - OLLAMA_URL=${OLLAMA_URL:-http://localhost:11434}
# - CHROMADB_PATH=/app/data/chromadb
# - OPENAI_API_KEY=${OPENAI_API_KEY}
# - JWT_SECRET_KEY=${JWT_SECRET_KEY}
# - OIDC_ISSUER=${OIDC_ISSUER}
# - OIDC_CLIENT_ID=${OIDC_CLIENT_ID}
# - OIDC_CLIENT_SECRET=${OIDC_CLIENT_SECRET}
# - OIDC_REDIRECT_URI=${OIDC_REDIRECT_URI}
# - OIDC_USE_DISCOVERY=${OIDC_USE_DISCOVERY:-true}
# - DATABASE_URL=postgres://raggr:raggr_dev_password@postgres:5432/raggr
# - FLASK_ENV=development
# - PYTHONUNBUFFERED=1
# - NODE_ENV=development
# - TAVILY_KEY=${TAVILIY_KEY}
# depends_on:
# postgres:
# condition: service_healthy
# volumes:
# - chromadb_data:/app/data/chromadb
# - ./migrations:/app/migrations # Bind mount for migrations (bidirectional)
# develop:
# watch:
# # Sync+restart on any file change in root directory
# - action: sync+restart
# path: .
# target: /app
# ignore:
# - __pycache__/
# - "*.pyc"
# - "*.pyo"
# - "*.pyd"
# - .git/
# - chromadb/
# - node_modules/
# - raggr-frontend/dist/
# - docs/
# - .venv/
volumes:
chromadb_data:
postgres_data:

View File

@@ -32,18 +32,35 @@ services:
- CHROMADB_PATH=/app/data/chromadb - CHROMADB_PATH=/app/data/chromadb
- OPENAI_API_KEY=${OPENAI_API_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY}
- JWT_SECRET_KEY=${JWT_SECRET_KEY} - JWT_SECRET_KEY=${JWT_SECRET_KEY}
- LLAMA_SERVER_URL=${LLAMA_SERVER_URL}
- LLAMA_MODEL_NAME=${LLAMA_MODEL_NAME}
- OIDC_ISSUER=${OIDC_ISSUER} - OIDC_ISSUER=${OIDC_ISSUER}
- OIDC_CLIENT_ID=${OIDC_CLIENT_ID} - OIDC_CLIENT_ID=${OIDC_CLIENT_ID}
- OIDC_CLIENT_SECRET=${OIDC_CLIENT_SECRET} - OIDC_CLIENT_SECRET=${OIDC_CLIENT_SECRET}
- OIDC_REDIRECT_URI=${OIDC_REDIRECT_URI} - OIDC_REDIRECT_URI=${OIDC_REDIRECT_URI}
- OIDC_USE_DISCOVERY=${OIDC_USE_DISCOVERY:-true} - OIDC_USE_DISCOVERY=${OIDC_USE_DISCOVERY:-true}
- DATABASE_URL=${DATABASE_URL:-postgres://raggr:changeme@postgres:5432/raggr} - DATABASE_URL=${DATABASE_URL:-postgres://raggr:changeme@postgres:5432/raggr}
- TAVILY_KEY=${TAVILIY_KEY} - TAVILY_API_KEY=${TAVILY_API_KEY}
- YNAB_ACCESS_TOKEN=${YNAB_ACCESS_TOKEN}
- YNAB_BUDGET_ID=${YNAB_BUDGET_ID}
- TWILIO_ACCOUNT_SID=${TWILIO_ACCOUNT_SID}
- TWILIO_AUTH_TOKEN=${TWILIO_AUTH_TOKEN}
- TWILIO_WHATSAPP_NUMBER=${TWILIO_WHATSAPP_NUMBER}
- ALLOWED_WHATSAPP_NUMBERS=${ALLOWED_WHATSAPP_NUMBERS}
- TWILIO_SIGNATURE_VALIDATION=${TWILIO_SIGNATURE_VALIDATION:-true}
- TWILIO_WEBHOOK_URL=${TWILIO_WEBHOOK_URL:-}
- OBSIDIAN_AUTH_TOKEN=${OBSIDIAN_AUTH_TOKEN}
- OBSIDIAN_VAULT_ID=${OBSIDIAN_VAULT_ID}
- OBSIDIAN_E2E_PASSWORD=${OBSIDIAN_E2E_PASSWORD}
- OBSIDIAN_DEVICE_NAME=${OBSIDIAN_DEVICE_NAME}
- OBSIDIAN_CONTINUOUS_SYNC=${OBSIDIAN_CONTINUOUS_SYNC:-false}
- OBSIDIAN_VAULT_PATH=${OBSIDIAN_VAULT_PATH:-/app/data/obsidian}
depends_on: depends_on:
postgres: postgres:
condition: service_healthy condition: service_healthy
volumes: volumes:
- chromadb_data:/app/data/chromadb - chromadb_data:/app/data/chromadb
- ./obvault:/app/data/obsidian
restart: unless-stopped restart: unless-stopped
volumes: volumes:

View File

View File

@@ -225,6 +225,10 @@ def filter_indexed_files(docs):
def reindex(): def reindex():
with sqlite3.connect("database/visited.db") as conn: with sqlite3.connect("database/visited.db") as conn:
c = conn.cursor() c = conn.cursor()
# Ensure the table exists before trying to delete from it
c.execute(
"CREATE TABLE IF NOT EXISTS indexed_documents (id INTEGER PRIMARY KEY AUTOINCREMENT, paperless_id INTEGER)"
)
c.execute("DELETE FROM indexed_documents") c.execute("DELETE FROM indexed_documents")
conn.commit() conn.commit()

View File

@@ -0,0 +1,42 @@
from tortoise import BaseDBAsyncClient
RUN_IN_TRANSACTION = True
async def upgrade(db: BaseDBAsyncClient) -> str:
    # FIX: VARCHAR(30) to match the model (CharField(max_length=30)) — the
    # previous VARCHAR(20) could not hold "whatsapp:" plus a full E.164
    # number (up to 25 characters total).
    return """
ALTER TABLE "users" ADD "whatsapp_number" VARCHAR(30) UNIQUE;"""
async def downgrade(db: BaseDBAsyncClient) -> str:
    # Reverses upgrade(): drops the auto-generated unique index (IF EXISTS,
    # since some backends create it implicitly) and then the column itself.
    return """
DROP INDEX IF EXISTS "uid_users_whatsap_e6b586";
ALTER TABLE "users" DROP COLUMN "whatsapp_number";"""
MODELS_STATE = (
"eJztmm1v4jgQx78Kyquu1KtatnRX1emkQOkttwuceNinXhWZxECuiZ2NnaWo6nc/2yTESR"
"wgFCjs8aYtYw+2fx5n/p70SXOxBR1yVsPoJ/QJoDZG2nXpSUPAhewPZftpSQOeF7dyAwUD"
"RziYUk/RAgaE+sCkrHEIHAKZyYLE9G0vHAwFjsON2GQdbTSKTQGyfwTQoHgE6Rj6rOHunp"
"ltZMFHSKKP3oMxtKFjJeZtW3xsYTfo1BO2fr9xcyt68uEGhomdwEVxb29KxxjNuweBbZ1x"
"H942ggj6gEJLWgafZbjsyDSbMTNQP4DzqVqxwYJDEDgchvb7MEAmZ1ASI/Efl39oBfAw1B"
"ytjShn8fQ8W1W8ZmHV+FC1D3rn5O3VG7FKTOjIF42CiPYsHAEFM1fBNQYpfmdQ1sbAV6OM"
"+qdgsomugzEyxBzjGIpARoDWo6a54NFwIBrRMftYrlQWYPysdwRJ1kugxCyuZ1HfCpvKsz"
"aONEZo+pAv2QA0C/KGtVDbhWqYSc8UUit0PYv+2FPAbA1WGznT8BAs4NtrNOvdnt78m6/E"
"JeSHIxDpvTpvKQvrNGU9uUptxfxLSl8avQ8l/rH0vd2qp2N/3q/3XeNzAgHFBsITA1jSeY"
"2sEZjExgaetebGJj2PG/uqGxtOXtpXAn2jWAaRXF6QRsK57XAT108aPPUOH5Q5g8PIwrvF"
"PrRH6COcCoQNNg+ATFWyCEVHP/yafYUWW+NZ+GAyVyNyULDVsTVBOsueerem39Q1wXAAzI"
"cJ8C0jB6YLCQEjSLJAq6Hn7ccOdObSTM1SFnDN2Tfu51Mlj61ghctYYpSgl21yy27aAhBb"
"txWOzUdaQGeJCpYgriaGDXkjj6L4oEUxhY+KlN9jVjXKqP+hiOJFqbz+tZfI4pH0PWnqX9"
"8kMvmnduvPqLsklWuf2tWjQv4VhVRWIRMPggeVGOAXyDoK3IwUSOyu5P7KR0frd+ud6xLP"
"6P+gbqNZ1a9LxHYHQFttixO3zIvzFS6ZF+e5d0zelDpAcqIp9phXuG7ymX+gEtZMFbxeKG"
"XT9bO9pbhU0yrCpai23aaSE3cGhXSL7hL5Wo0f7aM2O3xtxvexaNFS9jkUjbaDwqUHCJlg"
"XxGZVRsBf6qGKXulYA6mdHb/2dcrvQpeletVWW4xZNVGS+98U0veqL8ct9VvvbqeogtdYD"
"tFonTusJkQXX7iNxmgF+eriZ5FqicjeyZjQAl7pBtMSQ7yZKYapsJ1LazpUN0t1fIqUMv5"
"TMsZpNi2TIMEg3+hqbiM5fNM+x0izG08Q9n1aGx4Pv5pW8UCNOO4u8SkOdgEzgsye5JrZZ"
"UgreQHaSUTpI4FPGPk48BTlEX/6rZbaqQptxTQPmKrvLNsk56WHJvQ+63hvbvfjmriK19c"
"m0mXYVJpin/BsTbzP6nNHN9e/hIbO385krljL3uzlPlXnc28Xtpnfb/b10o69G1zrCnKEW"
"HL6aKCBIj77E1FooFy3nAoCxIccyoYwp1/1XuJeLn3W/ni8t3l+7dXl+9ZFzGTueXdgodB"
"o9VbUoDgB0FZNczXepLLsfwQS2d2NIoI5ln3wwS4lesxG5FCpEjv+RJZcnkteby1Qs7G5H"
"GBbLv59PL8Hy/ZG1k="
)

View File

@@ -20,7 +20,7 @@ dependencies = [
"pony>=0.7.19", "pony>=0.7.19",
"flask-login>=0.6.3", "flask-login>=0.6.3",
"quart>=0.20.0", "quart>=0.20.0",
"tortoise-orm>=0.25.1", "tortoise-orm>=0.25.1,<1.0.0",
"quart-jwt-extended>=0.1.0", "quart-jwt-extended>=0.1.0",
"pre-commit>=4.3.0", "pre-commit>=4.3.0",
"tortoise-orm-stubs>=1.0.2", "tortoise-orm-stubs>=1.0.2",
@@ -34,6 +34,9 @@ dependencies = [
"langchain-community>=0.4.1", "langchain-community>=0.4.1",
"jq>=1.10.0", "jq>=1.10.0",
"tavily-python>=0.7.17", "tavily-python>=0.7.17",
"ynab>=1.3.0",
"ollama>=0.6.1",
"twilio>=9.10.2",
] ]
[tool.aerich] [tool.aerich]

View File

@@ -1,7 +1,170 @@
@import "tailwindcss"; @import "tailwindcss";
@import url('https://fonts.googleapis.com/css2?family=Nunito:wght@400;500;600;700&family=Playfair+Display:ital,wght@0,600;0,700;1,600&display=swap');
@theme {
--color-cream: #FBF7F0;
--color-cream-dark: #F3EDE2;
--color-warm-white: #FFFDF9;
--color-amber-glow: #E8943A;
--color-amber-soft: #F5C882;
--color-amber-pale: #FFF0D6;
--color-forest: #2D5A3D;
--color-forest-light: #3D763A;
--color-forest-pale: #E8F5E4;
--color-charcoal: #2C2420;
--color-warm-gray: #8A7E74;
--color-sand: #D4C5B0;
--color-sand-light: #E8DED0;
--color-blush: #F2D1B3;
--color-sidebar-bg: #2C2420;
--color-sidebar-hover: #3D352F;
--color-sidebar-active: #4A3F38;
--font-display: 'Playfair Display', Georgia, serif;
--font-body: 'Nunito', -apple-system, BlinkMacSystemFont, sans-serif;
}
* {
box-sizing: border-box;
}
body { body {
margin: 0; margin: 0;
font-family: Inter, Avenir, Helvetica, Arial, sans-serif; font-family: var(--font-body);
background-color: #F9F5EB; background-color: var(--color-cream);
color: var(--color-charcoal);
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
/* Scrollbar styling */
::-webkit-scrollbar {
width: 6px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: var(--color-sand);
border-radius: 3px;
}
::-webkit-scrollbar-thumb:hover {
background: var(--color-warm-gray);
}
/* Markdown content styling in answer bubbles */
.markdown-content h1,
.markdown-content h2,
.markdown-content h3 {
font-family: var(--font-display);
font-weight: 600;
margin-top: 1em;
margin-bottom: 0.5em;
line-height: 1.3;
}
.markdown-content h1 { font-size: 1.25rem; }
.markdown-content h2 { font-size: 1.1rem; }
.markdown-content h3 { font-size: 1rem; }
.markdown-content p {
margin: 0.5em 0;
line-height: 1.65;
}
.markdown-content ul,
.markdown-content ol {
padding-left: 1.5em;
margin: 0.5em 0;
}
.markdown-content li {
margin: 0.25em 0;
line-height: 1.6;
}
.markdown-content code {
background: rgba(0, 0, 0, 0.06);
padding: 0.15em 0.4em;
border-radius: 4px;
font-size: 0.88em;
font-family: 'SF Mono', 'Fira Code', monospace;
}
.markdown-content pre {
background: var(--color-charcoal);
color: #F3EDE2;
padding: 1em;
border-radius: 8px;
overflow-x: auto;
margin: 0.75em 0;
}
.markdown-content pre code {
background: none;
padding: 0;
color: inherit;
}
.markdown-content a {
color: var(--color-forest);
text-decoration: underline;
text-underline-offset: 2px;
}
.markdown-content blockquote {
border-left: 3px solid var(--color-amber-glow);
padding-left: 1em;
margin: 0.75em 0;
color: var(--color-warm-gray);
font-style: italic;
}
/* Loading skeleton animation */
@keyframes shimmer {
0% { background-position: -200% 0; }
100% { background-position: 200% 0; }
}
.skeleton-shimmer {
background: linear-gradient(
90deg,
var(--color-sand-light) 25%,
var(--color-cream) 50%,
var(--color-sand-light) 75%
);
background-size: 200% 100%;
animation: shimmer 1.8s ease-in-out infinite;
}
/* Fade-in animation for messages */
@keyframes fadeSlideUp {
from {
opacity: 0;
transform: translateY(12px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
.message-enter {
animation: fadeSlideUp 0.35s ease-out forwards;
}
/* Subtle pulse for loading dots */
@keyframes catPulse {
0%, 80%, 100% { opacity: 0.3; transform: scale(0.8); }
40% { opacity: 1; transform: scale(1); }
}
.loading-dot {
animation: catPulse 1.4s ease-in-out infinite;
}
.loading-dot:nth-child(2) { animation-delay: 0.2s; }
.loading-dot:nth-child(3) { animation-delay: 0.4s; }
/* Textarea focus glow */
textarea:focus {
outline: none;
box-shadow: 0 0 0 2px var(--color-amber-soft);
} }

View File

@@ -5,6 +5,7 @@ import { AuthProvider } from "./contexts/AuthContext";
import { ChatScreen } from "./components/ChatScreen"; import { ChatScreen } from "./components/ChatScreen";
import { LoginScreen } from "./components/LoginScreen"; import { LoginScreen } from "./components/LoginScreen";
import { conversationService } from "./api/conversationService"; import { conversationService } from "./api/conversationService";
import catIcon from "./assets/cat.png";
const AppContainer = () => { const AppContainer = () => {
const [isAuthenticated, setAuthenticated] = useState<boolean>(false); const [isAuthenticated, setAuthenticated] = useState<boolean>(false);
@@ -44,8 +45,15 @@ const AppContainer = () => {
// Show loading state while checking authentication // Show loading state while checking authentication
if (isChecking) { if (isChecking) {
return ( return (
<div className="h-screen flex items-center justify-center bg-white/85"> <div className="h-screen flex flex-col items-center justify-center bg-cream gap-4">
<div className="text-xl">Loading...</div> <img
src={catIcon}
alt="Simba"
className="w-16 h-16 animate-bounce"
/>
<p className="text-warm-gray font-medium text-lg tracking-wide">
waking up simba...
</p>
</div> </div>
); );
} }

View File

@@ -1,5 +1,13 @@
import { userService } from "./userService"; import { userService } from "./userService";
export type SSEEvent =
| { type: "tool_start"; tool: string }
| { type: "tool_end"; tool: string }
| { type: "response"; message: string }
| { type: "error"; message: string };
export type SSEEventCallback = (event: SSEEvent) => void;
interface Message { interface Message {
id: string; id: string;
text: string; text: string;
@@ -35,12 +43,14 @@ class ConversationService {
async sendQuery( async sendQuery(
query: string, query: string,
conversation_id: string, conversation_id: string,
signal?: AbortSignal,
): Promise<QueryResponse> { ): Promise<QueryResponse> {
const response = await userService.fetchWithRefreshToken( const response = await userService.fetchWithRefreshToken(
`${this.conversationBaseUrl}/query`, `${this.conversationBaseUrl}/query`,
{ {
method: "POST", method: "POST",
body: JSON.stringify({ query, conversation_id }), body: JSON.stringify({ query, conversation_id }),
signal,
}, },
); );
@@ -110,6 +120,59 @@ class ConversationService {
return await response.json(); return await response.json();
} }
/**
 * POST the query to the SSE endpoint and forward each parsed event to
 * `onEvent`. Resolves when the stream ends; throws on a non-OK response.
 * Pass `signal` to allow the caller to abort the request mid-stream.
 */
async streamQuery(
  query: string,
  conversation_id: string,
  onEvent: SSEEventCallback,
  signal?: AbortSignal,
): Promise<void> {
  const response = await userService.fetchWithRefreshToken(
    `${this.conversationBaseUrl}/stream-query`,
    {
      method: "POST",
      body: JSON.stringify({ query, conversation_id }),
      signal,
    },
  );

  if (!response.ok) {
    throw new Error("Failed to stream query");
  }

  await this._readSSEStream(response, onEvent);
}
/**
 * Read a text/event-stream body chunk by chunk, emitting each parsed
 * `data: {...}` JSON payload via `onEvent`. Returns early on `[DONE]`;
 * malformed JSON payloads are silently skipped.
 */
private async _readSSEStream(
  response: Response,
  onEvent: SSEEventCallback,
): Promise<void> {
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = "";

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    // SSE events are separated by a blank line; keep the trailing partial
    // event in the buffer until its terminator arrives in a later chunk.
    const parts = buffer.split("\n\n");
    buffer = parts.pop() ?? "";

    for (const part of parts) {
      const line = part.trim();
      if (!line.startsWith("data: ")) continue;
      const data = line.slice(6);
      if (data === "[DONE]") return;
      try {
        const event = JSON.parse(data) as SSEEvent;
        onEvent(event);
      } catch {
        // ignore malformed events
      }
    }
  }
}
} }
export const conversationService = new ConversationService(); export const conversationService = new ConversationService();

View File

@@ -2,13 +2,14 @@ import { useEffect, useState, useRef } from "react";
import { conversationService } from "../api/conversationService"; import { conversationService } from "../api/conversationService";
import { QuestionBubble } from "./QuestionBubble"; import { QuestionBubble } from "./QuestionBubble";
import { AnswerBubble } from "./AnswerBubble"; import { AnswerBubble } from "./AnswerBubble";
import { ToolBubble } from "./ToolBubble";
import { MessageInput } from "./MessageInput"; import { MessageInput } from "./MessageInput";
import { ConversationList } from "./ConversationList"; import { ConversationList } from "./ConversationList";
import catIcon from "../assets/cat.png"; import catIcon from "../assets/cat.png";
type Message = { type Message = {
text: string; text: string;
speaker: "simba" | "user"; speaker: "simba" | "user" | "tool";
}; };
type QuestionAnswer = { type QuestionAnswer = {
@@ -25,6 +26,24 @@ type ChatScreenProps = {
setAuthenticated: (isAuth: boolean) => void; setAuthenticated: (isAuth: boolean) => void;
}; };
// Human-friendly status line shown in the chat while each agent tool runs.
// FIX: journal_add_task read " Adding task..." with a stray leading space
// where every other entry carries an emoji — restored the missing glyph.
const TOOL_MESSAGES: Record<string, string> = {
  simba_search: "🔍 Searching Simba's records...",
  web_search: "🌐 Searching the web...",
  get_current_date: "📅 Checking today's date...",
  ynab_budget_summary: "💰 Checking budget summary...",
  ynab_search_transactions: "💳 Looking up transactions...",
  ynab_category_spending: "📊 Analyzing category spending...",
  ynab_insights: "📈 Generating budget insights...",
  obsidian_search_notes: "📝 Searching notes...",
  obsidian_read_note: "📖 Reading note...",
  obsidian_create_note: "✏️ Saving note...",
  obsidian_create_task: "✅ Creating task...",
  journal_get_today: "📔 Reading today's journal...",
  journal_get_tasks: "📋 Getting tasks...",
  journal_add_task: "➕ Adding task...",
  journal_complete_task: "✔️ Completing task...",
};
export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => { export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
const [query, setQuery] = useState<string>(""); const [query, setQuery] = useState<string>("");
const [answer, setAnswer] = useState<string>(""); const [answer, setAnswer] = useState<string>("");
@@ -43,12 +62,26 @@ export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
const [isLoading, setIsLoading] = useState<boolean>(false); const [isLoading, setIsLoading] = useState<boolean>(false);
const messagesEndRef = useRef<HTMLDivElement>(null); const messagesEndRef = useRef<HTMLDivElement>(null);
const isMountedRef = useRef<boolean>(true);
const abortControllerRef = useRef<AbortController | null>(null);
const simbaAnswers = ["meow.", "hiss...", "purrrrrr", "yowOWROWWowowr"]; const simbaAnswers = ["meow.", "hiss...", "purrrrrr", "yowOWROWWowowr"];
const scrollToBottom = () => { const scrollToBottom = () => {
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" }); messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
}; };
// Cleanup effect to handle component unmounting.
// Stream callbacks check isMountedRef before calling setState, so this
// flag prevents state updates landing on an unmounted component.
useEffect(() => {
  isMountedRef.current = true;
  return () => {
    isMountedRef.current = false;
    // Abort any pending requests when component unmounts
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
    }
  };
}, []);
const handleSelectConversation = (conversation: Conversation) => { const handleSelectConversation = (conversation: Conversation) => {
setShowConversations(false); setShowConversations(false);
setSelectedConversation(conversation); setSelectedConversation(conversation);
@@ -80,8 +113,6 @@ export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
})); }));
setConversations(parsedConversations); setConversations(parsedConversations);
setSelectedConversation(parsedConversations[0]); setSelectedConversation(parsedConversations[0]);
console.log(parsedConversations);
console.log("JELLYFISH@");
} catch (error) { } catch (error) {
console.error("Failed to load messages:", error); console.error("Failed to load messages:", error);
} }
@@ -106,8 +137,6 @@ export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
useEffect(() => { useEffect(() => {
const loadMessages = async () => { const loadMessages = async () => {
console.log(selectedConversation);
console.log("JELLYFISH");
if (selectedConversation == null) return; if (selectedConversation == null) return;
try { try {
const conversation = await conversationService.getConversation( const conversation = await conversationService.getConversation(
@@ -140,7 +169,6 @@ export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
setIsLoading(true); setIsLoading(true);
if (simbaMode) { if (simbaMode) {
console.log("simba mode activated");
const randomIndex = Math.floor(Math.random() * simbaAnswers.length); const randomIndex = Math.floor(Math.random() * simbaAnswers.length);
const randomElement = simbaAnswers[randomIndex]; const randomElement = simbaAnswers[randomIndex];
setAnswer(randomElement); setAnswer(randomElement);
@@ -156,26 +184,49 @@ export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
return; return;
} }
// Create a new AbortController for this request
const abortController = new AbortController();
abortControllerRef.current = abortController;
try { try {
const result = await conversationService.sendQuery( await conversationService.streamQuery(
query, query,
selectedConversation.id, selectedConversation.id,
(event) => {
if (!isMountedRef.current) return;
if (event.type === "tool_start") {
const friendly =
TOOL_MESSAGES[event.tool] ?? `🔧 Using ${event.tool}...`;
setMessages((prev) => prev.concat([{ text: friendly, speaker: "tool" }]));
} else if (event.type === "response") {
setMessages((prev) =>
prev.concat([{ text: event.message, speaker: "simba" }]),
); );
setQuestionsAnswers( } else if (event.type === "error") {
questionsAnswers.concat([{ question: query, answer: result.response }]), console.error("Stream error:", event.message);
); }
setMessages( },
currMessages.concat([{ text: result.response, speaker: "simba" }]), abortController.signal,
); );
} catch (error) { } catch (error) {
// Ignore abort errors (these are intentional cancellations)
if (error instanceof Error && error.name === "AbortError") {
console.log("Request was aborted");
} else {
console.error("Failed to send query:", error); console.error("Failed to send query:", error);
// If session expired, redirect to login // If session expired, redirect to login
if (error instanceof Error && error.message.includes("Session expired")) { if (error instanceof Error && error.message.includes("Session expired")) {
setAuthenticated(false); setAuthenticated(false);
} }
}
} finally { } finally {
// Only update loading state if component is still mounted
if (isMountedRef.current) {
setIsLoading(false); setIsLoading(false);
} }
// Clear the abort controller reference
abortControllerRef.current = null;
}
}; };
const handleQueryChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => { const handleQueryChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
@@ -190,43 +241,62 @@ export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
} }
}; };
// Clear both stored auth tokens and flip the app back to the login screen.
const handleLogout = () => {
  localStorage.removeItem("access_token");
  localStorage.removeItem("refresh_token");
  setAuthenticated(false);
};
return ( return (
<div className="h-screen flex flex-row bg-[#F9F5EB]"> <div className="h-screen flex flex-row bg-cream">
{/* Sidebar - Expanded */} {/* Sidebar */}
<aside <aside
className={`hidden md:flex md:flex-col bg-[#F9F5EB] border-r border-gray-200 p-4 overflow-y-auto transition-all duration-300 ${sidebarCollapsed ? "w-20" : "w-64"}`} className={`hidden md:flex md:flex-col bg-sidebar-bg transition-all duration-300 ease-in-out ${
sidebarCollapsed ? "w-[68px]" : "w-72"
}`}
> >
{!sidebarCollapsed ? ( {!sidebarCollapsed ? (
<div className="bg-[#F9F5EB]"> <div className="flex flex-col h-full">
<div className="flex flex-row items-center gap-2 mb-6"> {/* Sidebar header */}
<div className="flex items-center gap-3 px-5 py-5 border-b border-white/10">
<img <img
src={catIcon} src={catIcon}
alt="Simba" alt="Simba"
className="cursor-pointer hover:opacity-80" className="w-9 h-9 cursor-pointer hover:scale-110 transition-transform duration-200 flex-shrink-0"
onClick={() => setSidebarCollapsed(true)} onClick={() => setSidebarCollapsed(true)}
/> />
<h2 className="text-3xl bg-[#F9F5EB] font-semibold">asksimba!</h2> <h2 className="font-[family-name:var(--font-display)] text-xl font-bold text-cream tracking-tight">
asksimba
</h2>
</div> </div>
{/* Conversations */}
<div className="flex-1 overflow-y-auto px-3 py-3">
<ConversationList <ConversationList
conversations={conversations} conversations={conversations}
onCreateNewConversation={handleCreateNewConversation} onCreateNewConversation={handleCreateNewConversation}
onSelectConversation={handleSelectConversation} onSelectConversation={handleSelectConversation}
selectedId={selectedConversation?.id}
/> />
<div className="mt-auto pt-4"> </div>
{/* Logout */}
<div className="px-3 pb-4 pt-2 border-t border-white/10">
<button <button
className="w-full p-2 border border-red-400 bg-red-200 hover:bg-red-400 cursor-pointer rounded-md text-sm" className="w-full py-2.5 px-3 text-sm text-cream/60 hover:text-cream hover:bg-white/5
onClick={() => setAuthenticated(false)} rounded-lg transition-all duration-200 cursor-pointer"
onClick={handleLogout}
> >
logout Sign out
</button> </button>
</div> </div>
</div> </div>
) : ( ) : (
<div className="flex flex-col items-center gap-4"> <div className="flex flex-col items-center py-5 h-full">
<img <img
src={catIcon} src={catIcon}
alt="Simba" alt="Simba"
className="cursor-pointer hover:opacity-80" className="w-9 h-9 cursor-pointer hover:scale-110 transition-transform duration-200"
onClick={() => setSidebarCollapsed(false)} onClick={() => setSidebarCollapsed(false)}
/> />
</div> </div>
@@ -236,54 +306,79 @@ export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
{/* Main chat area */} {/* Main chat area */}
<div className="flex-1 flex flex-col h-screen overflow-hidden"> <div className="flex-1 flex flex-col h-screen overflow-hidden">
{/* Mobile header */} {/* Mobile header */}
<header className="md:hidden flex flex-row justify-between items-center gap-3 p-4 border-b border-gray-200 bg-white"> <header className="md:hidden flex items-center justify-between px-4 py-3 bg-warm-white border-b border-sand-light">
<div className="flex flex-row items-center gap-2"> <div className="flex items-center gap-2.5">
<img src={catIcon} alt="Simba" className="w-10 h-10" /> <img src={catIcon} alt="Simba" className="w-8 h-8" />
<h1 className="text-xl">asksimba!</h1> <h1 className="font-[family-name:var(--font-display)] text-lg font-bold text-charcoal">
asksimba
</h1>
</div> </div>
<div className="flex flex-row gap-2"> <div className="flex items-center gap-2">
<button <button
className="p-2 border border-green-400 bg-green-200 hover:bg-green-400 cursor-pointer rounded-md text-sm" className="px-3 py-1.5 text-xs font-medium rounded-lg bg-cream-dark text-charcoal
hover:bg-sand-light transition-colors cursor-pointer"
onClick={() => setShowConversations(!showConversations)} onClick={() => setShowConversations(!showConversations)}
> >
{showConversations ? "hide" : "show"} {showConversations ? "Hide" : "Threads"}
</button> </button>
<button <button
className="p-2 border border-red-400 bg-red-200 hover:bg-red-400 cursor-pointer rounded-md text-sm" className="px-3 py-1.5 text-xs font-medium rounded-lg text-warm-gray
onClick={() => setAuthenticated(false)} hover:bg-cream-dark transition-colors cursor-pointer"
onClick={handleLogout}
> >
logout Sign out
</button> </button>
</div> </div>
</header> </header>
{/* Messages area */} {/* Conversation title bar */}
{selectedConversation && ( {selectedConversation && (
<div className="sticky top-0 mx-auto w-full"> <div className="bg-warm-white/80 backdrop-blur-sm border-b border-sand-light/50 px-6 py-3">
<div className="bg-[#F9F5EB] text-black px-6 w-full py-3"> <h2 className="text-sm font-semibold text-charcoal truncate max-w-2xl mx-auto">
<h2 className="text-lg font-semibold">
{selectedConversation.title || "Untitled Conversation"} {selectedConversation.title || "Untitled Conversation"}
</h2> </h2>
</div> </div>
</div>
)} )}
<div className="flex-1 overflow-y-auto relative px-4 py-6">
{/* Floating conversation name */}
{/* Messages area */}
<div className="flex-1 overflow-y-auto px-4 py-6">
<div className="max-w-2xl mx-auto flex flex-col gap-4"> <div className="max-w-2xl mx-auto flex flex-col gap-4">
{/* Mobile conversation list */}
{showConversations && ( {showConversations && (
<div className="md:hidden"> <div className="md:hidden mb-2">
<ConversationList <ConversationList
conversations={conversations} conversations={conversations}
onCreateNewConversation={handleCreateNewConversation} onCreateNewConversation={handleCreateNewConversation}
onSelectConversation={handleSelectConversation} onSelectConversation={handleSelectConversation}
selectedId={selectedConversation?.id}
/> />
</div> </div>
)} )}
{/* Empty state */}
{messages.length === 0 && !isLoading && (
<div className="flex flex-col items-center justify-center py-20 gap-4">
<div className="relative">
<div className="absolute -inset-4 bg-amber-soft/20 rounded-full blur-2xl" />
<img
src={catIcon}
alt="Simba"
className="relative w-16 h-16 opacity-60"
/>
</div>
<div className="text-center">
<p className="text-warm-gray text-sm">
Ask Simba anything
</p>
</div>
</div>
)}
{messages.map((msg, index) => { {messages.map((msg, index) => {
if (msg.speaker === "simba") { if (msg.speaker === "tool")
return <ToolBubble key={index} text={msg.text} />;
if (msg.speaker === "simba")
return <AnswerBubble key={index} text={msg.text} />; return <AnswerBubble key={index} text={msg.text} />;
}
return <QuestionBubble key={index} text={msg.text} />; return <QuestionBubble key={index} text={msg.text} />;
})} })}
{isLoading && <AnswerBubble text="" loading={true} />} {isLoading && <AnswerBubble text="" loading={true} />}
@@ -292,8 +387,8 @@ export const ChatScreen = ({ setAuthenticated }: ChatScreenProps) => {
</div> </div>
{/* Input area */} {/* Input area */}
<footer className="p-4 bg-[#F9F5EB]"> <footer className="border-t border-sand-light/50 bg-warm-white/60 backdrop-blur-sm">
<div className="max-w-2xl mx-auto"> <div className="max-w-2xl mx-auto px-4 py-4">
<MessageInput <MessageInput
query={query} query={query}
handleQueryChange={handleQueryChange} handleQueryChange={handleQueryChange}

View File

@@ -44,12 +44,12 @@ export const ConversationList = ({
}, []); }, []);
return ( return (
<div className="bg-indigo-300 rounded-md p-3 sm:p-4 flex flex-col gap-1"> <div className="bg-stone-200 rounded-md p-3 sm:p-4 flex flex-col gap-1">
{conservations.map((conversation) => { {conservations.map((conversation) => {
return ( return (
<div <div
key={conversation.id} key={conversation.id}
className="border-blue-400 bg-indigo-300 hover:bg-indigo-200 cursor-pointer rounded-md p-3 min-h-[44px] flex items-center" className="bg-stone-200 hover:bg-stone-300 cursor-pointer rounded-md p-3 min-h-[44px] flex items-center"
onClick={() => onSelectConversation(conversation)} onClick={() => onSelectConversation(conversation)}
> >
<p className="text-sm sm:text-base truncate w-full"> <p className="text-sm sm:text-base truncate w-full">
@@ -59,7 +59,7 @@ export const ConversationList = ({
); );
})} })}
<div <div
className="border-blue-400 bg-indigo-300 hover:bg-indigo-200 cursor-pointer rounded-md p-3 min-h-[44px] flex items-center" className="bg-stone-200 hover:bg-stone-300 cursor-pointer rounded-md p-3 min-h-[44px] flex items-center"
onClick={() => onCreateNewConversation()} onClick={() => onCreateNewConversation()}
> >
<p className="text-sm sm:text-base"> + Start a new thread</p> <p className="text-sm sm:text-base"> + Start a new thread</p>

View File

@@ -1,6 +1,7 @@
import { useState, useEffect } from "react"; import { useState, useEffect } from "react";
import { userService } from "../api/userService"; import { userService } from "../api/userService";
import { oidcService } from "../api/oidcService"; import { oidcService } from "../api/oidcService";
import catIcon from "../assets/cat.png";
type LoginScreenProps = { type LoginScreenProps = {
setAuthenticated: (isAuth: boolean) => void; setAuthenticated: (isAuth: boolean) => void;
@@ -76,54 +77,77 @@ export const LoginScreen = ({ setAuthenticated }: LoginScreenProps) => {
// Show loading state while checking authentication or processing callback // Show loading state while checking authentication or processing callback
if (isChecking || isLoggingIn) { if (isChecking || isLoggingIn) {
return ( return (
<div className="h-screen bg-opacity-20"> <div className="h-screen flex flex-col items-center justify-center bg-cream gap-4">
<div className="bg-white/85 h-screen flex items-center justify-center"> <img
<div className="text-center"> src={catIcon}
<p className="text-lg sm:text-xl"> alt="Simba"
{isLoggingIn ? "Logging in..." : "Checking authentication..."} className="w-16 h-16 animate-bounce"
/>
<p className="text-warm-gray font-medium text-lg tracking-wide">
{isLoggingIn ? "letting you in..." : "checking credentials..."}
</p> </p>
</div> </div>
</div>
</div>
); );
} }
return ( return (
<div className="h-screen bg-opacity-20"> <div className="h-screen bg-cream flex items-center justify-center p-4">
<div className="bg-white/85 h-screen"> {/* Decorative background texture */}
<div className="flex flex-row justify-center py-4"> <div className="fixed inset-0 opacity-[0.03] pointer-events-none"
<div className="flex flex-col gap-4 w-full px-4 sm:w-11/12 sm:max-w-2xl lg:max-w-4xl sm:px-0"> style={{
<div className="flex flex-col gap-4"> backgroundImage: `radial-gradient(circle at 1px 1px, var(--color-charcoal) 1px, transparent 0)`,
<div className="flex flex-grow justify-center w-full bg-amber-400 p-2"> backgroundSize: '24px 24px'
<h1 className="text-base sm:text-xl font-bold text-center"> }}
I AM LOOKING FOR A DESIGNER. THIS APP WILL REMAIN UGLY UNTIL A />
DESIGNER COMES.
</h1>
</div>
<header className="flex flex-row justify-center gap-2 grow sticky top-0 z-10 bg-white">
<h1 className="text-2xl sm:text-3xl">ask simba!</h1>
</header>
<div className="relative w-full max-w-sm">
{/* Cat icon & branding */}
<div className="flex flex-col items-center mb-8">
<div className="relative mb-4">
<div className="absolute -inset-3 bg-amber-soft/40 rounded-full blur-xl" />
<img
src={catIcon}
alt="Simba"
className="relative w-20 h-20 drop-shadow-lg"
/>
</div>
<h1 className="font-[family-name:var(--font-display)] text-4xl font-bold text-charcoal tracking-tight">
asksimba
</h1>
<p className="text-warm-gray text-sm mt-1.5 tracking-wide">
your feline knowledge companion
</p>
</div>
{/* Login card */}
<div className="bg-warm-white rounded-2xl shadow-lg shadow-sand/40 border border-sand-light/60 p-8">
{error && ( {error && (
<div className="text-red-600 font-semibold text-sm sm:text-base bg-red-50 p-3 rounded-md"> <div className="mb-4 text-sm bg-red-50 text-red-700 p-3 rounded-xl border border-red-200">
{error} {error}
</div> </div>
)} )}
<div className="text-center text-sm sm:text-base text-gray-600 py-2"> <p className="text-center text-warm-gray text-sm mb-6">
Click below to login with Authelia Sign in to start chatting with Simba
</div> </p>
</div>
<button <button
className="p-3 sm:p-4 min-h-[44px] border border-blue-400 bg-blue-200 hover:bg-blue-400 cursor-pointer rounded-md flex-grow text-sm sm:text-base font-semibold" className="w-full py-3.5 px-4 bg-forest text-white font-semibold rounded-xl
hover:bg-forest-light transition-all duration-200
active:scale-[0.98] disabled:opacity-50 disabled:cursor-not-allowed
shadow-md shadow-forest/20 hover:shadow-lg hover:shadow-forest/30
cursor-pointer text-sm tracking-wide"
onClick={handleOIDCLogin} onClick={handleOIDCLogin}
disabled={isLoggingIn} disabled={isLoggingIn}
> >
{isLoggingIn ? "Redirecting..." : "Login with Authelia"} {isLoggingIn ? "Redirecting..." : "Sign in with Authelia"}
</button> </button>
</div> </div>
</div>
{/* Footer paw prints */}
<p className="text-center text-sand mt-6 text-xs tracking-widest select-none">
~ meow ~
</p>
</div> </div>
</div> </div>
); );

View File

@@ -0,0 +1,5 @@
// Inline status bubble rendered in the message list while the agent is
// running a tool (e.g. "🔍 Searching Simba's records..."). Left-aligned,
// muted italic text — visually distinct from question/answer bubbles.
export const ToolBubble = ({ text }: { text: string }) => {
  return (
    <div className="text-sm text-gray-500 italic px-3 py-1 self-start">
      {text}
    </div>
  );
};

View File

@@ -3,8 +3,17 @@
echo "Running database migrations..." echo "Running database migrations..."
aerich upgrade aerich upgrade
echo "Starting reindex process..." # Ensure Obsidian vault directory exists
python main.py "" --reindex mkdir -p /app/data/obsidian
echo "Starting Flask application..." # Start continuous Obsidian sync if enabled
if [ "${OBSIDIAN_CONTINUOUS_SYNC}" = "true" ]; then
echo "Starting Obsidian continuous sync in background..."
ob sync --continuous &
fi
echo "Starting reindex process in background..."
python main.py "" --reindex &
echo "Starting application..."
python app.py python app.py

189
tickets.md Normal file
View File

@@ -0,0 +1,189 @@
# Integration: Twilio API for WhatsApp Interface (Multi-User)
## Overview
Integrate Twilio's WhatsApp API to allow users to interact with Simba via WhatsApp. This requires multi-user support, linking WhatsApp numbers to existing or new user accounts.
## Tasks
### Phase 1: Infrastructure and Database Changes
- [x] **[TICKET-001]** Update `User` model to include `whatsapp_number`.
- [x] **[TICKET-002]** Generate and apply migrations for the database changes.
### Phase 2: Twilio Integration Blueprint
- [x] **[TICKET-003]** Create a new blueprint for Twilio/WhatsApp webhook.
- [x] **[TICKET-004]** Implement Twilio signature validation for security.
- Decorator enabled on webhook. Set `TWILIO_SIGNATURE_VALIDATION=false` to disable in dev. Set `TWILIO_WEBHOOK_URL` if behind a reverse proxy.
- [x] **[TICKET-005]** Implement User identification from WhatsApp phone number.
### Phase 3: Core Messaging Logic
- [x] **[TICKET-006]** Integrate `consult_simba_oracle` with the WhatsApp blueprint.
- [x] **[TICKET-007]** Implement outgoing WhatsApp message responses.
- [x] **[TICKET-008]** Handle conversation context in WhatsApp.
### Phase 4: Configuration and Deployment
- [x] **[TICKET-009]** Add Twilio credentials to environment variables.
- Keys: `TWILIO_ACCOUNT_SID`, `TWILIO_AUTH_TOKEN`, `TWILIO_WHATSAPP_NUMBER`.
- [ ] **[TICKET-010]** Document the Twilio webhook setup in `docs/whatsapp_integration.md`.
- Include: Webhook URL format, Twilio Console setup instructions.
### Phase 5: Multi-user & Edge Cases
- [ ] **[TICKET-011]** Handle first-time users (auto-creation of accounts or invitation system).
- [ ] **[TICKET-012]** Handle media messages (optional/future: images, audio).
- [x] **[TICKET-013]** Rate limiting and error handling for Twilio requests.
## Implementation Details
### Twilio Webhook Payload (POST)
- `SmsMessageSid`, `NumMedia`, `Body`, `From`, `To`, `AccountSid`, etc.
- We primarily care about `Body` (user message) and `From` (user WhatsApp number).
### Workflow
1. Twilio receives a message -> POST to `/api/whatsapp/webhook`.
2. Validate signature.
3. Identify `User` by `From` number.
4. If not found, create a new `User` or return an error.
5. Get/create `Conversation` for this `User`.
6. Call `consult_simba_oracle` with the query and context.
7. Return response via TwiML `<Message>` tag.
---
# Integration: Obsidian Bidirectional Data Store
## Overview
Integrate Obsidian as a bidirectional data store using the [`obsidian-headless`](https://github.com/obsidianmd/obsidian-headless) npm package. SimbaRAG will be able to read/search Obsidian notes for RAG context and write new notes, research summaries, and tasks back to the vault via the LangChain agent.
## Tasks
### Phase 1: Infrastructure
- [ ] **[OBS-001]** Upgrade Node.js from 20 to 22 in `Dockerfile` (required by obsidian-headless).
- [ ] **[OBS-002]** Install `obsidian-headless` globally via npm in `Dockerfile`.
- [ ] **[OBS-003]** Add `obsidian_vault_data` volume and Obsidian env vars to `docker-compose.yml`.
- [ ] **[OBS-004]** Document Obsidian env vars in `.env.example` (`OBSIDIAN_AUTH_TOKEN`, `OBSIDIAN_VAULT_ID`, `OBSIDIAN_E2E_PASSWORD`, `OBSIDIAN_DEVICE_NAME`, `OBSIDIAN_CONTINUOUS_SYNC`).
- [ ] **[OBS-005]** Update `startup.sh` to conditionally run `ob sync --continuous` in background when `OBSIDIAN_CONTINUOUS_SYNC=true`.
### Phase 2: Core Service
- [ ] **[OBS-006]** Create `utils/obsidian_service.py` with `ObsidianService` class.
- Vault setup via `ob sync-setup` (async subprocess)
- One-time sync via `ob sync`
- Sync status via `ob sync-status`
- Walk vault directory for `.md` files (skip `.obsidian/`)
- Parse Obsidian markdown: YAML frontmatter → metadata, wikilink conversion, embed stripping, tag extraction
- Read specific note by relative path
- Create new note with frontmatter (auto-adds `created_by: simbarag` + timestamp)
- Create task note in configurable tasks folder
### Phase 3: RAG Indexing (Read)
- [ ] **[OBS-007]** Add `fetch_obsidian_documents()` to `blueprints/rag/logic.py` — uses `ObsidianService` to parse all vault `.md` files into LangChain `Document` objects with `source=obsidian` metadata.
- [ ] **[OBS-008]** Add `index_obsidian_documents()` to `blueprints/rag/logic.py` — deletes existing `source=obsidian` chunks, splits documents with shared `text_splitter`, embeds into shared `vector_store`.
- [ ] **[OBS-009]** Add `POST /api/rag/index-obsidian` endpoint (`@admin_required`) to `blueprints/rag/__init__.py`.
### Phase 4: Agent Tools (Read + Write)
- [ ] **[OBS-010]** Add `obsidian_search_notes` tool to `blueprints/conversation/agents.py` — semantic search via ChromaDB with `where={"source": "obsidian"}` filter.
- [ ] **[OBS-011]** Add `obsidian_read_note` tool to `blueprints/conversation/agents.py` — reads a specific note by relative path.
- [ ] **[OBS-012]** Add `obsidian_create_note` tool to `blueprints/conversation/agents.py` — creates a new markdown note in the vault (title, content, folder, tags).
- [ ] **[OBS-013]** Add `obsidian_create_task` tool to `blueprints/conversation/agents.py` — creates a task note with optional due date.
- [ ] **[OBS-014]** Register Obsidian tools conditionally (follow YNAB pattern: `obsidian_enabled` flag).
- [ ] **[OBS-015]** Update system prompt in `blueprints/conversation/__init__.py` with Obsidian tool usage instructions.
### Phase 5: Testing & Verification
- [ ] **[OBS-016]** Verify Docker image builds with Node.js 22 + obsidian-headless.
- [ ] **[OBS-017]** Test vault sync: setup → sync → verify files appear in `/app/data/obsidian`.
- [ ] **[OBS-018]** Test indexing: `POST /api/rag/index-obsidian` → verify chunks in ChromaDB with `source=obsidian`.
- [ ] **[OBS-019]** Test agent read tools: chat queries trigger `obsidian_search_notes` and `obsidian_read_note`.
- [ ] **[OBS-020]** Test agent write tools: chat creates notes/tasks → files appear in vault → sync pushes to Obsidian.
## Implementation Details
### Key Files
- `utils/obsidian_service.py` — new, core service (follows `utils/ynab_service.py` pattern)
- `blueprints/conversation/agents.py` — add tools (follows YNAB tool pattern at lines 101-279)
- `blueprints/conversation/__init__.py` — update system prompt (line ~94)
- `blueprints/rag/logic.py` — add indexing functions (reuse `vector_store`, `text_splitter`)
- `blueprints/rag/__init__.py` — add index endpoint
### Write-back Model
Files written to the vault directory are automatically synced to Obsidian Sync by the `ob sync --continuous` background process. No separate push step needed.
### Environment Variables
| Variable | Required | Description |
|----------|----------|-------------|
| `OBSIDIAN_AUTH_TOKEN` | Yes | Auth token for Obsidian Sync (non-interactive) |
| `OBSIDIAN_VAULT_ID` | Yes | Remote vault ID or name |
| `OBSIDIAN_E2E_PASSWORD` | If E2EE | End-to-end encryption password |
| `OBSIDIAN_DEVICE_NAME` | No | Client identifier (default: `simbarag-server`) |
| `OBSIDIAN_CONTINUOUS_SYNC` | No | Enable background sync (default: `false`) |
---
# Integration: WhatsApp to LangChain Agent Migration
## Overview
Migrate the WhatsApp blueprint from custom LLM logic to the LangChain agent-based system used by the conversation blueprint. This will provide Tavily web search, YNAB integration, and improved message handling capabilities.
## Tasks
### Phase 1: Import and Setup Changes
- [x] **[WA-001]** Remove dependency on `main.py`'s `consult_simba_oracle` import in `blueprints/whatsapp/__init__.py`.
- [x] **[WA-002]** Import `main_agent` from `blueprints.conversation.agents` in `blueprints/whatsapp/__init__.py`.
- [ ] **[WA-003]** Add import for `query_vector_store` from `blueprints.rag.logic` (if needed for simba_search tool).
- [x] **[WA-004]** Verify `main_agent` is already initialized as a global variable in `agents.py` (it is at line 295).
### Phase 2: Agent Invocation Adaptation
- [x] **[WA-005]** Replace `consult_simba_oracle()` call (lines 171-178) with LangChain agent invocation.
- [x] **[WA-006]** Add system prompt with Simba facts, medical conditions, and recent events from `blueprints/conversation/__init__.py` (lines 55-95).
- [x] **[WA-007]** Build messages payload with role-based conversation history (last 10 messages).
- [x] **[WA-008]** Handle agent response extraction: `response.get("messages", [])[-1].content`.
- [x] **[WA-009]** Keep existing error handling around agent invocation (try/except block).
### Phase 3: Configuration and Logging
- [x] **[WA-010]** Add YNAB availability logging (check `os.getenv("YNAB_ACCESS_TOKEN")` is not None) in webhook handler.
- [x] **[WA-011]** Ensure `main_agent` tools include `simba_search`, `web_search`, and optionally YNAB tools (already configured in `agents.py`).
- [x] **[WA-012]** Verify `simba_search` tool uses `query_vector_store()` which supports `where={"source": "paperless"}` filter (no change needed, works with existing ChromaDB collection).
### Phase 4: Testing Strategy
- [ ] **[WA-013]** Test Simba queries (e.g., "How much does Simba weigh?") — should use `simba_search` tool.
- [ ] **[WA-014]** Test general chat queries (e.g., "What's the weather?") — should use LLM directly, no tools.
- [ ] **[WA-015]** Test web search capability (e.g., "What's the latest cat health research?") — should use `web_search` tool with Tavily.
- [ ] **[WA-016]** Test YNAB integration if configured (e.g., "How much did I spend on food?") — should use appropriate YNAB tool.
- [ ] **[WA-017]** Test conversation context preservation (send multiple messages in sequence).
- [ ] **[WA-018]** Test rate limiting still works after migration.
- [ ] **[WA-019]** Test user creation and allowlist still function correctly.
- [ ] **[WA-020]** Test error handling for agent failures (returns "Sorry, I'm having trouble thinking right now. 😿").
### Phase 5: Cleanup and Documentation
- [ ] **[WA-021]** Optionally remove or deprecate the now-superseded `main.py` functions: `classify_query()`, `consult_oracle()`, `llm_chat()`, `consult_simba_oracle()` (keep for CLI tool usage).
- [ ] **[WA-022]** Update code comments in `main.py` to indicate WhatsApp no longer uses these functions.
- [ ] **[WA-023]** Document the agent-based approach in `docs/whatsapp_integration.md` (if file exists) or create new documentation.
## Implementation Details
### Current WhatsApp Flow
1. Twilio webhook → `blueprints/whatsapp/__init__.webhook()`
2. Call `consult_simba_oracle(input, transcript)` from `main.py`
3. `consult_simba_oracle()` uses custom `QueryGenerator` to classify query
4. Routes to `consult_oracle()` (ChromaDB) or `llm_chat()` (simple chat)
5. Returns text response
### Target WhatsApp Flow
1. Twilio webhook → `blueprints/whatsapp/__init__.webhook()`
2. Build LangChain messages payload with system prompt and conversation history
3. Invoke `main_agent.ainvoke({"messages": messages_payload})`
4. Agent decides when to use tools (simba_search, web_search, YNAB)
5. Returns text response from last message
### Key Differences
1. **No manual query classification** — Agent decides based on LLM reasoning
2. **Tavily web_search** now available for current information
3. **YNAB integration** available if configured
4. **System prompt consistency** with conversation blueprint
5. **Message format** — LangChain messages array vs transcript string
### Environment Variables
No new environment variables needed. Uses existing:
- `LLAMA_SERVER_URL` — for LLM model
- `TAVILY_API_KEY` — for web search
- `YNAB_ACCESS_TOKEN` — for budget integration (optional)
### Files Modified
- `blueprints/whatsapp/__init__.py` — Main webhook handler

446
utils/obsidian_service.py Normal file
View File

@@ -0,0 +1,446 @@
"""Obsidian headless sync service for querying and modifying vaults."""
import os
import re
import yaml
from datetime import datetime
from pathlib import Path
from typing import Any, Optional
from subprocess import run
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
class ObsidianService:
"""Service for interacting with Obsidian vault via obsidian-headless CLI."""
def __init__(self):
    """Initialize Obsidian Sync client.

    Resolves the vault location from ``OBSIDIAN_VAULT_PATH`` (defaulting
    to the in-container path that ``startup.sh`` creates), ensures the
    directory exists, and verifies it already holds synced notes.
    """
    self.vault_path = os.getenv("OBSIDIAN_VAULT_PATH", "/app/data/obsidian")
    # Create vault path if it doesn't exist
    Path(self.vault_path).mkdir(parents=True, exist_ok=True)
    # Validate vault has .md files.
    # NOTE(review): _validate_vault raises ValueError on an empty vault,
    # so constructing this service before the first sync completes will
    # fail — confirm that is the intended startup behavior.
    self._validate_vault()
def _validate_vault(self) -> None:
"""Validate that vault directory exists and has .md files."""
vault_dir = Path(self.vault_path)
if not vault_dir.exists():
raise ValueError(
f"Obsidian vault path '{self.vault_path}' does not exist. "
"Please ensure the vault is synced to this location."
)
md_files = list(vault_dir.rglob("*.md"))
if not md_files:
raise ValueError(
f"Vault at '{self.vault_path}' contains no markdown files. "
"Please ensure the vault is synced with obsidian-headless."
)
def walk_vault(self) -> list[Path]:
    """Collect every markdown file in the vault.

    Returns:
        Paths of all ``*.md`` files under the vault root, skipping
        anything inside the ``.obsidian`` configuration directory.
    """
    root = Path(self.vault_path)
    # rglob walks the tree recursively; filter out config files that
    # live under the hidden .obsidian/ directory.
    return [
        note
        for note in root.rglob("*.md")
        if ".obsidian" not in note.parts
    ]
def parse_markdown(self, content: str, filepath: Optional[Path] = None) -> dict[str, Any]:
"""Parse Obsidian markdown to extract metadata and clean content.
Args:
content: Raw markdown content
filepath: Optional file path for context
Returns:
Dictionary containing parsed content:
- metadata: Parsed YAML frontmatter (or empty dict if none)
- content: Cleaned body content
- tags: Extracted tags
- wikilinks: List of wikilinks found
- embeds: List of embeds found
"""
# Split frontmatter from content
frontmatter_pattern = r"^---\n(.*?)\n---"
match = re.match(frontmatter_pattern, content, re.DOTALL)
metadata = {}
body_content = content
if match:
frontmatter = match.group(1)
body_content = content[match.end():].strip()
try:
metadata = yaml.safe_load(frontmatter) or {}
except yaml.YAMLError:
# Invalid YAML, treat as empty metadata
metadata = {}
# Extract tags (#tag format)
tags = re.findall(r"#(\w+)", content)
tags = [tag for tag in tags if tag] # Remove empty strings
# Extract wikilinks [[wiki link]]
wikilinks = re.findall(r"\[\[([^\]]+)\]\]", content)
# Extract embeds [[!embed]] or [[!embed:file]]
embeds = re.findall(r"\[\[!(.*?)\]\]", content)
embeds = [e.split(":")[0].strip() if ":" in e else e.strip() for e in embeds]
# Clean body content
# Remove wikilinks [[...]] and embeds [[!...]]
cleaned_content = re.sub(r"\[\[.*?\]\]", "", body_content)
cleaned_content = re.sub(r"\n{3,}", "\n\n", cleaned_content).strip()
return {
"metadata": metadata,
"content": cleaned_content,
"tags": tags,
"wikilinks": wikilinks,
"embeds": embeds,
"filepath": str(filepath) if filepath else None,
}
def read_note(self, relative_path: str) -> dict[str, Any]:
"""Read a specific note from the vault.
Args:
relative_path: Path to note relative to vault root (e.g., "My Notes/simba.md")
Returns:
Dictionary containing parsed note content and metadata.
"""
vault_dir = Path(self.vault_path)
note_path = vault_dir / relative_path
if not note_path.exists():
raise FileNotFoundError(f"Note not found at '{relative_path}'")
with open(note_path, "r", encoding="utf-8") as f:
content = f.read()
parsed = self.parse_markdown(content, note_path)
return {
"content": parsed,
"path": relative_path,
"full_path": str(note_path),
}
def create_note(
self,
title: str,
content: str,
folder: str = "notes",
tags: Optional[list[str]] = None,
frontmatter: Optional[dict[str, Any]] = None,
) -> str:
"""Create a new note in the vault.
Args:
title: Note title (will be used as filename)
content: Note body content
folder: Folder path (default: "notes")
tags: List of tags to add
frontmatter: Optional custom frontmatter to merge with defaults
Returns:
Path to created note (relative to vault root).
"""
vault_dir = Path(self.vault_path)
note_folder = vault_dir / folder
note_folder.mkdir(parents=True, exist_ok=True)
# Sanitize title for filename
safe_title = re.sub(r"[^a-z0-9-_]", "-", title.lower().strip())
safe_title = re.sub(r"-+", "-", safe_title).strip("-")
note_path = note_folder / f"{safe_title}.md"
# Build frontmatter
default_frontmatter = {
"created_by": "simbarag",
"created_at": datetime.now().isoformat(),
}
if frontmatter:
default_frontmatter.update(frontmatter)
# Add tags to frontmatter if provided
if tags:
default_frontmatter.setdefault("tags", []).extend(tags)
# Write note
frontmatter_yaml = yaml.dump(default_frontmatter, allow_unicode=True, default_flow_style=False)
full_content = f"---\n{frontmatter_yaml}---\n\n{content}"
with open(note_path, "w", encoding="utf-8") as f:
f.write(full_content)
return f"{folder}/{safe_title}.md"
def create_task(
self,
title: str,
content: str = "",
folder: str = "tasks",
due_date: Optional[str] = None,
tags: Optional[list[str]] = None,
) -> str:
"""Create a task note in the vault.
Args:
title: Task title
content: Task description
folder: Folder to place task (default: "tasks")
due_date: Optional due date in YYYY-MM-DD format
tags: Optional list of tags to add
Returns:
Path to created task note (relative to vault root).
"""
task_content = f"# {title}\n\n{content}"
# Add checkboxes if content is empty (simple task)
if not content.strip():
task_content += "\n- [ ]"
# Add due date if provided
if due_date:
task_content += f"\n\n**Due**: {due_date}"
# Add tags if provided
if tags:
task_content += "\n\n" + " ".join([f"#{tag}" for tag in tags])
return self.create_note(
title=title,
content=task_content,
folder=folder,
tags=tags,
)
def get_daily_note_path(self, date: Optional[datetime] = None) -> str:
"""Return the relative vault path for a daily note.
Args:
date: Date for the note (defaults to today)
Returns:
Relative path like "journal/2026/2026-03-03.md"
"""
if date is None:
date = datetime.now()
return f"journal/{date.strftime('%Y')}/{date.strftime('%Y-%m-%d')}.md"
def get_daily_note(self, date: Optional[datetime] = None) -> dict[str, Any]:
"""Read a daily note from the vault.
Args:
date: Date for the note (defaults to today)
Returns:
Dictionary with found status, path, raw content, and date string.
"""
if date is None:
date = datetime.now()
relative_path = self.get_daily_note_path(date)
note_path = Path(self.vault_path) / relative_path
if not note_path.exists():
return {"found": False, "path": relative_path, "content": None, "date": date.strftime("%Y-%m-%d")}
with open(note_path, "r", encoding="utf-8") as f:
content = f.read()
return {"found": True, "path": relative_path, "content": content, "date": date.strftime("%Y-%m-%d")}
def get_daily_tasks(self, date: Optional[datetime] = None) -> dict[str, Any]:
"""Extract tasks from a daily note's tasks section.
Args:
date: Date for the note (defaults to today)
Returns:
Dictionary with tasks list (each has "text" and "done" keys) and metadata.
"""
if date is None:
date = datetime.now()
note = self.get_daily_note(date)
if not note["found"]:
return {"found": False, "tasks": [], "date": note["date"], "path": note["path"]}
tasks = []
in_tasks = False
for line in note["content"].split("\n"):
if re.match(r"^###\s+tasks\s*$", line, re.IGNORECASE):
in_tasks = True
continue
if in_tasks and re.match(r"^#{1,3}\s", line):
break
if in_tasks:
done_match = re.match(r"^- \[x\] (.+)$", line, re.IGNORECASE)
todo_match = re.match(r"^- \[ \] (.+)$", line)
if done_match:
tasks.append({"text": done_match.group(1), "done": True})
elif todo_match:
tasks.append({"text": todo_match.group(1), "done": False})
return {"found": True, "tasks": tasks, "date": note["date"], "path": note["path"]}
def add_task_to_daily_note(self, task_text: str, date: Optional[datetime] = None) -> dict[str, Any]:
"""Add a task checkbox to a daily note, creating the note if needed.
Args:
task_text: The task description text
date: Date for the note (defaults to today)
Returns:
Dictionary with success status, path, and whether note was created.
"""
if date is None:
date = datetime.now()
relative_path = self.get_daily_note_path(date)
note_path = Path(self.vault_path) / relative_path
if not note_path.exists():
note_path.parent.mkdir(parents=True, exist_ok=True)
content = (
f"---\nmodified: {datetime.now().isoformat()}\n---\n"
f"### tasks\n\n- [ ] {task_text}\n\n### log\n"
)
with open(note_path, "w", encoding="utf-8") as f:
f.write(content)
return {"success": True, "created_note": True, "path": relative_path}
with open(note_path, "r", encoding="utf-8") as f:
content = f.read()
# Insert before ### log if present, otherwise append before end
log_match = re.search(r"\n(### log)", content, re.IGNORECASE)
if log_match:
insert_pos = log_match.start()
content = content[:insert_pos] + f"\n- [ ] {task_text}" + content[insert_pos:]
else:
content = content.rstrip() + f"\n- [ ] {task_text}\n"
with open(note_path, "w", encoding="utf-8") as f:
f.write(content)
return {"success": True, "created_note": False, "path": relative_path}
def complete_task_in_daily_note(self, task_text: str, date: Optional[datetime] = None) -> dict[str, Any]:
"""Mark a task as complete in a daily note by matching task text.
Searches for a task matching the given text (exact or partial) and
replaces `- [ ]` with `- [x]`.
Args:
task_text: The task text to search for (exact or partial match)
date: Date for the note (defaults to today)
Returns:
Dictionary with success status, matched task text, and path.
"""
if date is None:
date = datetime.now()
relative_path = self.get_daily_note_path(date)
note_path = Path(self.vault_path) / relative_path
if not note_path.exists():
return {"success": False, "error": "Note not found", "path": relative_path}
with open(note_path, "r", encoding="utf-8") as f:
content = f.read()
# Try exact match first, then partial
exact = f"- [ ] {task_text}"
if exact in content:
content = content.replace(exact, f"- [x] {task_text}", 1)
else:
match = re.search(r"- \[ \] .*" + re.escape(task_text) + r".*", content, re.IGNORECASE)
if not match:
return {"success": False, "error": f"Task '{task_text}' not found", "path": relative_path}
completed = match.group(0).replace("- [ ]", "- [x]", 1)
content = content.replace(match.group(0), completed, 1)
task_text = match.group(0).replace("- [ ] ", "")
with open(note_path, "w", encoding="utf-8") as f:
f.write(content)
return {"success": True, "completed_task": task_text, "path": relative_path}
def sync_vault(self) -> dict[str, Any]:
"""Trigger a one-time sync of the vault.
Returns:
Dictionary containing sync result and output.
"""
try:
result = run(
["ob", "sync"],
capture_output=True,
text=True,
timeout=300,
)
if result.returncode != 0:
return {
"success": False,
"error": result.stderr or "Sync failed",
"stdout": result.stdout,
}
return {
"success": True,
"message": "Vault synced successfully",
"stdout": result.stdout,
}
except Exception as e:
return {
"success": False,
"error": str(e),
}
def sync_status(self) -> dict[str, Any]:
"""Check sync status of the vault.
Returns:
Dictionary containing sync status information.
"""
try:
result = run(
["ob", "sync-status"],
capture_output=True,
text=True,
timeout=60,
)
return {
"success": True,
"output": result.stdout,
"stderr": result.stderr,
}
except Exception as e:
return {
"success": False,
"error": str(e),
}

342
utils/ynab_service.py Normal file
View File

@@ -0,0 +1,342 @@
"""YNAB API service for querying budget data."""
import os
from datetime import datetime, timedelta
from typing import Any, Optional
from dotenv import load_dotenv
import ynab
# Load environment variables
load_dotenv()
class YNABService:
    """Service for interacting with the YNAB API.

    YNAB reports all amounts in "milliunits" (1/1000 of a currency unit);
    every public method here converts amounts to currency units before
    returning them.
    """

    def __init__(self):
        """Initialize YNAB API client.

        Reads ``YNAB_ACCESS_TOKEN`` (required) and ``YNAB_BUDGET_ID``
        (optional — falls back to the first budget on the account).

        Raises:
            ValueError: If the token is unset or no budgets are found.
        """
        self.access_token = os.getenv("YNAB_ACCESS_TOKEN", "")
        self.budget_id = os.getenv("YNAB_BUDGET_ID", "")
        if not self.access_token:
            raise ValueError("YNAB_ACCESS_TOKEN environment variable is required")
        # Configure API client
        configuration = ynab.Configuration(access_token=self.access_token)
        self.api_client = ynab.ApiClient(configuration)
        # Initialize API endpoints
        self.budgets_api = ynab.BudgetsApi(self.api_client)
        self.transactions_api = ynab.TransactionsApi(self.api_client)
        self.months_api = ynab.MonthsApi(self.api_client)
        self.categories_api = ynab.CategoriesApi(self.api_client)
        # Fall back to the account's first budget if none configured
        if not self.budget_id:
            budgets_response = self.budgets_api.get_budgets()
            if budgets_response.data and budgets_response.data.budgets:
                self.budget_id = budgets_response.data.budgets[0].id
            else:
                raise ValueError("No YNAB budgets found")

    def get_budget_summary(self) -> dict[str, Any]:
        """Get overall budget summary and health status.

        Returns:
            Dictionary containing budget summary with to-be-budgeted amount,
            total budgeted, total activity, and overall budget health.
        """
        budget_response = self.budgets_api.get_budget_by_id(self.budget_id)
        budget_data = budget_response.data.budget
        # NOTE(review): assumes months[0] is the current month — confirm the
        # API's month ordering
        to_be_budgeted = (
            budget_data.months[0].to_be_budgeted / 1000 if budget_data.months else 0
        )
        total_budgeted = 0
        total_activity = 0
        total_available = 0
        # Sum across all visible categories (skip deleted/hidden)
        for category_group in budget_data.category_groups or []:
            if category_group.deleted or category_group.hidden:
                continue
            for category in category_group.categories or []:
                if category.deleted or category.hidden:
                    continue
                total_budgeted += category.budgeted / 1000
                total_activity += category.activity / 1000
                total_available += category.balance / 1000
        return {
            "budget_name": budget_data.name,
            "to_be_budgeted": round(to_be_budgeted, 2),
            "total_budgeted": round(total_budgeted, 2),
            "total_activity": round(total_activity, 2),
            "total_available": round(total_available, 2),
            "currency_format": budget_data.currency_format.iso_code
            if budget_data.currency_format
            else "USD",
            "summary": f"Budget '{budget_data.name}' has ${abs(to_be_budgeted):.2f} {'to be budgeted' if to_be_budgeted > 0 else 'overbudgeted'}. "
            f"Total budgeted: ${total_budgeted:.2f}, Total spent: ${abs(total_activity):.2f}, "
            f"Total available: ${total_available:.2f}.",
        }

    def get_transactions(
        self,
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
        category_name: Optional[str] = None,
        payee_name: Optional[str] = None,
        limit: int = 50,
    ) -> dict[str, Any]:
        """Get transactions filtered by date range, category, or payee.

        Args:
            start_date: Start date in YYYY-MM-DD format (defaults to 30 days ago)
            end_date: End date in YYYY-MM-DD format (defaults to today)
            category_name: Filter by category name (case-insensitive partial match)
            payee_name: Filter by payee name (case-insensitive partial match)
            limit: Maximum number of transactions to return (default 50)

        Returns:
            Dictionary containing matching transactions and summary statistics.
            ``total_amount`` covers all matches, not just the limited page.
        """
        # Set default date range if not provided
        if not start_date:
            start_date = (datetime.now() - timedelta(days=30)).strftime("%Y-%m-%d")
        if not end_date:
            end_date = datetime.now().strftime("%Y-%m-%d")
        # Fetch everything since start_date; end_date is applied client-side
        transactions_response = self.transactions_api.get_transactions(
            self.budget_id, since_date=start_date
        )
        transactions = transactions_response.data.transactions or []
        filtered_transactions = []
        total_amount = 0
        for txn in transactions:
            if txn.deleted:
                continue
            # ISO dates compare correctly as strings
            txn_date = str(txn.date)
            if txn_date < start_date or txn_date > end_date:
                continue
            # Filter by category if specified. Transactions without a category
            # are excluded when a filter is active (previously they slipped
            # through because the None check short-circuited the filter).
            if category_name:
                if not txn.category_name or category_name.lower() not in txn.category_name.lower():
                    continue
            # Filter by payee if specified (same None handling as above)
            if payee_name:
                if not txn.payee_name or payee_name.lower() not in txn.payee_name.lower():
                    continue
            amount = txn.amount / 1000  # Convert milliunits to dollars
            filtered_transactions.append(
                {
                    "date": txn_date,
                    "payee": txn.payee_name,
                    "category": txn.category_name,
                    "memo": txn.memo,
                    "amount": round(amount, 2),
                    "approved": txn.approved,
                }
            )
            total_amount += amount
        # Sort by date (most recent first) and limit
        filtered_transactions.sort(key=lambda x: x["date"], reverse=True)
        filtered_transactions = filtered_transactions[:limit]
        return {
            "transactions": filtered_transactions,
            "count": len(filtered_transactions),
            "total_amount": round(total_amount, 2),
            "start_date": start_date,
            "end_date": end_date,
            "filters": {"category": category_name, "payee": payee_name},
        }

    def get_category_spending(self, month: Optional[str] = None) -> dict[str, Any]:
        """Get spending breakdown by category for a specific month.

        Args:
            month: Month in YYYY-MM format (defaults to current month)

        Returns:
            Dictionary containing spending by category and summary.
        """
        if not month:
            month = datetime.now().strftime("%Y-%m-01")
        else:
            # API expects the first of the month: YYYY-MM-01
            if len(month) == 7:  # YYYY-MM
                month = f"{month}-01"
        # Get budget month
        month_response = self.months_api.get_budget_month(self.budget_id, month)
        month_data = month_response.data.month
        categories_spending = []
        total_budgeted = 0
        total_activity = 0
        total_available = 0
        overspent_categories = []
        for category in month_data.categories or []:
            if category.deleted or category.hidden:
                continue
            budgeted = category.budgeted / 1000
            activity = category.activity / 1000
            available = category.balance / 1000
            total_budgeted += budgeted
            total_activity += activity
            total_available += available
            # Track overspent categories (negative available balance)
            if available < 0:
                overspent_categories.append(
                    {
                        "name": category.name,
                        "budgeted": round(budgeted, 2),
                        "spent": round(abs(activity), 2),
                        "overspent_by": round(abs(available), 2),
                    }
                )
            # Only include categories with activity
            if activity != 0:
                categories_spending.append(
                    {
                        "category": category.name,
                        "budgeted": round(budgeted, 2),
                        "activity": round(activity, 2),
                        "available": round(available, 2),
                        "goal_type": category.goal_type
                        if hasattr(category, "goal_type")
                        else None,
                    }
                )
        # Sort by absolute activity (highest spending first)
        categories_spending.sort(key=lambda x: abs(x["activity"]), reverse=True)
        return {
            "month": month[:7],  # Return YYYY-MM format
            "categories": categories_spending,
            "total_budgeted": round(total_budgeted, 2),
            "total_spent": round(abs(total_activity), 2),
            "total_available": round(total_available, 2),
            "overspent_categories": overspent_categories,
            "to_be_budgeted": round(month_data.to_be_budgeted / 1000, 2)
            if month_data.to_be_budgeted
            else 0,
        }

    def get_spending_insights(self, months_back: int = 3) -> dict[str, Any]:
        """Generate insights about spending patterns and budget health.

        Args:
            months_back: Number of months to analyze (default 3)

        Returns:
            Dictionary containing spending insights, trends, and recommendations.
        """
        now = datetime.now()
        monthly_data = []
        # Collect data for the last N calendar months using true month
        # decrement. The previous 30-day-step approximation could duplicate
        # the current month and skip short ones (e.g. from Mar 31:
        # Mar, Mar 1, Jan 30 — February was lost entirely).
        for i in range(months_back):
            year_offset, month_index = divmod(now.month - 1 - i, 12)
            month_str = f"{now.year + year_offset:04d}-{month_index + 1:02d}-01"
            try:
                month_spending = self.get_category_spending(month_str)
                monthly_data.append(
                    {
                        "month": month_str[:7],
                        "total_spent": month_spending["total_spent"],
                        "total_budgeted": month_spending["total_budgeted"],
                        "overspent_categories": month_spending["overspent_categories"],
                    }
                )
            except Exception:
                # Skip months that don't have data yet
                continue
        if not monthly_data:
            return {"error": "No spending data available for analysis"}
        # Calculate average spending (monthly_data[0] is the newest month)
        avg_spending = sum(m["total_spent"] for m in monthly_data) / len(monthly_data)
        current_spending = monthly_data[0]["total_spent"] if monthly_data else 0
        # Identify spending trend: compare the two most recent months against
        # everything but the newest; >±10% counts as a trend
        if len(monthly_data) >= 2:
            recent_avg = sum(m["total_spent"] for m in monthly_data[:2]) / 2
            older_avg = sum(m["total_spent"] for m in monthly_data[1:]) / (
                len(monthly_data) - 1
            )
            trend = (
                "increasing"
                if recent_avg > older_avg * 1.1
                else "decreasing"
                if recent_avg < older_avg * 0.9
                else "stable"
            )
        else:
            trend = "insufficient data"
        # Find categories overspent in more than one analyzed month
        overspent_frequency = {}
        for month in monthly_data:
            for cat in month["overspent_categories"]:
                cat_name = cat["name"]
                if cat_name not in overspent_frequency:
                    overspent_frequency[cat_name] = 0
                overspent_frequency[cat_name] += 1
        frequently_overspent = [
            {"category": cat, "months_overspent": count}
            for cat, count in overspent_frequency.items()
            if count > 1
        ]
        frequently_overspent.sort(key=lambda x: x["months_overspent"], reverse=True)
        # Generate recommendations
        recommendations = []
        if current_spending > avg_spending * 1.2:
            recommendations.append(
                f"Current month spending (${current_spending:.2f}) is significantly higher than your {months_back}-month average (${avg_spending:.2f})"
            )
        if frequently_overspent:
            top_overspent = frequently_overspent[0]
            recommendations.append(
                f"'{top_overspent['category']}' has been overspent in {top_overspent['months_overspent']} of the last {months_back} months"
            )
        if trend == "increasing":
            recommendations.append(
                "Your spending trend is increasing. Consider reviewing your budget allocations."
            )
        return {
            "analysis_period": f"Last {months_back} months",
            "average_monthly_spending": round(avg_spending, 2),
            "current_month_spending": round(current_spending, 2),
            "spending_trend": trend,
            "frequently_overspent_categories": frequently_overspent,
            "recommendations": recommendations,
            "monthly_breakdown": monthly_data,
        }

72
uv.lock generated
View File

@@ -103,6 +103,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b4/63/278a98c715ae467624eafe375542d8ba9b4383a016df8fdefe0ae28382a7/aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344", size = 499694, upload-time = "2026-01-03T17:32:24.546Z" }, { url = "https://files.pythonhosted.org/packages/b4/63/278a98c715ae467624eafe375542d8ba9b4383a016df8fdefe0ae28382a7/aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344", size = 499694, upload-time = "2026-01-03T17:32:24.546Z" },
] ]
[[package]]
name = "aiohttp-retry"
version = "2.9.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "aiohttp" },
]
sdist = { url = "https://files.pythonhosted.org/packages/9d/61/ebda4d8e3d8cfa1fd3db0fb428db2dd7461d5742cea35178277ad180b033/aiohttp_retry-2.9.1.tar.gz", hash = "sha256:8eb75e904ed4ee5c2ec242fefe85bf04240f685391c4879d8f541d6028ff01f1", size = 13608, upload-time = "2024-11-06T10:44:54.574Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1a/99/84ba7273339d0f3dfa57901b846489d2e5c2cd731470167757f1935fffbd/aiohttp_retry-2.9.1-py3-none-any.whl", hash = "sha256:66d2759d1921838256a05a3f80ad7e724936f083e35be5abb5e16eed6be6dc54", size = 9981, upload-time = "2024-11-06T10:44:52.917Z" },
]
[[package]] [[package]]
name = "aiosignal" name = "aiosignal"
version = "1.4.0" version = "1.4.0"
@@ -1281,19 +1293,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/83/bd/9df897cbc98290bf71140104ee5b9777cf5291afb80333aa7da5a497339b/langchain_core-1.2.5-py3-none-any.whl", hash = "sha256:3255944ef4e21b2551facb319bfc426057a40247c0a05de5bd6f2fc021fbfa34", size = 484851, upload-time = "2025-12-22T23:45:30.525Z" }, { url = "https://files.pythonhosted.org/packages/83/bd/9df897cbc98290bf71140104ee5b9777cf5291afb80333aa7da5a497339b/langchain_core-1.2.5-py3-none-any.whl", hash = "sha256:3255944ef4e21b2551facb319bfc426057a40247c0a05de5bd6f2fc021fbfa34", size = 484851, upload-time = "2025-12-22T23:45:30.525Z" },
] ]
[[package]]
name = "langchain-ollama"
version = "1.0.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "langchain-core" },
{ name = "ollama" },
]
sdist = { url = "https://files.pythonhosted.org/packages/73/51/72cd04d74278f3575f921084f34280e2f837211dc008c9671c268c578afe/langchain_ollama-1.0.1.tar.gz", hash = "sha256:e37880c2f41cdb0895e863b1cfd0c2c840a117868b3f32e44fef42569e367443", size = 153850, upload-time = "2025-12-12T21:48:28.68Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e3/46/f2907da16dc5a5a6c679f83b7de21176178afad8d2ca635a581429580ef6/langchain_ollama-1.0.1-py3-none-any.whl", hash = "sha256:37eb939a4718a0255fe31e19fbb0def044746c717b01b97d397606ebc3e9b440", size = 29207, upload-time = "2025-12-12T21:48:27.832Z" },
]
[[package]] [[package]]
name = "langchain-openai" name = "langchain-openai"
version = "1.1.6" version = "1.1.6"
@@ -1715,15 +1714,15 @@ wheels = [
[[package]] [[package]]
name = "ollama" name = "ollama"
version = "0.6.0" version = "0.6.1"
source = { registry = "https://pypi.org/simple" } source = { registry = "https://pypi.org/simple" }
dependencies = [ dependencies = [
{ name = "httpx" }, { name = "httpx" },
{ name = "pydantic" }, { name = "pydantic" },
] ]
sdist = { url = "https://files.pythonhosted.org/packages/d6/47/f9ee32467fe92744474a8c72e138113f3b529fc266eea76abfdec9a33f3b/ollama-0.6.0.tar.gz", hash = "sha256:da2b2d846b5944cfbcee1ca1e6ee0585f6c9d45a2fe9467cbcd096a37383da2f", size = 50811, upload-time = "2025-09-24T22:46:02.417Z" } sdist = { url = "https://files.pythonhosted.org/packages/9d/5a/652dac4b7affc2b37b95386f8ae78f22808af09d720689e3d7a86b6ed98e/ollama-0.6.1.tar.gz", hash = "sha256:478c67546836430034b415ed64fa890fd3d1ff91781a9d548b3325274e69d7c6", size = 51620, upload-time = "2025-11-13T23:02:17.416Z" }
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/b5/c1/edc9f41b425ca40b26b7c104c5f6841a4537bb2552bfa6ca66e81405bb95/ollama-0.6.0-py3-none-any.whl", hash = "sha256:534511b3ccea2dff419ae06c3b58d7f217c55be7897c8ce5868dfb6b219cf7a0", size = 14130, upload-time = "2025-09-24T22:46:01.19Z" }, { url = "https://files.pythonhosted.org/packages/47/4f/4a617ee93d8208d2bcf26b2d8b9402ceaed03e3853c754940e2290fed063/ollama-0.6.1-py3-none-any.whl", hash = "sha256:fc4c984b345735c5486faeee67d8a265214a31cbb828167782dc642ce0a2bf8c", size = 14354, upload-time = "2025-11-13T23:02:16.292Z" },
] ]
[[package]] [[package]]
@@ -2534,7 +2533,6 @@ dependencies = [
{ name = "langchain" }, { name = "langchain" },
{ name = "langchain-chroma" }, { name = "langchain-chroma" },
{ name = "langchain-community" }, { name = "langchain-community" },
{ name = "langchain-ollama" },
{ name = "langchain-openai" }, { name = "langchain-openai" },
{ name = "ollama" }, { name = "ollama" },
{ name = "openai" }, { name = "openai" },
@@ -2551,6 +2549,8 @@ dependencies = [
{ name = "tomlkit" }, { name = "tomlkit" },
{ name = "tortoise-orm" }, { name = "tortoise-orm" },
{ name = "tortoise-orm-stubs" }, { name = "tortoise-orm-stubs" },
{ name = "twilio" },
{ name = "ynab" },
] ]
[package.metadata] [package.metadata]
@@ -2569,9 +2569,8 @@ requires-dist = [
{ name = "langchain", specifier = ">=1.2.0" }, { name = "langchain", specifier = ">=1.2.0" },
{ name = "langchain-chroma", specifier = ">=1.0.0" }, { name = "langchain-chroma", specifier = ">=1.0.0" },
{ name = "langchain-community", specifier = ">=0.4.1" }, { name = "langchain-community", specifier = ">=0.4.1" },
{ name = "langchain-ollama", specifier = ">=1.0.1" },
{ name = "langchain-openai", specifier = ">=1.1.6" }, { name = "langchain-openai", specifier = ">=1.1.6" },
{ name = "ollama", specifier = ">=0.6.0" }, { name = "ollama", specifier = ">=0.6.1" },
{ name = "openai", specifier = ">=2.0.1" }, { name = "openai", specifier = ">=2.0.1" },
{ name = "pillow", specifier = ">=10.0.0" }, { name = "pillow", specifier = ">=10.0.0" },
{ name = "pillow-heif", specifier = ">=1.1.1" }, { name = "pillow-heif", specifier = ">=1.1.1" },
@@ -2584,8 +2583,10 @@ requires-dist = [
{ name = "quart-jwt-extended", specifier = ">=0.1.0" }, { name = "quart-jwt-extended", specifier = ">=0.1.0" },
{ name = "tavily-python", specifier = ">=0.7.17" }, { name = "tavily-python", specifier = ">=0.7.17" },
{ name = "tomlkit", specifier = ">=0.13.3" }, { name = "tomlkit", specifier = ">=0.13.3" },
{ name = "tortoise-orm", specifier = ">=0.25.1" }, { name = "tortoise-orm", specifier = ">=0.25.1,<1.0.0" },
{ name = "tortoise-orm-stubs", specifier = ">=1.0.2" }, { name = "tortoise-orm-stubs", specifier = ">=1.0.2" },
{ name = "twilio", specifier = ">=9.10.2" },
{ name = "ynab", specifier = ">=1.3.0" },
] ]
[[package]] [[package]]
@@ -3000,6 +3001,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
] ]
[[package]]
name = "twilio"
version = "9.10.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "aiohttp" },
{ name = "aiohttp-retry" },
{ name = "pyjwt" },
{ name = "requests" },
]
sdist = { url = "https://files.pythonhosted.org/packages/1c/a1/44cd8604eb69b1c5e7c0f07f0e4305b1884a3b75e23eb8d89350fe7bb982/twilio-9.10.2.tar.gz", hash = "sha256:f17d778870a7419a7278d5747b0e80a1c89e6f5ab14acf5456a004f8f2016bfa", size = 1618748, upload-time = "2026-02-18T04:40:44.279Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6c/ac/e1937f70544075f896bfcd6b23fa7c15cad945e4598bcfa7017b7c120ad8/twilio-9.10.2-py2.py3-none-any.whl", hash = "sha256:8722bb59bacf31fab5725d6f5d3fac2224265c669d38f653f53179165533da43", size = 2256481, upload-time = "2026-02-18T04:40:42.226Z" },
]
[[package]] [[package]]
name = "typer" name = "typer"
version = "0.19.2" version = "0.19.2"
@@ -3385,6 +3401,22 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" },
] ]
[[package]]
name = "ynab"
version = "1.9.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "pydantic" },
{ name = "python-dateutil" },
{ name = "typing-extensions" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/9a/3e/36599ae876db3e1d32e393ab0934547df75bab70373c14ca5805246f99bc/ynab-1.9.0.tar.gz", hash = "sha256:fa50bdff641b3a273661e9f6e8a210f5ad98991a998dc09dec0a8122d734d1c6", size = 64898, upload-time = "2025-10-06T19:14:32.707Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b2/9c/0ccd11bcdf7522fcb2823fcd7ffbb48e3164d72caaf3f920c7b068347175/ynab-1.9.0-py3-none-any.whl", hash = "sha256:72ac0219605b4280149684ecd0fec3bd75d938772d65cdeea9b3e66a1b2f470d", size = 208674, upload-time = "2025-10-06T19:14:31.719Z" },
]
[[package]] [[package]]
name = "zipp" name = "zipp"
version = "3.23.0" version = "3.23.0"