bigwolfe committed
Commit c449c99 · 1 parent: f820354
Commit message: bugs
backend/src/services/rag_index.py
CHANGED
@@ -2,9 +2,13 @@
 
 import logging
 import os
+import threading
 from pathlib import Path
 from typing import Optional, List
 
+# Configure logger first so it can be used in try/except
+logger = logging.getLogger(__name__)
+
 from llama_index.core import (
     VectorStoreIndex,
     SimpleDirectoryReader,
@@ -14,6 +18,7 @@ from llama_index.core import (
     Settings
 )
 
+# Try to import Gemini, handle missing dependency gracefully
 try:
     from llama_index.llms.google_genai import Gemini
     from llama_index.embeddings.google_genai import GeminiEmbedding
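Note on these two import hunks: moving the module logger above the llama_index imports lets the optional-dependency guard log something useful when the Gemini extra is missing. A generic sketch of that guard pattern, assuming a hypothetical GEMINI_AVAILABLE flag (what the real except branch does is outside this diff):

import logging

logger = logging.getLogger(__name__)  # configured before the guarded import

try:
    from llama_index.llms.google_genai import Gemini  # optional extra
    GEMINI_AVAILABLE = True
except ImportError:
    logger.warning("google-genai extras not installed; Gemini features disabled")
    Gemini = None
    GEMINI_AVAILABLE = False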
@@ -29,15 +34,29 @@ from .config import get_config
 from .vault import VaultService
 from ..models.rag import ChatMessage, ChatResponse, SourceReference, StatusResponse
 
-logger = logging.getLogger(__name__)
-
 class RAGIndexService:
     """Service for managing LlamaIndex vector stores."""
+
+    _instance = None
+    _lock = threading.Lock()
+
+    def __new__(cls):
+        if cls._instance is None:
+            with cls._lock:
+                if cls._instance is None:
+                    cls._instance = super(RAGIndexService, cls).__new__(cls)
+                    cls._instance._initialized = False
+        return cls._instance
 
     def __init__(self):
+        if getattr(self, "_initialized", False):
+            return
+
         self.vault_service = VaultService()
         self.config = get_config()
+        self._index_lock = threading.Lock()  # Per-instance lock for index ops
         self._setup_gemini()
+        self._initialized = True
 
     def _setup_gemini(self):
         """Configure global LlamaIndex settings for Gemini."""
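The __new__ override above is the classic double-checked locking singleton. A minimal standalone sketch of why the inner check matters, using a hypothetical Service class:

import threading

class Service:
    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        # Fast path: skip the lock once the instance exists.
        if cls._instance is None:
            with cls._lock:
                # Re-check under the lock: another thread may have won
                # the race while this one was waiting.
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
        return cls._instance

assert Service() is Service()  # every construction returns the same object

Since __init__ still runs on every RAGIndexService() call, the commit also adds the _initialized guard so repeated construction does not re-run setup.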
@@ -71,17 +90,18 @@
 
     def get_or_build_index(self, user_id: str) -> VectorStoreIndex:
         """Load existing index or build a new one from vault notes."""
-
-
-
-
-
-
-
-
-
-
-
+        with self._index_lock:
+            persist_dir = self.get_persist_dir(user_id)
+
+            # check if index files exist (docstore.json, index_store.json etc)
+            try:
+                storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
+                index = load_index_from_storage(storage_context)
+                logger.info(f"Loaded existing index for user {user_id}")
+                return index
+            except Exception:
+                logger.info(f"No valid index found for {user_id}, building new one...")
+                return self.build_index(user_id)
 
     def build_index(self, user_id: str) -> VectorStoreIndex:
         """Build a new index from the user's vault."""
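The new get_or_build_index body is LlamaIndex's standard persist/load round trip. A minimal sketch of the pattern in isolation, assuming Settings already holds an embedding model and using a hypothetical ./storage directory:

from llama_index.core import (
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)

PERSIST_DIR = "./storage"  # hypothetical path for this sketch

def get_or_build(docs: list) -> VectorStoreIndex:
    try:
        # Raises if persist_dir is missing or incomplete.
        ctx = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
        return load_index_from_storage(ctx)
    except Exception:
        index = VectorStoreIndex.from_documents(docs)
        index.storage_context.persist(persist_dir=PERSIST_DIR)
        return index

One caveat worth flagging in review: the bare except Exception also swallows genuine corruption errors and silently triggers a rebuild.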
@@ -90,6 +110,15 @@
 
         # Read notes from VaultService
         notes = self.vault_service.list_notes(user_id)
+        if not notes:
+            # Handle empty vault (Fix #8)
+            logger.info(f"No notes found for {user_id}, creating empty index")
+            index = VectorStoreIndex.from_documents([])
+            # Persist empty index to avoid rebuilding every time?
+            # LlamaIndex might not persist empty index well.
+            # Let's just return it.
+            return index
+
         documents = []
 
         for note_summary in notes:
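The inline comments leave the empty-index persistence question open. A quick probe one could run to settle it (assumes an embedding model is configured in Settings; tmp_index is a throwaway directory):

from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage

index = VectorStoreIndex.from_documents([])  # the empty-vault case
index.storage_context.persist(persist_dir="tmp_index")

# If this load raises, the "just return it" fallback above is the safer
# choice; if it succeeds, the empty index could be persisted as well.
reloaded = load_index_from_storage(StorageContext.from_defaults(persist_dir="tmp_index"))
print(len(reloaded.docstore.docs))  # expect 0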
@@ -131,10 +160,25 @@
         persist_dir = self.get_persist_dir(user_id)
         doc_store_path = os.path.join(persist_dir, "docstore.json")
 
-
-
+        doc_count = 0
+        status = "building"
 
-
+        if os.path.exists(doc_store_path):
+            status = "ready"
+            try:
+                # Simple line count or file size check to avoid loading whole JSON
+                # Actually, docstore.json is a dict.
+                # Let's just load it if it's small, or stat it.
+                # For MVP, just checking existence is "ready".
+                # To get count, we can try loading keys.
+                import json
+                with open(doc_store_path, 'r') as f:
+                    data = json.load(f)
+                doc_count = len(data.get("docstore/data", {}))
+            except Exception:
+                logger.warning(f"Failed to read docstore for status: {doc_store_path}")
+
+        return StatusResponse(status=status, doc_count=doc_count, last_updated=None)
 
     def chat(self, user_id: str, messages: List[ChatMessage]) -> ChatResponse:
        """Run RAG chat query with history."""
backend/tests/unit/test_rag_service.py
CHANGED
@@ -82,10 +82,10 @@ def test_get_status(mock_exists, rag_service):
 def test_chat(mock_storage, mock_load, rag_service):
     user_id = "test-user"
 
-    # Mock Index and
+    # Mock Index and ChatEngine
     mock_index = MagicMock()
-
-    mock_index.
+    mock_chat_engine = MagicMock()
+    mock_index.as_chat_engine.return_value = mock_chat_engine
     mock_load.return_value = mock_index
 
     # Mock Response
@@ -99,7 +99,7 @@ def test_chat(mock_storage, mock_load, rag_service):
     mock_node.score = 0.9
     mock_response.source_nodes = [mock_node]
 
-
+    mock_chat_engine.chat.return_value = mock_response
 
     from backend.src.models.rag import ChatMessage
     messages = [ChatMessage(role="user", content="Question")]
@@ -109,4 +109,4 @@ def test_chat(mock_storage, mock_load, rag_service):
     assert response.answer == "AI Answer"
     assert len(response.sources) == 1
     assert response.sources[0].path == "note.md"
-
+    mock_chat_engine.chat.assert_called()
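The tightened mocks pin down the call path the test expects from the service: obtain a chat engine from the index, then call .chat() on it. A sketch of that assumed pattern (the real as_chat_engine arguments used by RAGIndexService are not visible in this diff):

from llama_index.core import VectorStoreIndex

def answer(index: VectorStoreIndex, question: str):
    # Mirrors what the mocks assert: as_chat_engine() then chat().
    chat_engine = index.as_chat_engine()
    response = chat_engine.chat(question)
    # response.source_nodes carries retrieved chunks, each with .score
    return str(response), response.source_nodes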
frontend/src/components/ChatPanel.tsx
CHANGED
@@ -34,14 +34,16 @@ export function ChatPanel({ onNavigateToNote }: ChatPanelProps) {
       timestamp: new Date().toISOString()
     };
 
-
+    // Construct new history immediately
+    const newHistory = [...messages, userMsg];
+
+    // Optimistically update UI
+    setMessages(newHistory);
     setInput('');
     setIsLoading(true);
 
     try {
-
-      const history = [...messages, userMsg];
-      const response = await sendChat({ messages: history });
+      const response = await sendChat({ messages: newHistory });
 
       const assistantMsg: ChatMessageType = {
         role: 'assistant',