File size: 12,794 Bytes
e0b680e
9a5aebe
 
e0b680e
505874e
2185b3f
 
9a5aebe
 
2185b3f
 
 
 
e0b680e
2185b3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0b680e
2185b3f
 
 
 
 
 
 
 
 
 
 
e0b680e
9a5aebe
 
 
 
e0b680e
9a5aebe
 
2185b3f
9a5aebe
 
2185b3f
 
 
9a5aebe
 
 
 
 
 
 
 
 
2185b3f
 
 
 
 
 
 
8fbc2e2
2185b3f
 
 
 
 
 
 
 
 
 
 
 
505874e
2185b3f
 
505874e
2185b3f
 
 
505874e
2185b3f
 
e0b680e
2185b3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9a5aebe
2185b3f
 
 
 
 
 
9a5aebe
 
 
2185b3f
 
 
 
 
 
 
9a5aebe
 
 
2185b3f
 
 
 
 
 
 
9a5aebe
 
 
 
 
 
 
 
 
 
 
 
 
 
2185b3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
505874e
aa52203
9a5aebe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2185b3f
9a5aebe
 
 
 
 
 
2185b3f
9a5aebe
2185b3f
6b12097
2185b3f
 
 
9a5aebe
2185b3f
9a5aebe
2185b3f
 
 
 
 
 
 
 
 
 
 
9a5aebe
2185b3f
 
 
 
 
 
 
e0b680e
2185b3f
9a5aebe
 
 
 
 
 
 
 
2185b3f
 
9a5aebe
 
 
 
 
 
 
 
 
3f7a0f7
9a5aebe
 
2185b3f
9a5aebe
 
 
 
 
3f7a0f7
9a5aebe
 
 
 
2185b3f
 
 
 
8fbc2e2
2185b3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9a5aebe
2185b3f
e0b680e
2185b3f
9a5aebe
 
 
 
 
 
2185b3f
 
 
 
 
 
 
 
e0b680e
cdf3c8a
9a5aebe
8fbc2e2
2185b3f
9a5aebe
 
 
 
 
 
2185b3f
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
"""
Performance-Optimized Hugging Face Spaces Entry Point
Solves slow response and loading issues
"""
import os
import sys
from pathlib import Path
import asyncio
from concurrent.futures import ThreadPoolExecutor

# Add the current directory to Python path for Spaces environment
sys.path.insert(0, str(Path(__file__).parent))

from openai import OpenAI
from src.config import Config
from src.vector_store import VectorStoreManager
from src.rag_query import RAGQueryEngine
from src.question_generator import QuestionGenerator
from src.knowledge_graph import KnowledgeGraphGenerator
from src.gradio_interface import GradioInterfaceBuilder

# Import personalized learning if available
try:
    from modules.personalized_learning import UserProfilingSystem, LearningPathGenerator, AdaptiveLearningEngine
    PERSONALIZED_LEARNING_AVAILABLE = True
except ImportError:
    PERSONALIZED_LEARNING_AVAILABLE = False
    print("⚠️ Personalized learning modules not available")

# Import proactive learning if available
try:
    from modules.proactive_learning import ProactiveLearningEngine
    PROACTIVE_LEARNING_AVAILABLE = True
except ImportError:
    PROACTIVE_LEARNING_AVAILABLE = False
    print("⚠️ Proactive learning modules not available")

# Import scenario contextualization if available
try:
    from modules.scenario_contextualization.database.scenario_database import ScenarioDatabase
    from modules.scenario_contextualization.integration.feature_extractor import ADASFeatureExtractor
    from modules.scenario_contextualization.retrieval.scenario_retriever import ScenarioRetriever
    from modules.scenario_contextualization.formatting.constructive_formatter import ConstructiveFormatter
    from modules.scenario_contextualization.integration.enhanced_rag_engine import EnhancedRAGEngine
    SCENARIO_CONTEXTUALIZATION_AVAILABLE = True
except ImportError as e:
    SCENARIO_CONTEXTUALIZATION_AVAILABLE = False
    print(f"⚠️ Scenario contextualization modules not available: {e}")

# Performance configuration
ENABLE_CACHING = True  # Enable query caching
MAX_WORKERS = 4  # Thread pool size
QUERY_TIMEOUT = 30  # Query timeout in seconds

# Global thread pool for async processing
executor = ThreadPoolExecutor(max_workers=MAX_WORKERS)

# Simple in-memory cache for queries
query_cache = {}


def initialize_system(config: Config) -> dict:
    """
    Initialize the RAG system components with performance optimization
    
    Args:
        config: Configuration object
        
    Returns:
        Dictionary containing all initialized components
    """
    print("πŸ”§ Initializing core components...")
    
    # Initialize OpenAI client
    if not config.openai_api_key:
        raise ValueError(
            "OPENAI_API_KEY not found! Please set it in Hugging Face Spaces Secrets. "
            "Go to Settings > Secrets and add OPENAI_API_KEY"
        )
    
    client = OpenAI(api_key=config.openai_api_key)
    
    # Initialize vector store manager
    vector_store_manager = VectorStoreManager(client)
    
    # Get or create vector store
    vector_store_id = config.get_vector_store_id()
    
    if not vector_store_id:
        print("πŸ“¦ Creating new vector store...")
        pdf_files = config.get_pdf_files()
        
        if not pdf_files:
            raise ValueError(f"No PDF files found in {config.car_manual_dir}")
        
        vector_store_details = vector_store_manager.create_vector_store(config.vector_store_name)
        if not vector_store_details:
            raise RuntimeError("Failed to create vector store")
        
        vector_store_id = vector_store_details["id"]
        config.save_vector_store_id(vector_store_id, config.vector_store_name)
        
        # Upload files
        upload_stats = vector_store_manager.upload_pdf_files(pdf_files, vector_store_id)
        if upload_stats["successful_uploads"] == 0:
            raise RuntimeError("Failed to upload any files")
    else:
        print(f"βœ… Using existing vector store: {vector_store_id}")
    
    # Initialize RAG query engine
    print("πŸ”§ Initializing RAG engine...")
    rag_engine = RAGQueryEngine(client, vector_store_id, config.model)
    
    # Initialize question generator
    print("πŸ”§ Initializing question generator...")
    question_generator = QuestionGenerator(client, rag_engine)
    
    # Initialize knowledge graph generator
    print("πŸ”§ Initializing knowledge graph...")
    knowledge_graph = KnowledgeGraphGenerator(client, vector_store_id, str(config.output_dir))
    
    # Initialize optional modules (with reduced logging)
    user_profiling = None
    learning_path_generator = None
    adaptive_engine = None
    
    if PERSONALIZED_LEARNING_AVAILABLE:
        try:
            user_profiling = UserProfilingSystem()
            learning_path_generator = LearningPathGenerator(user_profiling, config.available_topics)
            adaptive_engine = AdaptiveLearningEngine(user_profiling, learning_path_generator)
            print("βœ… Personalized Learning System initialized!")
        except Exception as e:
            print(f"⚠️ Error initializing Personalized Learning System: {e}")
    
    proactive_engine = None
    if PROACTIVE_LEARNING_AVAILABLE and user_profiling:
        try:
            proactive_engine = ProactiveLearningEngine(
                client, rag_engine, user_profiling, adaptive_engine, config.available_topics
            )
            print("βœ… Proactive Learning Assistance initialized!")
        except Exception as e:
            print(f"⚠️ Error initializing Proactive Learning Assistance: {e}")
    
    enhanced_rag_engine = None
    if SCENARIO_CONTEXTUALIZATION_AVAILABLE:
        try:
            scenario_database = ScenarioDatabase()
            feature_extractor = ADASFeatureExtractor(use_llm=False, client=client)
            scenario_retriever = ScenarioRetriever(
                scenario_database=scenario_database,
                scenario_vector_store_id=None,
                client=client
            )
            formatter = ConstructiveFormatter()
            enhanced_rag_engine = EnhancedRAGEngine(
                base_rag_engine=rag_engine,
                scenario_retriever=scenario_retriever,
                feature_extractor=feature_extractor,
                formatter=formatter
            )
            print("βœ… Scenario Contextualization initialized!")
        except Exception as e:
            print(f"⚠️ Error initializing Scenario Contextualization: {e}")
    
    print("βœ… Core system initialized!")
    return {
        "client": client,
        "vector_store_manager": vector_store_manager,
        "rag_engine": rag_engine,
        "question_generator": question_generator,
        "knowledge_graph": knowledge_graph,
        "user_profiling": user_profiling,
        "learning_path_generator": learning_path_generator,
        "adaptive_engine": adaptive_engine,
        "proactive_engine": proactive_engine,
        "enhanced_rag_engine": enhanced_rag_engine,
        "config": config
    }


def create_optimized_query_wrapper(rag_engine):
    """
    Create an optimized query wrapper with caching, timeout, and async processing
    
    Args:
        rag_engine: The RAG query engine to wrap
        
    Returns:
        Optimized query function
    """
    def query_with_optimization(question: str, use_cache: bool = True) -> str:
        """
        Optimized query function with caching and timeout protection
        
        Args:
            question: User's question
            use_cache: Whether to use cache (default: True)
            
        Returns:
            Answer string
        """
        if not question or not question.strip():
            return "Please enter a question."
        
        # Normalize question for cache key
        cache_key = question.strip().lower()
        
        # Check cache
        if use_cache and ENABLE_CACHING and cache_key in query_cache:
            print(f"πŸ“‹ Using cached result for: {question[:50]}...")
            return query_cache[cache_key]
        
        try:
            print(f"πŸ” Processing query: {question[:50]}...")
            
            # Execute query using thread pool (non-blocking)
            future = executor.submit(rag_engine.query, question)
            
            # Wait for result with timeout
            result = future.result(timeout=QUERY_TIMEOUT)
            
            # Cache the result
            if ENABLE_CACHING:
                query_cache[cache_key] = result
                # Limit cache size
                if len(query_cache) > 100:
                    # Remove oldest entry
                    query_cache.pop(next(iter(query_cache)))
            
            print(f"βœ… Query completed successfully")
            return result
            
        except TimeoutError:
            error_msg = "⏱️ Query timeout. Please try a simpler question or try again later."
            print(error_msg)
            return error_msg
        except Exception as e:
            error_msg = f"❌ Error processing query: {str(e)}"
            print(error_msg)
            return error_msg
    
    return query_with_optimization


def create_app():
    """
    Create and return the optimized Gradio app for Hugging Face Spaces
    
    Returns:
        Gradio Blocks app
    """
    print("=" * 60)
    print("πŸš— CSRC Car Manual RAG System - Performance Optimized")
    print("=" * 60)
    
    # Load configuration
    config = Config()
    
    # Initialize system
    try:
        components = initialize_system(config)
    except Exception as e:
        print(f"❌ Error initializing system: {e}")
        import traceback
        traceback.print_exc()
        import gradio as gr
        
        error_msg = f"""
        # ❌ Initialization Error
        
        **Error:** {str(e)}
        
        Please check the logs for more details.
        """
        
        return gr.Interface(
            fn=lambda: error_msg,
            inputs=None,
            outputs=gr.Markdown(),
            title="CSRC Car Manual RAG System",
        )
    
    # Create optimized query wrapper
    optimized_query = create_optimized_query_wrapper(components["rag_engine"])
    
    # Replace original RAG engine query method with optimized version
    original_query = components["rag_engine"].query
    components["rag_engine"].query = optimized_query
    
    # Build Gradio interface
    print("\n🌐 Building Gradio interface...")
    try:
        interface_builder = GradioInterfaceBuilder(
            rag_engine=components["rag_engine"],
            question_generator=components["question_generator"],
            knowledge_graph=components["knowledge_graph"],
            config=components["config"],
            user_profiling=components["user_profiling"],
            adaptive_engine=components["adaptive_engine"],
            proactive_engine=components["proactive_engine"]
        )
        
        print("πŸ“¦ Creating interface components...")
        demo = interface_builder.create_interface()
        
        # Enable queue for better performance
        print("⚑ Enabling queue for better performance...")
        demo.queue(
            max_size=20,  # Maximum queue size
            default_concurrency_limit=5  # Concurrency limit
        )
        
        print("βœ… Gradio interface created successfully!")
        return demo
        
    except Exception as e:
        print(f"❌ Error building Gradio interface: {e}")
        import traceback
        traceback.print_exc()
        
        import gradio as gr
        error_msg = f"""
        # ❌ Interface Building Error
        
        **Error:** {str(e)}
        """
        
        return gr.Interface(
            fn=lambda: error_msg,
            inputs=None,
            outputs=gr.Markdown(),
            title="CSRC Car Manual RAG System",
        )


# Prevent multiple initializations using singleton pattern
_app_instance = None

def get_app():
    """
    Get or create the app instance (singleton pattern)
    
    Returns:
        Gradio app instance
    """
    global _app_instance
    if _app_instance is None:
        print("πŸ”„ Creating new app instance...")
        _app_instance = create_app()
        print("βœ… App instance created!")
    else:
        print("♻️  Reusing existing app instance")
    return _app_instance


# For Hugging Face Spaces auto-detection
if __name__ == "__main__":
    demo = get_app()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,  # Show detailed errors
        favicon_path=None,  # Skip favicon loading for faster startup
    )
else:
    # Module-level variable for Spaces auto-detection
    demo = get_app()