Spaces:
Sleeping
Sleeping
Upload 34 files
Browse files- .gitattributes +4 -0
- car_manual/Function of Active Distance Assist DISTRONIC.pdf +3 -0
- car_manual/Function of Active Lane Change Assist.pdf +3 -0
- car_manual/Function of Active Steering Assist.pdf +3 -0
- car_manual/Function of Active Stop-and-Go Assist.pdf +3 -0
- config/vector_store_config.json +6 -0
- modules/__pycache__/cold_start_onboarding.cpython-312.pyc +0 -0
- modules/__pycache__/personalized_learning.cpython-312.pyc +0 -0
- modules/__pycache__/proactive_learning.cpython-312.pyc +0 -0
- modules/cold_start_onboarding.py +186 -0
- modules/integrate_personalized_learning.py +326 -0
- modules/personalized_learning.py +794 -0
- modules/proactive_learning.py +522 -0
- modules/scenario_contextualization/__init__.py +5 -0
- modules/scenario_contextualization/__pycache__/__init__.cpython-312.pyc +0 -0
- modules/scenario_contextualization/database/__init__.py +4 -0
- modules/scenario_contextualization/database/__pycache__/__init__.cpython-312.pyc +0 -0
- modules/scenario_contextualization/database/__pycache__/scenario_database.cpython-312.pyc +0 -0
- modules/scenario_contextualization/database/__pycache__/scenario_models.cpython-312.pyc +0 -0
- modules/scenario_contextualization/database/scenario_database.py +105 -0
- modules/scenario_contextualization/database/scenario_models.py +153 -0
- modules/scenario_contextualization/formatting/__init__.py +4 -0
- modules/scenario_contextualization/formatting/__pycache__/__init__.cpython-312.pyc +0 -0
- modules/scenario_contextualization/formatting/__pycache__/constructive_formatter.cpython-312.pyc +0 -0
- modules/scenario_contextualization/formatting/constructive_formatter.py +152 -0
- modules/scenario_contextualization/integration/__init__.py +4 -0
- modules/scenario_contextualization/integration/__pycache__/__init__.cpython-312.pyc +0 -0
- modules/scenario_contextualization/integration/__pycache__/enhanced_rag_engine.cpython-312.pyc +0 -0
- modules/scenario_contextualization/integration/__pycache__/feature_extractor.cpython-312.pyc +0 -0
- modules/scenario_contextualization/integration/enhanced_rag_engine.py +94 -0
- modules/scenario_contextualization/integration/feature_extractor.py +95 -0
- modules/scenario_contextualization/retrieval/__init__.py +4 -0
- modules/scenario_contextualization/retrieval/__pycache__/__init__.cpython-312.pyc +0 -0
- modules/scenario_contextualization/retrieval/__pycache__/scenario_retriever.cpython-312.pyc +0 -0
- modules/scenario_contextualization/retrieval/scenario_retriever.py +154 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
car_manual/Function[[:space:]]of[[:space:]]Active[[:space:]]Distance[[:space:]]Assist[[:space:]]DISTRONIC.pdf filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
car_manual/Function[[:space:]]of[[:space:]]Active[[:space:]]Lane[[:space:]]Change[[:space:]]Assist.pdf filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
car_manual/Function[[:space:]]of[[:space:]]Active[[:space:]]Steering[[:space:]]Assist.pdf filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
car_manual/Function[[:space:]]of[[:space:]]Active[[:space:]]Stop-and-Go[[:space:]]Assist.pdf filter=lfs diff=lfs merge=lfs -text
|
car_manual/Function of Active Distance Assist DISTRONIC.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a8c42ee32602cf2f5a5a19a494d3f9c1f80073b009c5cda9b45cf4bb69317577
|
| 3 |
+
size 1959700
|
car_manual/Function of Active Lane Change Assist.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f8edf5b9e2c91a3ff9ae6bc810558cd97b27f4db96bd76fed2df0f49f6ad76f3
|
| 3 |
+
size 2540001
|
car_manual/Function of Active Steering Assist.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3350c956d673f631d1d10b11c8a0a13e241d40730cc7ebfd21496e45373f6a4d
|
| 3 |
+
size 1594043
|
car_manual/Function of Active Stop-and-Go Assist.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9f20350e91c7f645416fd4fe72e47a73c2b7bc7eaa06195bd8b2be7440adbb8c
|
| 3 |
+
size 1068285
|
config/vector_store_config.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "vs_69022a9edd74819199bf9a34a83e877b",
|
| 3 |
+
"name": "mercedes_manual_store_local",
|
| 4 |
+
"created_at": 1761749663,
|
| 5 |
+
"file_count": 0
|
| 6 |
+
}
|
modules/__pycache__/cold_start_onboarding.cpython-312.pyc
ADDED
|
Binary file (8.08 kB). View file
|
|
|
modules/__pycache__/personalized_learning.cpython-312.pyc
ADDED
|
Binary file (37.2 kB). View file
|
|
|
modules/__pycache__/proactive_learning.cpython-312.pyc
ADDED
|
Binary file (23.4 kB). View file
|
|
|
modules/cold_start_onboarding.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Cold start onboarding module
|
| 3 |
+
Used to collect initial information from new users
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import gradio as gr
|
| 7 |
+
from typing import Dict, List
|
| 8 |
+
try:
|
| 9 |
+
from modules.personalized_learning import UserProfilingSystem
|
| 10 |
+
except ImportError:
|
| 11 |
+
# Fallback for direct import
|
| 12 |
+
from personalized_learning import UserProfilingSystem
|
| 13 |
+
|
| 14 |
+
def create_onboarding_interface(user_profiling: UserProfilingSystem, available_topics: List[str]):
    """Create cold start onboarding interface.

    Builds a ``gr.Blocks`` form that collects a new user's background,
    learning preferences, goals, and an initial self-assessed knowledge
    survey, then persists them via ``user_profiling.complete_onboarding``.

    Args:
        user_profiling: Profiling system used to store the onboarding data.
        available_topics: Topic names; one knowledge slider is created per topic.

    Returns:
        The assembled ``gr.Blocks`` onboarding interface.
    """

    def process_onboarding(user_id: str, background: str, learning_style: str,
                           learning_pace: str, learning_goals: List[str],
                           knowledge_survey: Dict[str, float]) -> Dict:
        """Persist collected cold-start data and return a result summary."""
        onboarding_data = {
            'learning_style': learning_style,
            'learning_pace': learning_pace,
            'background_experience': background,
            'learning_goals': learning_goals if learning_goals else [],
            'initial_knowledge_survey': knowledge_survey,
            'initial_assessment_completed': True
        }

        # Complete cold start setup (side effect: profile created/updated).
        user_profiling.complete_onboarding(user_id, onboarding_data)

        return {
            "status": "success",
            "message": f"Onboarding completed for {user_id}",
            "profile_summary": user_profiling.get_profile_summary(user_id)
        }

    def create_onboarding_form():
        """Create cold start form."""
        with gr.Blocks(title="Welcome! Let's Get Started") as onboarding:
            gr.Markdown("# 🎯 Welcome to Personalized Learning!")
            gr.Markdown("We need some information to create your personalized learning path.")

            with gr.Row():
                user_id_input = gr.Textbox(
                    label="User ID",
                    placeholder="Enter your user ID",
                    value="new_user"
                )

            with gr.Accordion("📋 Step 1: Background Information", open=True):
                background_input = gr.Radio(
                    label="What's your experience with ADAS systems?",
                    choices=[
                        ("Beginner - I'm new to ADAS systems", "beginner"),
                        ("Intermediate - I know some basics", "intermediate"),
                        ("Experienced - I have good knowledge", "experienced")
                    ],
                    value="beginner"
                )

            with gr.Accordion("🎨 Step 2: Learning Preferences", open=True):
                learning_style_input = gr.Radio(
                    label="How do you prefer to learn?",
                    choices=[
                        ("Visual - I like diagrams and illustrations", "visual"),
                        ("Textual - I prefer reading and explanations", "textual"),
                        ("Practical - I learn by doing", "practical"),
                        ("Mixed - I like a combination", "mixed")
                    ],
                    value="mixed"
                )

                learning_pace_input = gr.Radio(
                    label="What's your preferred learning pace?",
                    choices=[
                        ("Slow - I like to take my time", "slow"),
                        ("Medium - Normal pace is fine", "medium"),
                        ("Fast - I want to learn quickly", "fast")
                    ],
                    value="medium"
                )

            with gr.Accordion("🎯 Step 3: Learning Goals", open=True):
                learning_goals_input = gr.CheckboxGroup(
                    label="What are your learning goals? (Select all that apply)",
                    choices=[
                        "Understand basic ADAS functions",
                        "Learn how to operate ADAS features",
                        "Master advanced ADAS capabilities",
                        "Troubleshoot ADAS issues",
                        "Prepare for certification",
                        "General knowledge improvement"
                    ],
                    value=["Understand basic ADAS functions"]
                )

            with gr.Accordion("📊 Step 4: Initial Knowledge Assessment", open=True):
                gr.Markdown("Rate your familiarity with each topic (0 = No knowledge, 1 = Expert)")

                knowledge_sliders = {}
                for topic in available_topics:
                    # Simplify topic name for display
                    display_name = topic.replace("Function of ", "").replace(" Assist", "")
                    knowledge_sliders[topic] = gr.Slider(
                        label=display_name,
                        minimum=0.0,
                        maximum=1.0,
                        value=0.0,
                        step=0.1
                    )

            with gr.Row():
                submit_btn = gr.Button("Complete Setup", variant="primary")

            output_result = gr.JSON(label="Setup Result")

            def submit_onboarding(user_id: str, background: str, learning_style: str,
                                  learning_pace: str, learning_goals: List[str],
                                  *knowledge_values):
                """Submit cold start data.

                FIX: Gradio calls event handlers with the components' values as
                *positional* arguments, so the original ``**knowledge_values``
                signature raised TypeError on submit and the survey dict was
                unreachable. Collect slider values as ``*knowledge_values`` and
                pair them with ``available_topics`` in slider-creation order.
                """
                knowledge_survey = dict(zip(available_topics, knowledge_values))
                # Any topic that somehow lacks a slider value defaults to 0.0.
                for topic in available_topics:
                    knowledge_survey.setdefault(topic, 0.0)

                # Defensive: unwrap (label, value) tuples if a Radio ever returns one.
                if isinstance(background, tuple):
                    background = background[1] if len(background) > 1 else background[0]
                if isinstance(learning_style, tuple):
                    learning_style = learning_style[1] if len(learning_style) > 1 else learning_style[0]
                if isinstance(learning_pace, tuple):
                    learning_pace = learning_pace[1] if len(learning_pace) > 1 else learning_pace[0]

                return process_onboarding(
                    user_id, background, learning_style, learning_pace,
                    learning_goals, knowledge_survey
                )

            # Build input list: fixed fields first, then sliders in creation order.
            inputs = [user_id_input, background_input, learning_style_input,
                      learning_pace_input, learning_goals_input] + list(knowledge_sliders.values())

            submit_btn.click(
                submit_onboarding,
                inputs=inputs,
                outputs=output_result
            )

        return onboarding

    return create_onboarding_form()
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def check_and_show_onboarding(user_profiling: UserProfilingSystem, user_id: str) -> bool:
    """Return True when the cold-start onboarding UI should be shown for *user_id*.

    Delegates entirely to the profiling system's cold-start flag.
    """
    return user_profiling.is_cold_start(user_id)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def get_onboarding_data_summary(user_profiling: UserProfilingSystem, user_id: str) -> Dict:
    """Summarize the data collected during cold start onboarding.

    Returns a ``{"status": "cold_start", ...}`` marker for users who have not
    finished onboarding; otherwise a snapshot of the stored onboarding fields.
    """
    # Guard clause: nothing to summarize before onboarding is done.
    if user_profiling.is_cold_start(user_id):
        return {
            "status": "cold_start",
            "message": "User has not completed onboarding"
        }

    profile = user_profiling.get_or_create_profile(user_id)

    summary = {"status": "completed"}
    summary["has_completed_onboarding"] = profile.has_completed_onboarding
    summary["background_experience"] = profile.background_experience
    summary["learning_style"] = profile.learning_style
    summary["learning_pace"] = profile.learning_pace
    # Normalize falsy stored values to empty containers.
    summary["learning_goals"] = profile.learning_goals or []
    summary["initial_knowledge_survey"] = profile.initial_knowledge_survey or {}
    summary["initial_assessment_completed"] = profile.initial_assessment_completed
    return summary
|
| 186 |
+
|
modules/integrate_personalized_learning.py
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Integrate personalized learning pathway functionality into Gradio interface
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import gradio as gr
|
| 6 |
+
import json
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from personalized_learning import (
|
| 9 |
+
UserProfilingSystem,
|
| 10 |
+
LearningPathGenerator,
|
| 11 |
+
AdaptiveLearningEngine
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
# Initialize system components
|
| 15 |
+
def initialize_personalized_learning(available_topics: list, client):
    """Initialize the personalized learning system.

    Wires up the profiling system, path generator, and adaptive engine and
    returns them as a triple.

    NOTE(review): ``client`` is accepted for interface compatibility but is
    not used by this initializer.
    """
    profiling_system = UserProfilingSystem()
    path_generator = LearningPathGenerator(profiling_system, available_topics)
    engine = AdaptiveLearningEngine(profiling_system, path_generator)
    return profiling_system, path_generator, engine
|
| 22 |
+
|
| 23 |
+
# Create personalized learning path tab
|
| 24 |
+
def create_personalized_learning_tab(adaptive_engine, user_profiling, query_rag_model,
                                     generate_multiple_choice_questions, client):
    """Create the "Personalized Learning Path" tab.

    Builds the Gradio UI for loading a user's profile, generating an adaptive
    learning path, and displaying recommendations.

    Args:
        adaptive_engine: engine used to build/update learning paths.
        user_profiling: UserProfilingSystem holding per-user profiles.
        query_rag_model: RAG query callable (accepted for interface
            compatibility; not used here yet).
        generate_multiple_choice_questions: question generator callable
            (accepted for interface compatibility; not used here yet).
        client: API client (accepted for interface compatibility; not used).

    Returns:
        dict exposing ``adaptive_engine`` and ``user_profiling`` to the caller.
    """

    with gr.TabItem("Personalized Learning Path"):
        gr.Markdown("## 🎯 Your Personalized Learning Journey")
        gr.Markdown("Get a customized learning path based on your knowledge profile and performance.")

        # User ID input
        with gr.Row():
            user_id_input = gr.Textbox(
                label="User ID",
                placeholder="Enter your user ID (e.g., user_001)",
                value="default_user"
            )
            load_profile_btn = gr.Button("Load My Profile")

        # User profile display
        with gr.Column(visible=False) as profile_container:
            profile_summary = gr.Markdown()

            with gr.Row():
                with gr.Column():
                    gr.Markdown("### 📊 Knowledge Profile")
                    knowledge_level_display = gr.JSON()

                with gr.Column():
                    gr.Markdown("### 📈 Learning Statistics")
                    learning_stats = gr.JSON()

        # Learning path section
        with gr.Row():
            focus_areas_input = gr.CheckboxGroup(
                label="Focus Areas (Optional)",
                choices=[],
                value=[],
                interactive=True
            )
            generate_path_btn = gr.Button("Generate Learning Path", variant="primary")

        # Learning path visualization
        with gr.Column(visible=False) as path_container:
            gr.Markdown("### 🗺️ Your Learning Path")

            path_progress = gr.HTML()
            path_visualization = gr.HTML()

            # Current node information
            with gr.Row():
                with gr.Column():
                    current_node_info = gr.Markdown()
                with gr.Column():
                    next_action_btn = gr.Button("Start This Node", variant="primary")
                    skip_node_btn = gr.Button("Skip This Node")

            # Recommendations section
            with gr.Row():
                with gr.Column():
                    gr.Markdown("### 💡 Recommendations")
                    recommendations_display = gr.JSON()

        # Learning activity history
        with gr.Column(visible=False) as history_container:
            gr.Markdown("### 📚 Learning History")
            learning_history = gr.Dataframe(
                headers=["Date", "Topic", "Activity", "Score"],
                interactive=False
            )

        # Handler functions
        def load_user_profile(user_id):
            """Load the user's profile and populate the summary widgets."""
            if not user_id:
                return (
                    gr.update(visible=False),         # profile_container
                    "",                               # profile_summary
                    {},                               # knowledge_level_display
                    {},                               # learning_stats
                    gr.update(choices=[], value=[]),  # focus_areas_input
                    gr.update(visible=False)          # path_container
                )

            # Side effect: creates the profile when the user is new.
            user_profiling.get_or_create_profile(user_id)
            summary = user_profiling.get_profile_summary(user_id)

            summary_text = f"""
### 👤 User Profile: {user_id}

**Learning Style:** {summary['learning_style'].title()}
**Learning Pace:** {summary['learning_pace'].title()}
**Overall Progress:** {summary['overall_progress']:.1%}
**Total Questions Asked:** {summary['total_questions']}
**Total Tests Completed:** {summary['total_tests']}

**Strong Areas:** {', '.join(summary['strong_areas']) if summary['strong_areas'] else 'None yet'}
**Areas Needing Improvement:** {', '.join(summary['weak_areas']) if summary['weak_areas'] else 'None yet'}
"""

            knowledge_data = summary['knowledge_level']
            if not knowledge_data:
                knowledge_data = {"No topics learned yet": 0.0}

            stats_data = {
                "Total Questions": summary['total_questions'],
                "Total Tests": summary['total_tests'],
                "Preferred Topics": summary['preferred_topics'][:5] if summary['preferred_topics'] else [],
                "Overall Progress": f"{summary['overall_progress']:.1%}"
            }

            # Update focus areas options
            all_topics = list(set(list(knowledge_data.keys()) +
                                  summary['preferred_topics'] +
                                  summary['weak_areas']))

            return (
                gr.update(visible=True),
                summary_text,
                knowledge_data,
                stats_data,
                # FIX: returning a bare list to a CheckboxGroup output sets its
                # *value*, not its choices; gr.update is required to replace
                # the selectable options.
                gr.update(choices=all_topics, value=[]),
                gr.update(visible=False)
            )

        def generate_learning_path(user_id, focus_areas):
            """Generate (or refresh) the learning path and render it as HTML."""
            if not user_id:
                return (
                    gr.update(visible=False),
                    "",
                    "",
                    "",
                    {},
                    gr.update(visible=False)
                )

            path = adaptive_engine.create_or_update_path(user_id, focus_areas if focus_areas else None)

            # NOTE(review): visualization data is fetched but not consumed by
            # the hand-built HTML below — presumably intended for a future
            # richer renderer; confirm before removing.
            vis_data = adaptive_engine.get_path_visualization_data(user_id)

            # Progress bar
            progress_html = f"""
<div style="width:100%; background-color:#f0f0f0; border-radius:5px; overflow:hidden; margin:20px 0;">
<div style="width:{path.completion_percentage*100}%; background-color:#4CAF50; height:30px; border-radius:5px; display:flex; align-items:center; justify-content:center; color:white; font-weight:bold;">
{path.completion_percentage*100:.1f}% Complete
</div>
</div>
<p><strong>Total Nodes:</strong> {len(path.nodes)} | <strong>Completed:</strong> {sum(1 for n in path.nodes if n.status == 'completed')} | <strong>Estimated Time:</strong> {path.estimated_total_time} minutes</p>
"""

            # Path visualization
            path_html = "<div style='margin:20px 0;'>"
            path_html += "<h4>Learning Path Structure:</h4>"
            path_html += "<div style='display:flex; flex-direction:column; gap:10px;'>"

            for i, node in enumerate(path.nodes):
                status_color = {
                    "completed": "#4CAF50",
                    "in_progress": "#2196F3",
                    "pending": "#9E9E9E",
                    "skipped": "#FF9800"
                }.get(node.status, "#9E9E9E")

                is_current = i == path.current_node_index
                highlight = "border: 3px solid #FF5722; padding: 10px;" if is_current else "padding: 10px;"

                path_html += f"""
<div style='{highlight} background-color:white; border-left: 5px solid {status_color}; border-radius:5px; margin:5px 0;'>
<div style='display:flex; justify-content:space-between; align-items:center;'>
<div>
<strong>{node.topic}</strong> - {node.bloom_level.title()} ({node.content_type})
<br>
<small>Difficulty: {node.difficulty:.2f} | Time: {node.estimated_time} min</small>
</div>
<div style='color:{status_color}; font-weight:bold;'>
{node.status.title()}
</div>
</div>
</div>
"""

            path_html += "</div></div>"

            # Current node information. Renamed from ``current_node_info`` to
            # avoid shadowing the Markdown component of the same name.
            if path.current_node_index < len(path.nodes):
                current_node = path.nodes[path.current_node_index]
                node_info_md = f"""
### Current Learning Node

**Topic:** {current_node.topic}
**Bloom Level:** {current_node.bloom_level.title()}
**Content Type:** {current_node.content_type.title()}
**Difficulty:** {current_node.difficulty:.2f}
**Estimated Time:** {current_node.estimated_time} minutes

**Status:** {current_node.status.title()}
"""
            else:
                node_info_md = "### Learning Path Complete! 🎉"

            recommendations = adaptive_engine.get_recommendations(user_id)

            return (
                gr.update(visible=True),
                progress_html,
                path_html,
                node_info_md,
                recommendations,
                gr.update(visible=True)
            )

        def start_current_node(user_id):
            """Return a status message for starting the path's current node."""
            path = adaptive_engine.get_active_path(user_id)
            if not path or path.current_node_index >= len(path.nodes):
                return "No active node to start."

            current_node = path.nodes[path.current_node_index]
            return f"Starting learning node: {current_node.topic} - {current_node.bloom_level}"

        # Bind events
        load_profile_btn.click(
            load_user_profile,
            inputs=[user_id_input],
            outputs=[profile_container, profile_summary, knowledge_level_display,
                     learning_stats, focus_areas_input, path_container]
        )

        generate_path_btn.click(
            generate_learning_path,
            inputs=[user_id_input, focus_areas_input],
            outputs=[path_container, path_progress, path_visualization,
                     current_node_info, recommendations_display, history_container]
        )

        next_action_btn.click(
            start_current_node,
            inputs=[user_id_input],
            # NOTE(review): the handler's return value is discarded because no
            # output component is wired; consider routing it to a status box.
            outputs=[]
        )

        # Auto-load default user
        user_id_input.change(
            load_user_profile,
            inputs=[user_id_input],
            outputs=[profile_container, profile_summary, knowledge_level_display,
                     learning_stats, focus_areas_input, path_container]
        )

    return {
        "adaptive_engine": adaptive_engine,
        "user_profiling": user_profiling
    }
|
| 281 |
+
|
| 282 |
+
# Integrate with existing testing functionality
|
| 283 |
+
def integrate_with_testing(adaptive_engine, user_profiling, test_results, user_id):
    """Feed a batch of test results into the personalized learning system.

    Updates the user's profile from the results and, when the active learning
    path has an in-progress node for the tested topic, marks that node
    complete with the average correctness score.
    """
    # Guard clauses: nothing to do without a user or any results.
    if not user_id or not test_results:
        return

    # Topic is read from the first result (assumed uniform across the run).
    topic = test_results[0].get('topic', 'unknown')

    # Update the user's profile with this test run.
    user_profiling.update_from_test_results(user_id, topic, test_results)

    path = adaptive_engine.get_active_path(user_id)
    if not path:
        return

    # Average correctness as the node score; neutral 0.5 when no results.
    correct_flags = [1.0 if r.get('is_correct', False) else 0.0 for r in test_results]
    avg_score = sum(correct_flags) / len(correct_flags) if correct_flags else 0.5

    # Complete the first in-progress node matching this topic, if any.
    for node in path.nodes:
        if node.topic == topic and node.status == "in_progress":
            adaptive_engine.complete_node(user_id, node.node_id, avg_score)
            break
|
| 306 |
+
|
| 307 |
+
# Integrate with Q&A functionality
|
| 308 |
+
def integrate_with_qa(user_profiling, user_id, question):
    """Record a Q&A interaction on the user's profile, tagging a coarse topic.

    A simple first-match keyword scan assigns the question to one of the
    ADAS topics; unmatched questions are recorded with topic ``None``.
    """
    if not user_id or not question:
        return

    lowered = question.lower()

    # Ordered keyword -> topic table; the first matching entry wins, matching
    # the original if/elif priority.
    keyword_topics = (
        (("distronic", "distance"), "DISTRONIC"),
        (("lane",), "Lane Change Assist"),
        (("steering",), "Steering Assist"),
        (("stop", "go"), "Stop-and-Go Assist"),
    )
    topic = next(
        (name for words, name in keyword_topics
         if any(word in lowered for word in words)),
        None,
    )

    # Update the user's profile with this question.
    user_profiling.update_from_question(user_id, question, topic)
|
| 326 |
+
|
modules/personalized_learning.py
ADDED
|
@@ -0,0 +1,794 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Personalized Learning Pathway System
|
| 3 |
+
Implements adaptive learning capabilities that customize instruction based on comprehensive user profiling
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import os
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
from datetime import datetime, timedelta
|
| 11 |
+
from typing import Dict, List, Optional, Tuple
|
| 12 |
+
from dataclasses import dataclass, asdict
|
| 13 |
+
from collections import defaultdict
|
| 14 |
+
import pickle
|
| 15 |
+
|
| 16 |
+
@dataclass
class UserProfile:
    """Per-user learning profile, persisted as JSON by UserProfilingSystem.

    The trailing cold-start fields carry defaults so that older serialized
    profiles that predate onboarding support still deserialize cleanly.
    """
    user_id: str
    knowledge_level: Dict[str, float]  # Knowledge level for each topic (0-1)
    learning_style: str  # "visual", "textual", "practical", "mixed"
    learning_pace: str  # "slow", "medium", "fast"
    preferred_topics: List[str]  # Topics the user asks about most / should focus on
    weak_areas: List[str]  # Topics with knowledge level below 0.6
    strong_areas: List[str]  # Topics with knowledge level at or above 0.8
    test_scores: Dict[str, List[float]]  # Historical test scores (per-topic batch averages)
    question_history: List[Dict]  # Question and answer history ({"question", "topic", "timestamp"})
    learning_time: Dict[str, float]  # Learning time for each topic (minutes)
    last_activity: str  # ISO timestamp of the most recent activity
    total_questions_asked: int
    total_tests_completed: int
    bloom_level_performance: Dict[str, Dict[str, float]]  # Bloom level performance for each topic
    created_at: str  # ISO timestamp
    updated_at: str  # ISO timestamp
    # Cold start related fields
    has_completed_onboarding: bool = False  # Whether onboarding is completed
    background_experience: str = ""  # Background experience (e.g., "experienced", "beginner", "intermediate")
    learning_goals: Optional[List[str]] = None  # Learning goals, None requires special handling
    initial_assessment_completed: bool = False  # Whether initial assessment is completed
    initial_knowledge_survey: Optional[Dict[str, float]] = None  # Initial knowledge survey results, None requires special handling
|
| 41 |
+
|
| 42 |
+
@dataclass
class LearningPathNode:
    """A single step in a learning path: one topic at one Bloom level."""
    node_id: str  # Unique within a path, e.g. "node_3"
    topic: str
    bloom_level: str  # "remember", "understand", "apply", "analyze", "evaluate", "create"
    difficulty: float  # 0-1, higher Bloom levels yield higher difficulty
    prerequisites: List[str]  # Prerequisite node IDs (must be completed first)
    estimated_time: int  # Estimated time (minutes), scaled by the user's pace
    content_type: str  # "reading", "quiz", "practical", "review"
    status: str  # "pending", "in_progress", "completed", "skipped"
    completion_date: Optional[str] = None  # ISO timestamp once completed or skipped
    score: Optional[float] = None  # Score achieved when the node was completed
|
| 55 |
+
|
| 56 |
+
@dataclass
class LearningPath:
    """An ordered sequence of LearningPathNodes for one user.

    Nodes are topologically sorted so prerequisites come first.
    """
    path_id: str  # e.g. "path_<user_id>_<timestamp>"
    user_id: str
    nodes: List[LearningPathNode]
    current_node_index: int  # Index of the next node to work on
    completion_percentage: float  # 0.0-1.0
    created_at: str  # ISO timestamp
    updated_at: str  # ISO timestamp
    estimated_total_time: int  # Sum of node estimated_time values (minutes)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class UserProfilingSystem:
    """Builds, updates and persists per-user learning profiles.

    Profiles are stored in a single JSON file
    (``<storage_dir>/user_profiles.json``) and updated incrementally from
    onboarding answers, test results, question history and learning time.
    """

    def __init__(self, storage_dir: str = "user_data"):
        self.storage_dir = storage_dir
        os.makedirs(storage_dir, exist_ok=True)
        self.profiles_file = os.path.join(storage_dir, "user_profiles.json")
        self.profiles = self._load_profiles()

    def _load_profiles(self) -> Dict[str, UserProfile]:
        """Load all user profiles from disk; return {} when missing or unreadable."""
        if os.path.exists(self.profiles_file):
            try:
                with open(self.profiles_file, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                    return {uid: UserProfile(**profile) for uid, profile in data.items()}
            except Exception as e:
                print(f"Error loading profiles: {e}")
        return {}

    def _save_profiles(self):
        """Persist every profile to disk as JSON (best-effort; errors are printed)."""
        try:
            with open(self.profiles_file, 'w', encoding='utf-8') as f:
                data = {uid: asdict(profile) for uid, profile in self.profiles.items()}
                json.dump(data, f, indent=2, ensure_ascii=False)
        except Exception as e:
            print(f"Error saving profiles: {e}")

    def get_or_create_profile(self, user_id: str) -> UserProfile:
        """Get or create user profile (cold start)."""
        if user_id not in self.profiles:
            now = datetime.now().isoformat()
            self.profiles[user_id] = UserProfile(
                user_id=user_id,
                knowledge_level={},
                learning_style="mixed",
                learning_pace="medium",
                preferred_topics=[],
                weak_areas=[],
                strong_areas=[],
                test_scores={},
                question_history=[],
                learning_time={},
                last_activity=now,
                total_questions_asked=0,
                total_tests_completed=0,
                bloom_level_performance={},
                created_at=now,
                updated_at=now,
                has_completed_onboarding=False,
                background_experience="",
                learning_goals=None,
                initial_assessment_completed=False,
                initial_knowledge_survey=None
            )
            self._save_profiles()
        return self.profiles[user_id]

    def is_cold_start(self, user_id: str) -> bool:
        """Check if user is in cold start state (unknown or onboarding unfinished)."""
        if user_id not in self.profiles:
            return True
        return not self.profiles[user_id].has_completed_onboarding

    def complete_onboarding(self, user_id: str, onboarding_data: Dict):
        """Complete cold start setup and collect initial user information.

        Recognised keys in ``onboarding_data``:

        1. Learning preferences: ``learning_style``, ``learning_pace``.
        2. Background information: ``background_experience``, ``learning_goals``.
        3. Initial knowledge assessment:
           ``initial_knowledge_survey`` (topic -> familiarity, 0-1) and
           ``initial_assessment_completed``.
        """
        profile = self.get_or_create_profile(user_id)

        # Copy simple attributes straight across when provided.
        for attr in ('learning_style', 'learning_pace', 'background_experience',
                     'learning_goals', 'initial_assessment_completed'):
            if attr in onboarding_data:
                setattr(profile, attr, onboarding_data[attr])

        if 'initial_knowledge_survey' in onboarding_data:
            survey = onboarding_data['initial_knowledge_survey']
            profile.initial_knowledge_survey = survey
            # Seed knowledge levels directly from the self-assessment.
            profile.knowledge_level = survey.copy()
            # Focus learning on the (up to 3) least familiar topics.
            profile.preferred_topics = [topic for topic, level in survey.items() if level < 0.5][:3]

        # Mark cold start as completed.
        profile.has_completed_onboarding = True
        profile.updated_at = datetime.now().isoformat()

        self._save_profiles()
        return profile

    def update_from_test_results(self, user_id: str, topic: str, test_results: List[Dict]):
        """Update user profile from test results.

        Each result dict may contain 'score' (0-1), 'is_correct' (bool) and
        'level' (Bloom level name).
        """
        profile = self.get_or_create_profile(user_id)

        # Batch average; fall back to is_correct when no explicit score is
        # given.  float() keeps the value JSON-serializable — np.mean returns
        # np.float64, which json.dump rejects in _save_profiles.
        scores = [r.get('score', 1.0 if r.get('is_correct', False) else 0.0)
                  for r in test_results]
        avg_score = float(np.mean(scores)) if scores else 0.5

        # Knowledge level: weighted blend favouring the newest results.
        if topic not in profile.knowledge_level:
            profile.knowledge_level[topic] = avg_score
        else:
            profile.knowledge_level[topic] = 0.7 * avg_score + 0.3 * profile.knowledge_level[topic]

        # Update test score history.
        profile.test_scores.setdefault(topic, []).append(avg_score)

        # Bloom level performance is stored as a float mean per level.
        # BUG FIX: the previous implementation appended raw scores into the
        # stored dict entry and then overwrote the entry with its mean, so a
        # second test on the same topic/level crashed (calling .append on a
        # float).  Aggregate the batch first, then blend with the stored mean.
        batch_by_level = defaultdict(list)
        for result in test_results:
            level = result.get('level', 'unknown')
            batch_by_level[level].append(1.0 if result.get('is_correct', False) else 0.0)

        topic_perf = profile.bloom_level_performance.setdefault(topic, {})
        for level, batch in batch_by_level.items():
            batch_mean = float(np.mean(batch))
            if level in topic_perf:
                # Same 0.7/0.3 recency weighting as knowledge_level.
                topic_perf[level] = 0.7 * batch_mean + 0.3 * topic_perf[level]
            else:
                topic_perf[level] = batch_mean

        # Update weak and strong areas, then re-estimate pace.
        self._update_weak_strong_areas(profile)
        profile.learning_pace = self._calculate_learning_pace(profile)

        profile.total_tests_completed += 1
        profile.last_activity = datetime.now().isoformat()
        profile.updated_at = datetime.now().isoformat()

        self._save_profiles()
        return profile

    def update_from_question(self, user_id: str, question: str, topic: Optional[str] = None):
        """Update user profile from question history."""
        profile = self.get_or_create_profile(user_id)

        profile.question_history.append({
            "question": question,
            "topic": topic,
            "timestamp": datetime.now().isoformat()
        })

        # Question phrasing is a cheap signal for the preferred learning style.
        profile.learning_style = self._infer_learning_style(profile.question_history)

        if topic:
            # Preferred topics = the 5 most frequently asked-about topics
            # (the question just appended is already counted).
            topic_counts = defaultdict(int)
            for q in profile.question_history:
                if q.get('topic'):
                    topic_counts[q['topic']] += 1
            ranked = sorted(topic_counts.items(), key=lambda x: x[1], reverse=True)[:5]
            profile.preferred_topics = [t for t, _ in ranked]

        profile.total_questions_asked += 1
        profile.last_activity = datetime.now().isoformat()
        profile.updated_at = datetime.now().isoformat()

        self._save_profiles()
        return profile

    def update_learning_time(self, user_id: str, topic: str, minutes: float):
        """Add ``minutes`` of study time for ``topic`` to the user's profile."""
        profile = self.get_or_create_profile(user_id)

        profile.learning_time[topic] = profile.learning_time.get(topic, 0.0) + minutes

        profile.last_activity = datetime.now().isoformat()
        profile.updated_at = datetime.now().isoformat()

        self._save_profiles()
        return profile

    def _update_weak_strong_areas(self, profile: UserProfile):
        """Recompute weak (< 0.6) and strong (>= 0.8) topic lists."""
        profile.weak_areas = [topic for topic, level in profile.knowledge_level.items() if level < 0.6]
        profile.strong_areas = [topic for topic, level in profile.knowledge_level.items() if level >= 0.8]

    def _calculate_learning_pace(self, profile: UserProfile) -> str:
        """Classify the user's pace from recent score trends.

        An improvement above 0.3 over the recent tests reads as "fast", a
        decline below -0.1 as "slow"; everything else (or too little data)
        is "medium".
        """
        if not profile.test_scores or profile.total_tests_completed == 0:
            return "medium"

        # Gather up to the last 3 scores per topic.
        recent_scores = []
        for topic_scores in profile.test_scores.values():
            if topic_scores:
                recent_scores.extend(topic_scores[-3:])

        if len(recent_scores) >= 3:
            improvement = recent_scores[-1] - recent_scores[0]
            if improvement > 0.3:
                return "fast"
            if improvement < -0.1:
                return "slow"

        return "medium"

    def _infer_learning_style(self, question_history: List[Dict]) -> str:
        """Infer "visual"/"practical"/"textual" from question keywords.

        Returns "mixed" when no single style clearly dominates (> 40% of
        questions).  Ties break in the order visual, practical, textual.
        """
        if not question_history:
            return "mixed"

        keyword_sets = {
            "visual": ["how", "what does", "show", "visual", "diagram", "illustration"],
            "practical": ["how to", "step", "procedure", "activate", "use", "operate"],
            "textual": ["what is", "explain", "define", "describe", "meaning"],
        }

        total = len(question_history)
        ratios = {}
        for style, keywords in keyword_sets.items():
            hits = sum(1 for q in question_history
                       if any(kw in q.get('question', '').lower() for kw in keywords))
            ratios[style] = hits / total

        # max() over the dict preserves the visual -> practical -> textual
        # tie-break via insertion order.
        best_style = max(ratios, key=ratios.get)
        if ratios[best_style] > 0.4:
            return best_style
        return "mixed"

    def get_profile_summary(self, user_id: str) -> Dict:
        """Return a JSON-friendly snapshot of the user's profile."""
        profile = self.get_or_create_profile(user_id)

        return {
            "user_id": profile.user_id,
            "knowledge_level": profile.knowledge_level,
            "learning_style": profile.learning_style,
            "learning_pace": profile.learning_pace,
            "preferred_topics": profile.preferred_topics,
            "weak_areas": profile.weak_areas,
            "strong_areas": profile.strong_areas,
            "total_questions": profile.total_questions_asked,
            "total_tests": profile.total_tests_completed,
            "overall_progress": self._calculate_overall_progress(profile)
        }

    def _calculate_overall_progress(self, profile: UserProfile) -> float:
        """Mean knowledge level across all known topics (0.0 when none).

        float() keeps the value JSON-serializable (np.mean -> np.float64).
        """
        if not profile.knowledge_level:
            return 0.0
        return float(np.mean(list(profile.knowledge_level.values())))
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
class LearningPathGenerator:
    """Generates personalized learning paths ordered by Bloom's taxonomy."""

    def __init__(self, user_profiling: UserProfilingSystem, available_topics: List[str]):
        self.user_profiling = user_profiling
        self.available_topics = available_topics
        # Canonical Bloom ordering; reused for prerequisites and sorting.
        self.bloom_levels = ["remember", "understand", "apply", "analyze", "evaluate", "create"]

    def generate_path(self, user_id: str, focus_areas: Optional[List[str]] = None) -> LearningPath:
        """Generate personalized learning path.

        ``focus_areas``, when given, overrides automatic topic selection.
        """
        profile = self.user_profiling.get_or_create_profile(user_id)

        # Determine topics to learn.
        topics_to_learn = self._determine_topics(profile, focus_areas)

        # Generate learning nodes: one node per (topic, Bloom level), skipping
        # levels already mastered or too advanced for the current knowledge.
        nodes = []
        node_id_counter = 0
        for topic in topics_to_learn:
            topic_level = profile.knowledge_level.get(topic, 0.0)
            bloom_performance = profile.bloom_level_performance.get(topic, {})

            for bloom_level in self.bloom_levels:
                if not self._needs_node(profile, topic, bloom_level, topic_level, bloom_performance):
                    continue

                nodes.append(LearningPathNode(
                    node_id=f"node_{node_id_counter}",
                    topic=topic,
                    bloom_level=bloom_level,
                    difficulty=self._calculate_difficulty(topic_level, bloom_level),
                    prerequisites=self._get_prerequisites(nodes, topic, bloom_level),
                    estimated_time=self._estimate_time(bloom_level, profile.learning_pace),
                    content_type=self._determine_content_type(bloom_level, profile.learning_style),
                    status="pending"
                ))
                node_id_counter += 1

        # Order nodes so prerequisites come first, then total the time.
        ordered_nodes = self._topological_sort(nodes)
        total_time = sum(node.estimated_time for node in ordered_nodes)

        return LearningPath(
            path_id=f"path_{user_id}_{datetime.now().strftime('%Y%m%d%H%M%S')}",
            user_id=user_id,
            nodes=ordered_nodes,
            current_node_index=0,
            completion_percentage=0.0,
            created_at=datetime.now().isoformat(),
            updated_at=datetime.now().isoformat(),
            estimated_total_time=total_time
        )

    def _determine_topics(self, profile: UserProfile, focus_areas: Optional[List[str]]) -> List[str]:
        """Determine topics to learn: explicit focus areas, else weak + unseen topics."""
        if focus_areas:
            return focus_areas

        # Prioritize weak areas, then topics never studied before.
        topics = profile.weak_areas.copy()
        for topic in self.available_topics:
            if topic not in profile.knowledge_level and topic not in topics:
                topics.append(topic)

        # Fall back to preferred topics (or the first available) when nothing
        # needs work.
        if not topics:
            topics = profile.preferred_topics[:3] if profile.preferred_topics else self.available_topics[:3]

        return topics[:5]  # Limit to maximum 5 topics

    def _needs_node(self, profile: UserProfile, topic: str, bloom_level: str,
                    topic_level: float, bloom_performance: Dict) -> bool:
        """Determine if a (topic, Bloom level) node belongs in the path."""
        # Already mastered at this level -> skip.
        if bloom_performance.get(bloom_level, 0.0) >= 0.8:
            return False

        # Insufficient foundational knowledge -> no higher-order nodes yet.
        if topic_level < 0.3 and bloom_level in ("analyze", "evaluate", "create"):
            return False

        return True

    def _calculate_difficulty(self, topic_level: float, bloom_level: str) -> float:
        """Node difficulty in [0.1, 1.0]: Bloom weight discounted by prior knowledge."""
        bloom_weights = {
            "remember": 0.2,
            "understand": 0.3,
            "apply": 0.5,
            "analyze": 0.7,
            "evaluate": 0.85,
            "create": 1.0
        }

        base_difficulty = bloom_weights.get(bloom_level, 0.5)
        # Up to a 30% discount for topics the user already partly knows.
        adjusted = base_difficulty * (1 - topic_level * 0.3)
        return min(1.0, max(0.1, adjusted))

    def _get_prerequisites(self, existing_nodes: List[LearningPathNode],
                           topic: str, bloom_level: str) -> List[str]:
        """Prerequisites = same-topic nodes at lower Bloom levels.

        Uses the shared ``self.bloom_levels`` ordering instead of
        re-declaring it locally (consistency fix).
        """
        order = self.bloom_levels
        current_index = order.index(bloom_level) if bloom_level in order else 0

        prereqs = []
        for node in existing_nodes:
            if node.topic != topic:
                continue
            node_index = order.index(node.bloom_level) if node.bloom_level in order else 0
            if node_index < current_index:
                prereqs.append(node.node_id)

        return prereqs

    def _estimate_time(self, bloom_level: str, learning_pace: str) -> int:
        """Estimate learning time in minutes, scaled by the user's pace."""
        base_times = {
            "remember": 10,
            "understand": 15,
            "apply": 20,
            "analyze": 25,
            "evaluate": 30,
            "create": 35
        }
        pace_multipliers = {
            "slow": 1.5,
            "medium": 1.0,
            "fast": 0.7
        }
        return int(base_times.get(bloom_level, 20) * pace_multipliers.get(learning_pace, 1.0))

    def _determine_content_type(self, bloom_level: str, learning_style: str) -> str:
        """Map (Bloom level, learning style) to a content type."""
        if learning_style == "visual":
            return "reading" if bloom_level in ("remember", "understand") else "practical"
        if learning_style == "practical":
            return "practical"
        if learning_style == "textual":
            return "reading"
        # "mixed": quizzes for higher-order levels, reading otherwise.
        if bloom_level in ("apply", "analyze", "evaluate", "create"):
            return "quiz"
        return "reading"

    def _topological_sort(self, nodes: List[LearningPathNode]) -> List[LearningPathNode]:
        """Kahn's algorithm: order nodes so prerequisites come first.

        Nodes caught in a prerequisite cycle (should not occur with
        Bloom-level prerequisites) are appended at the end unsorted.
        """
        node_map = {node.node_id: node for node in nodes}
        in_degree = {node.node_id: len(node.prerequisites) for node in nodes}

        # Start with nodes that have no prerequisites.
        queue = [nid for nid, deg in in_degree.items() if deg == 0]
        ordered_ids = []

        while queue:
            current_id = queue.pop(0)
            ordered_ids.append(current_id)
            for node in nodes:
                if current_id in node.prerequisites:
                    in_degree[node.node_id] -= 1
                    if in_degree[node.node_id] == 0:
                        queue.append(node.node_id)

        result = [node_map[nid] for nid in ordered_ids]
        # Use an id set for O(1) membership instead of scanning the result
        # list for each node (was O(n^2)).
        placed = set(ordered_ids)
        result.extend(node for node in nodes if node.node_id not in placed)

        return result
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
class AdaptiveLearningEngine:
|
| 585 |
+
"""Adaptive learning engine"""
|
| 586 |
+
|
| 587 |
+
def __init__(self, user_profiling: UserProfilingSystem, learning_path_generator: LearningPathGenerator):
|
| 588 |
+
self.user_profiling = user_profiling
|
| 589 |
+
self.learning_path_generator = learning_path_generator
|
| 590 |
+
self.paths_file = os.path.join("user_data", "learning_paths.json")
|
| 591 |
+
self.paths = self._load_paths()
|
| 592 |
+
|
| 593 |
+
def _load_paths(self) -> Dict[str, LearningPath]:
|
| 594 |
+
"""Load learning paths"""
|
| 595 |
+
if os.path.exists(self.paths_file):
|
| 596 |
+
try:
|
| 597 |
+
with open(self.paths_file, 'r', encoding='utf-8') as f:
|
| 598 |
+
data = json.load(f)
|
| 599 |
+
paths = {}
|
| 600 |
+
for pid, path_data in data.items():
|
| 601 |
+
nodes = [LearningPathNode(**node) for node in path_data['nodes']]
|
| 602 |
+
path = LearningPath(
|
| 603 |
+
path_id=path_data['path_id'],
|
| 604 |
+
user_id=path_data['user_id'],
|
| 605 |
+
nodes=nodes,
|
| 606 |
+
current_node_index=path_data['current_node_index'],
|
| 607 |
+
completion_percentage=path_data['completion_percentage'],
|
| 608 |
+
created_at=path_data['created_at'],
|
| 609 |
+
updated_at=path_data['updated_at'],
|
| 610 |
+
estimated_total_time=path_data['estimated_total_time']
|
| 611 |
+
)
|
| 612 |
+
paths[pid] = path
|
| 613 |
+
return paths
|
| 614 |
+
except Exception as e:
|
| 615 |
+
print(f"Error loading paths: {e}")
|
| 616 |
+
return {}
|
| 617 |
+
|
| 618 |
+
def _save_paths(self):
|
| 619 |
+
"""Save learning paths"""
|
| 620 |
+
try:
|
| 621 |
+
os.makedirs("user_data", exist_ok=True)
|
| 622 |
+
with open(self.paths_file, 'w', encoding='utf-8') as f:
|
| 623 |
+
data = {}
|
| 624 |
+
for pid, path in self.paths.items():
|
| 625 |
+
path_dict = asdict(path)
|
| 626 |
+
data[pid] = path_dict
|
| 627 |
+
json.dump(data, f, indent=2, ensure_ascii=False)
|
| 628 |
+
except Exception as e:
|
| 629 |
+
print(f"Error saving paths: {e}")
|
| 630 |
+
|
| 631 |
+
def create_or_update_path(self, user_id: str, focus_areas: Optional[List[str]] = None) -> LearningPath:
|
| 632 |
+
"""Create or update learning path"""
|
| 633 |
+
# Check if there is an active path
|
| 634 |
+
active_path = self.get_active_path(user_id)
|
| 635 |
+
|
| 636 |
+
if active_path and active_path.completion_percentage < 1.0:
|
| 637 |
+
# Update existing path
|
| 638 |
+
return self._update_path(active_path)
|
| 639 |
+
else:
|
| 640 |
+
# Create new path
|
| 641 |
+
path = self.learning_path_generator.generate_path(user_id, focus_areas)
|
| 642 |
+
self.paths[path.path_id] = path
|
| 643 |
+
self._save_paths()
|
| 644 |
+
return path
|
| 645 |
+
|
| 646 |
+
def get_active_path(self, user_id: str) -> Optional[LearningPath]:
|
| 647 |
+
"""Get user's current active path"""
|
| 648 |
+
user_paths = [p for p in self.paths.values() if p.user_id == user_id]
|
| 649 |
+
if not user_paths:
|
| 650 |
+
return None
|
| 651 |
+
|
| 652 |
+
# Return the latest incomplete path
|
| 653 |
+
active = [p for p in user_paths if p.completion_percentage < 1.0]
|
| 654 |
+
if active:
|
| 655 |
+
return max(active, key=lambda p: p.created_at)
|
| 656 |
+
|
| 657 |
+
# If no incomplete paths, return the latest one
|
| 658 |
+
return max(user_paths, key=lambda p: p.created_at)
|
| 659 |
+
|
| 660 |
+
def complete_node(self, user_id: str, node_id: str, score: float):
|
| 661 |
+
"""Complete a node"""
|
| 662 |
+
path = self.get_active_path(user_id)
|
| 663 |
+
if not path:
|
| 664 |
+
return None
|
| 665 |
+
|
| 666 |
+
# Find node and mark as completed
|
| 667 |
+
for node in path.nodes:
|
| 668 |
+
if node.node_id == node_id:
|
| 669 |
+
node.status = "completed"
|
| 670 |
+
node.score = score
|
| 671 |
+
node.completion_date = datetime.now().isoformat()
|
| 672 |
+
break
|
| 673 |
+
|
| 674 |
+
# Update path progress
|
| 675 |
+
completed = sum(1 for n in path.nodes if n.status == "completed")
|
| 676 |
+
path.completion_percentage = completed / len(path.nodes) if path.nodes else 0.0
|
| 677 |
+
|
| 678 |
+
# Update current node index
|
| 679 |
+
for i, node in enumerate(path.nodes):
|
| 680 |
+
if node.status not in ["completed", "skipped"]:
|
| 681 |
+
path.current_node_index = i
|
| 682 |
+
break
|
| 683 |
+
|
| 684 |
+
path.updated_at = datetime.now().isoformat()
|
| 685 |
+
self._save_paths()
|
| 686 |
+
|
| 687 |
+
# Update user profile
|
| 688 |
+
current_node = path.nodes[path.current_node_index] if path.current_node_index < len(path.nodes) else None
|
| 689 |
+
if current_node:
|
| 690 |
+
self.user_profiling.update_learning_time(
|
| 691 |
+
user_id,
|
| 692 |
+
current_node.topic,
|
| 693 |
+
current_node.estimated_time
|
| 694 |
+
)
|
| 695 |
+
|
| 696 |
+
return path
|
| 697 |
+
|
| 698 |
+
def _update_path(self, path: LearningPath) -> LearningPath:
|
| 699 |
+
"""Update path based on user performance"""
|
| 700 |
+
profile = self.user_profiling.get_or_create_profile(path.user_id)
|
| 701 |
+
|
| 702 |
+
# Check if path needs adjustment
|
| 703 |
+
for node in path.nodes:
|
| 704 |
+
if node.status == "pending":
|
| 705 |
+
# Check if already mastered
|
| 706 |
+
topic_level = profile.knowledge_level.get(node.topic, 0.0)
|
| 707 |
+
bloom_perf = profile.bloom_level_performance.get(node.topic, {}).get(node.bloom_level, 0.0)
|
| 708 |
+
|
| 709 |
+
if bloom_perf >= 0.8:
|
| 710 |
+
# Already mastered, can skip
|
| 711 |
+
node.status = "skipped"
|
| 712 |
+
node.completion_date = datetime.now().isoformat()
|
| 713 |
+
|
| 714 |
+
# Recalculate progress
|
| 715 |
+
completed = sum(1 for n in path.nodes if n.status in ["completed", "skipped"])
|
| 716 |
+
path.completion_percentage = completed / len(path.nodes) if path.nodes else 0.0
|
| 717 |
+
|
| 718 |
+
path.updated_at = datetime.now().isoformat()
|
| 719 |
+
self._save_paths()
|
| 720 |
+
|
| 721 |
+
return path
|
| 722 |
+
|
| 723 |
+
def get_recommendations(self, user_id: str) -> Dict:
    """Build learning recommendations for a user.

    Returns a dict with:
      - next_node: summary of the next node on the active path (or None)
      - suggested_topics: up to 3 topics the user has not started yet
      - review_topics: up to 3 of the user's weak areas
      - challenge_topics: up to 2 strong areas worth advanced content

    Fix: new-topic suggestions are now sorted, so the output is
    deterministic (set-difference iteration order varies between runs).
    """
    profile = self.user_profiling.get_or_create_profile(user_id)
    path = self.get_active_path(user_id)

    recommendations = {
        "next_node": None,
        "suggested_topics": [],
        "review_topics": [],
        "challenge_topics": []
    }

    # Next step on the active learning path, if one exists.
    if path and path.current_node_index < len(path.nodes):
        next_node = path.nodes[path.current_node_index]
        recommendations["next_node"] = {
            "node_id": next_node.node_id,
            "topic": next_node.topic,
            "bloom_level": next_node.bloom_level,
            "content_type": next_node.content_type,
            "estimated_time": next_node.estimated_time
        }

    # Weak areas are the best review candidates.
    recommendations["review_topics"] = profile.weak_areas[:3]

    # Strong areas get advanced "challenge" content.
    for topic in profile.strong_areas[:2]:
        if topic not in recommendations["challenge_topics"]:
            recommendations["challenge_topics"].append(topic)

    # Topics never touched yet, sorted for a stable ordering.
    all_topics = set(self.learning_path_generator.available_topics)
    learned_topics = set(profile.knowledge_level.keys())
    recommendations["suggested_topics"] = sorted(all_topics - learned_topics)[:3]

    return recommendations
|
| 761 |
+
|
| 762 |
+
def get_path_visualization_data(self, user_id: str) -> Dict:
    """Produce node/edge data for rendering the user's active path as a graph.

    Returns a dict with "nodes", "edges" (prerequisite -> dependent),
    plus the path's completion percentage and current node index.
    Yields empty node/edge lists when the user has no active path.
    """
    path = self.get_active_path(user_id)
    if not path:
        return {"nodes": [], "edges": []}

    nodes_data = [
        {
            "id": node.node_id,
            "topic": node.topic,
            "bloom_level": node.bloom_level,
            "status": node.status,
            "difficulty": node.difficulty,
            "score": node.score
        }
        for node in path.nodes
    ]

    # One edge per prerequisite relationship, pointing at the dependent node.
    edges_data = [
        {"source": prereq_id, "target": node.node_id}
        for node in path.nodes
        for prereq_id in node.prerequisites
    ]

    return {
        "nodes": nodes_data,
        "edges": edges_data,
        "completion_percentage": path.completion_percentage,
        "current_node_index": path.current_node_index
    }
|
| 794 |
+
|
modules/proactive_learning.py
ADDED
|
@@ -0,0 +1,522 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Proactive Learning Assistance Module (Phase 1)
|
| 3 |
+
Implements intelligent prompting suggestions, context-aware follow-up questions, and critical knowledge gap identification
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
from typing import Dict, List, Optional, Tuple
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
from openai import OpenAI
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class KnowledgeGapAnalyzer:
    """Analyzes user knowledge gaps, especially critical safety-related gaps."""

    # ADAS features where a low knowledge level is a safety concern.
    SAFETY_CRITICAL_FEATURES = [
        "Function of Active Distance Assist DISTRONIC",
        "Function of Active Stop-and-Go Assist",
        "Function of Active Steering Assist"
    ]

    # Knowledge-level thresholds.
    CRITICAL_GAP_THRESHOLD = 0.5  # below this, a safety feature counts as a critical gap
    WEAK_AREA_THRESHOLD = 0.6     # below this, any topic counts as a weak area

    def __init__(self, available_topics: List[str]):
        self.available_topics = available_topics

    def identify_critical_gaps(self, user_profile) -> List[str]:
        """Return safety-critical topics whose knowledge level falls below
        the critical-gap threshold."""
        levels = getattr(user_profile, 'knowledge_level', {})
        return [
            topic for topic in self.available_topics
            if topic in self.SAFETY_CRITICAL_FEATURES
            and levels.get(topic, 0.0) < self.CRITICAL_GAP_THRESHOLD
        ]

    def identify_weak_areas(self, user_profile) -> List[str]:
        """Return every topic whose knowledge level falls below the
        weak-area threshold (not only the safety-critical ones)."""
        levels = getattr(user_profile, 'knowledge_level', {})
        return [
            topic for topic in self.available_topics
            if levels.get(topic, 0.0) < self.WEAK_AREA_THRESHOLD
        ]

    def get_gap_priority(self, user_profile) -> List[Tuple[str, float]]:
        """Score every available topic by how urgently it needs attention.

        Safety-critical features add 2.0, lower knowledge adds up to 1.5,
        and membership in the profile's weak areas adds 0.5. Returns
        (topic, priority) pairs sorted from most to least urgent.
        """
        levels = getattr(user_profile, 'knowledge_level', {})
        weak = getattr(user_profile, 'weak_areas', [])

        scored = []
        for topic in self.available_topics:
            level = levels.get(topic, 0.0)
            priority = 0.0
            if topic in self.SAFETY_CRITICAL_FEATURES:
                priority += 2.0                # safety features outrank everything else
            priority += (1.0 - level) * 1.5    # less knowledge -> more urgent
            if topic in weak:
                priority += 0.5
            scored.append((topic, priority))

        scored.sort(key=lambda pair: pair[1], reverse=True)
        return scored
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class PromptSuggestionGenerator:
    """Generates intelligent prompt suggestions based on user profile and learning history.

    Suggestion sources, in priority order: critical safety gaps, the
    active learning-path node, weak areas, questions related to recent
    context, and entirely unlearned topics.
    """

    def __init__(self, client: OpenAI, rag_engine, knowledge_gap_analyzer: KnowledgeGapAnalyzer,
                 available_topics: List[str]):
        self.client = client
        self.rag_engine = rag_engine
        self.gap_analyzer = knowledge_gap_analyzer
        self.available_topics = available_topics

    def generate_suggestions(self, user_id: str, user_profile, learning_path=None,
                             context: Optional[str] = None, max_suggestions: int = 5) -> List[Dict[str, str]]:
        """
        Generate prompt suggestions based on multiple criteria.

        Args:
            user_id: User ID
            user_profile: UserProfile object
            learning_path: Optional LearningPath object
            context: Optional context (e.g., recent question)
            max_suggestions: Maximum number of suggestions to return

        Returns:
            List of suggestion dicts with 'question', 'reason',
            'priority', and 'type' keys, ranked by priority.
        """
        suggestions = []

        # 1. Critical knowledge gaps (safety features with low knowledge).
        critical_gaps = self.gap_analyzer.identify_critical_gaps(user_profile)
        for topic in critical_gaps[:2]:  # top 2 critical gaps
            question = self._generate_question_for_topic(topic, "beginner")
            if question:
                suggestions.append({
                    "question": question,
                    "reason": f"Critical Safety Feature: Your understanding of {topic.replace('Function of ', '')} needs improvement",
                    "priority": "high",
                    "type": "critical_gap"
                })

        # 2. Current node on the active learning path.
        if learning_path and hasattr(learning_path, 'nodes') and learning_path.nodes:
            current_node = None
            if learning_path.current_node_index < len(learning_path.nodes):
                current_node = learning_path.nodes[learning_path.current_node_index]

            if current_node and current_node.status != "completed":
                question = self._generate_question_for_topic(current_node.topic, current_node.bloom_level)
                if question:
                    suggestions.append({
                        "question": question,
                        "reason": f"Learning Path: Current learning node - {current_node.topic}",
                        "priority": "medium",
                        "type": "learning_path"
                    })

        # 3. Weak areas (excluding topics already covered as critical gaps).
        weak_areas = self.gap_analyzer.identify_weak_areas(user_profile)
        for topic in weak_areas[:2]:  # top 2 weak areas
            if topic not in critical_gaps:  # avoid duplicates
                question = self._generate_question_for_topic(topic, "understand")
                if question:
                    suggestions.append({
                        "question": question,
                        "reason": f"Weak Area: Recommend strengthening understanding of {topic.replace('Function of ', '')}",
                        "priority": "medium",
                        "type": "weak_area"
                    })

        # 4. Follow-ups on recent context, when provided.
        if context:
            related_questions = self._generate_related_questions(context)
            for q in related_questions[:2]:
                suggestions.append({
                    "question": q,
                    "reason": "Related Question: Explore deeper into the topic you just asked about",
                    "priority": "low",
                    "type": "related"
                })

        # 5. Topics the user has not started yet.
        knowledge_level = user_profile.knowledge_level if hasattr(user_profile, 'knowledge_level') else {}
        unlearned_topics = [t for t in self.available_topics if t not in knowledge_level]
        for topic in unlearned_topics[:1]:  # top 1 unlearned topic
            question = self._generate_question_for_topic(topic, "remember")
            if question:
                suggestions.append({
                    "question": question,
                    "reason": f"New Topic: Start learning {topic.replace('Function of ', '')}",
                    "priority": "low",
                    "type": "new_topic"
                })

        # Rank and trim.
        suggestions = self._rank_suggestions(suggestions)
        return suggestions[:max_suggestions]

    def _generate_question_for_topic(self, topic: str, level: str = "understand") -> Optional[str]:
        """Generate a single learner question about *topic* at the given
        Bloom level; falls back to a templated question on any error."""
        try:
            # Use RAG to get topic information.
            query = f"What are the key points about {topic}?"
            answer, _ = self.rag_engine.query(query)

            # Truncate the RAG answer to ~500 chars to stay within token limits.
            # FIX: this note used to live *inside* the f-string and was sent
            # to the model as literal prompt text.
            prompt = f"""Based on the following information about {topic}, generate a single, clear question that a user might ask to learn about this topic.

The question should be at a {level} level (from Bloom's taxonomy).

Information:
{answer[:500]}

Generate only the question text, nothing else. The question should be:
- Clear and specific
- Appropriate for someone learning about ADAS systems
- In Chinese or English (match the user's language preference)

Question:"""

            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that generates educational questions."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=100
            )

            question = response.choices[0].message.content.strip()
            # Remove quotes if present.
            question = question.strip('"').strip("'")
            return question
        except Exception as e:
            print(f"Error generating question for topic {topic}: {e}")
            # Fallback to simple question
            topic_clean = topic.replace("Function of ", "").replace(" Assist", "")
            return f"What is {topic_clean} and how does it work?"

    def _generate_related_questions(self, context: str) -> List[str]:
        """Generate up to 3 follow-up questions related to *context*;
        returns [] on any error."""
        try:
            prompt = f"""Based on the following question or context, generate 2-3 related follow-up questions that would help deepen understanding.

Context: {context[:300]}

Generate 2-3 questions, one per line. Questions should:
- Build upon the context
- Help explore related concepts
- Be clear and specific

Questions:"""

            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that generates educational follow-up questions."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=200
            )

            questions_text = response.choices[0].message.content.strip()
            questions = [q.strip().strip('-').strip() for q in questions_text.split('\n') if q.strip()]
            return questions[:3]
        except Exception as e:
            print(f"Error generating related questions: {e}")
            return []

    def _rank_suggestions(self, suggestions: List[Dict]) -> List[Dict]:
        """Stable-sort suggestions high -> medium -> low priority."""
        priority_weights = {"high": 3, "medium": 2, "low": 1}
        suggestions.sort(key=lambda x: priority_weights.get(x.get("priority", "low"), 1), reverse=True)
        return suggestions
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class FollowUpQuestionGenerator:
    """Generates context-aware follow-up questions based on RAG answers."""

    def __init__(self, client: OpenAI, rag_engine):
        self.client = client
        self.rag_engine = rag_engine

        # Bloom's taxonomy levels, ordered from lowest to highest.
        self.bloom_levels = ["remember", "understand", "apply", "analyze", "evaluate", "create"]

    def generate_follow_up_questions(self, answer: str, user_profile,
                                     max_questions: int = 5) -> List[Dict[str, str]]:
        """
        Generate follow-up questions based on the answer provided.

        Args:
            answer: The RAG answer text
            user_profile: UserProfile object
            max_questions: Maximum number of questions to generate

        Returns:
            List of question dicts with 'question' and 'bloom_level' keys.
        """
        questions = []

        # Determine the user's current Bloom level (defaults to "understand").
        current_bloom = self._infer_user_bloom_level(user_profile)
        current_index = self.bloom_levels.index(current_bloom) if current_bloom in self.bloom_levels else 1

        # Target the user's level and the next levels up.
        target_levels = self.bloom_levels[current_index:current_index + 3]

        for level in target_levels[:2]:  # limit to 2 levels
            level_questions = self._generate_questions_by_bloom(answer, level)
            questions.extend(level_questions[:2])  # 2 questions per level

        # Add one related-concept question for broader context.
        related_questions = self._generate_related_concept_questions(answer)
        questions.extend(related_questions[:1])

        return questions[:max_questions]

    def _infer_user_bloom_level(self, user_profile) -> str:
        """Infer the user's current Bloom taxonomy level from their profile."""
        # Prefer the highest Bloom level with strong test performance (>= 0.7).
        if hasattr(user_profile, 'bloom_level_performance') and user_profile.bloom_level_performance:
            for level in reversed(self.bloom_levels):
                for topic_perf in user_profile.bloom_level_performance.values():
                    if level in topic_perf and topic_perf[level] >= 0.7:
                        return level

        # Otherwise bucket by the average knowledge level.
        if hasattr(user_profile, 'knowledge_level') and user_profile.knowledge_level:
            avg_level = sum(user_profile.knowledge_level.values()) / len(user_profile.knowledge_level.values())
            if avg_level < 0.3:
                return "remember"
            elif avg_level < 0.6:
                return "understand"
            else:
                return "apply"

        return "understand"  # default

    def _generate_questions_by_bloom(self, answer: str, bloom_level: str) -> List[Dict[str, str]]:
        """Generate up to 2 questions at a specific Bloom taxonomy level;
        returns [] on any error."""
        try:
            bloom_descriptions = {
                "remember": "test basic recall of facts and information",
                "understand": "test explanation and interpretation of concepts",
                "apply": "test application of knowledge in practical situations",
                "analyze": "test analysis of relationships and structure",
                "evaluate": "test evaluation and judgment based on criteria",
                "create": "test creation of new ideas or solutions"
            }

            # Truncate the answer to ~800 chars to stay within token limits.
            # FIX: this note used to live *inside* the f-string and was sent
            # to the model as literal prompt text.
            prompt = f"""Based on the following answer about ADAS systems, generate 2 follow-up questions at the {bloom_level} level of Bloom's taxonomy.

Bloom level description: {bloom_descriptions.get(bloom_level, '')}

Answer text:
{answer[:800]}

Generate 2 questions that:
- Build upon the information in the answer
- Are at the {bloom_level} level
- Help deepen understanding
- Are clear and specific

Output format: One question per line, no numbering or bullets.

Questions:"""

            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are an educational assistant that generates follow-up questions."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=200
            )

            questions_text = response.choices[0].message.content.strip()
            question_list = [q.strip().strip('-').strip() for q in questions_text.split('\n') if q.strip()]

            return [{"question": q, "bloom_level": bloom_level} for q in question_list[:2]]
        except Exception as e:
            print(f"Error generating questions by Bloom level: {e}")
            return []

    def _generate_related_concept_questions(self, answer: str) -> List[Dict[str, str]]:
        """Generate one question about a related ADAS concept; [] on error."""
        try:
            prompt = f"""Based on the following answer, generate 1 question about a related ADAS concept that would help the user understand the broader context.

Answer:
{answer[:500]}

Generate 1 question about a related concept or feature that connects to the information provided.

Question:"""

            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are an educational assistant."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=100
            )

            question = response.choices[0].message.content.strip().strip('"').strip("'")
            return [{"question": question, "bloom_level": "understand"}]
        except Exception as e:
            print(f"Error generating related concept question: {e}")
            return []
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
class ProactiveLearningEngine:
    """Main engine for proactive learning assistance.

    Facade that wires together gap analysis, prompt-suggestion generation,
    and follow-up-question generation behind simple per-user entry points.
    Every entry point degrades gracefully (empty result) when no
    user-profiling service was supplied.
    """

    def __init__(self, client: OpenAI, rag_engine, user_profiling, adaptive_engine=None,
                 available_topics: Optional[List[str]] = None):
        # FIX: the annotation was `List[str] = None`, which mis-declares the
        # default; it is now Optional[List[str]].
        self.client = client
        self.rag_engine = rag_engine
        self.user_profiling = user_profiling
        self.adaptive_engine = adaptive_engine  # optional; supplies learning paths
        self.available_topics = available_topics or []

        # Initialize components
        self.gap_analyzer = KnowledgeGapAnalyzer(self.available_topics)
        self.suggestion_generator = PromptSuggestionGenerator(
            client, rag_engine, self.gap_analyzer, self.available_topics
        )
        self.followup_generator = FollowUpQuestionGenerator(client, rag_engine)

    def get_prompt_suggestions(self, user_id: str, context: Optional[str] = None,
                               max_suggestions: int = 5) -> List[Dict[str, str]]:
        """
        Get prompt suggestions for a user.

        Args:
            user_id: User ID
            context: Optional context (e.g., recent question)
            max_suggestions: Maximum number of suggestions

        Returns:
            List of suggestion dictionaries ([] without a profiling service).
        """
        if not self.user_profiling:
            return []

        user_profile = self.user_profiling.get_or_create_profile(user_id)

        # Include the active learning path when an adaptive engine exists.
        learning_path = None
        if self.adaptive_engine:
            learning_path = self.adaptive_engine.get_active_path(user_id)

        return self.suggestion_generator.generate_suggestions(
            user_id, user_profile, learning_path, context, max_suggestions
        )

    def get_follow_up_questions(self, user_id: str, answer: str,
                                max_questions: int = 5) -> List[Dict[str, str]]:
        """
        Get follow-up questions based on an answer.

        Args:
            user_id: User ID
            answer: The RAG answer text
            max_questions: Maximum number of questions

        Returns:
            List of question dictionaries ([] without a profiling service).
        """
        if not self.user_profiling:
            return []

        user_profile = self.user_profiling.get_or_create_profile(user_id)
        return self.followup_generator.generate_follow_up_questions(
            answer, user_profile, max_questions
        )

    def get_critical_gaps(self, user_id: str) -> List[str]:
        """
        Get critical knowledge gaps for a user.

        Args:
            user_id: User ID

        Returns:
            List of topics with critical gaps ([] without a profiling service).
        """
        if not self.user_profiling:
            return []

        user_profile = self.user_profiling.get_or_create_profile(user_id)
        return self.gap_analyzer.identify_critical_gaps(user_profile)

    def analyze_user_state(self, user_id: str) -> Dict:
        """
        Analyze the user's current learning state.

        Args:
            user_id: User ID

        Returns:
            Dict with critical gaps, weak areas, top-5 gap priorities, and
            counts ({} without a profiling service).
        """
        if not self.user_profiling:
            return {}

        user_profile = self.user_profiling.get_or_create_profile(user_id)

        critical_gaps = self.gap_analyzer.identify_critical_gaps(user_profile)
        weak_areas = self.gap_analyzer.identify_weak_areas(user_profile)
        gap_priorities = self.gap_analyzer.get_gap_priority(user_profile)

        return {
            "critical_gaps": critical_gaps,
            "weak_areas": weak_areas,
            "gap_priorities": gap_priorities[:5],  # top 5
            "total_gaps": len(weak_areas),
            "critical_gaps_count": len(critical_gaps)
        }
|
| 522 |
+
|
modules/scenario_contextualization/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Scenario-Based Contextualization Module
|
| 3 |
+
Provides scenario context integration for ADAS RAG system
|
| 4 |
+
"""
|
| 5 |
+
|
modules/scenario_contextualization/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (309 Bytes). View file
|
|
|
modules/scenario_contextualization/database/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Scenario Database Module
|
| 3 |
+
"""
|
| 4 |
+
|
modules/scenario_contextualization/database/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (245 Bytes). View file
|
|
|
modules/scenario_contextualization/database/__pycache__/scenario_database.cpython-312.pyc
ADDED
|
Binary file (6.89 kB). View file
|
|
|
modules/scenario_contextualization/database/__pycache__/scenario_models.cpython-312.pyc
ADDED
|
Binary file (6.35 kB). View file
|
|
|
modules/scenario_contextualization/database/scenario_database.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Scenario Database Management
|
| 3 |
+
"""
|
| 4 |
+
import json
|
| 5 |
+
import os
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Dict, List, Optional
|
| 8 |
+
from collections import defaultdict
|
| 9 |
+
|
| 10 |
+
from .scenario_models import ADASScenario
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ScenarioDatabase:
|
| 14 |
+
"""Scenario database management"""
|
| 15 |
+
|
| 16 |
+
def __init__(self, data_file: str = "data/scenarios/initial_scenarios.json"):
    """Initialise the database: load scenarios from *data_file* and build indexes."""
    self.data_file = Path(data_file)
    self.scenarios: Dict[str, ADASScenario] = {}  # scenario_id -> scenario
    self.index = {}  # populated by _build_index()
    self._load_scenarios()
    self._build_index()
|
| 22 |
+
|
| 23 |
+
def _load_scenarios(self):
|
| 24 |
+
"""Load scenarios from JSON file"""
|
| 25 |
+
if not self.data_file.exists():
|
| 26 |
+
print(f"⚠️ Scenario database file not found: {self.data_file}")
|
| 27 |
+
print(" Run 'python scripts/create_initial_scenarios.py' to create initial scenarios")
|
| 28 |
+
return
|
| 29 |
+
|
| 30 |
+
try:
|
| 31 |
+
with open(self.data_file, 'r', encoding='utf-8') as f:
|
| 32 |
+
data = json.load(f)
|
| 33 |
+
|
| 34 |
+
scenarios_list = data.get("scenarios", [])
|
| 35 |
+
for scenario_data in scenarios_list:
|
| 36 |
+
scenario = ADASScenario.from_dict(scenario_data)
|
| 37 |
+
self.scenarios[scenario.scenario_id] = scenario
|
| 38 |
+
|
| 39 |
+
print(f"✅ Loaded {len(self.scenarios)} scenarios from {self.data_file}")
|
| 40 |
+
except Exception as e:
|
| 41 |
+
print(f"❌ Error loading scenarios: {e}")
|
| 42 |
+
|
| 43 |
+
def _build_index(self):
|
| 44 |
+
"""Build index to speed up queries"""
|
| 45 |
+
self.index = {
|
| 46 |
+
'by_feature': defaultdict(list),
|
| 47 |
+
'by_type': defaultdict(list),
|
| 48 |
+
'by_source': defaultdict(list),
|
| 49 |
+
'by_tag': defaultdict(list)
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
for scenario in self.scenarios.values():
|
| 53 |
+
self.index['by_feature'][scenario.adas_feature].append(scenario.scenario_id)
|
| 54 |
+
self.index['by_type'][scenario.scenario_type].append(scenario.scenario_id)
|
| 55 |
+
for source in scenario.source:
|
| 56 |
+
self.index['by_source'][source].append(scenario.scenario_id)
|
| 57 |
+
for tag in scenario.tags:
|
| 58 |
+
self.index['by_tag'][tag].append(scenario.scenario_id)
|
| 59 |
+
|
| 60 |
+
def get_by_id(self, scenario_id: str) -> Optional[ADASScenario]:
|
| 61 |
+
"""Get scenario by ID"""
|
| 62 |
+
return self.scenarios.get(scenario_id)
|
| 63 |
+
|
| 64 |
+
def get_all(self) -> List[ADASScenario]:
|
| 65 |
+
"""Get all scenarios"""
|
| 66 |
+
return list(self.scenarios.values())
|
| 67 |
+
|
| 68 |
+
def filter_by_features(self, features: List[str]) -> List[ADASScenario]:
|
| 69 |
+
"""Filter scenarios by ADAS features"""
|
| 70 |
+
if not features:
|
| 71 |
+
return []
|
| 72 |
+
|
| 73 |
+
scenario_ids = set()
|
| 74 |
+
for feature in features:
|
| 75 |
+
if feature in self.index['by_feature']:
|
| 76 |
+
scenario_ids.update(self.index['by_feature'][feature])
|
| 77 |
+
|
| 78 |
+
return [self.scenarios[sid] for sid in scenario_ids if sid in self.scenarios]
|
| 79 |
+
|
| 80 |
+
def filter_by_type(self, scenario_type: str) -> List[ADASScenario]:
|
| 81 |
+
"""Filter scenarios by type"""
|
| 82 |
+
scenario_ids = self.index['by_type'].get(scenario_type, [])
|
| 83 |
+
return [self.scenarios[sid] for sid in scenario_ids if sid in self.scenarios]
|
| 84 |
+
|
| 85 |
+
def full_text_search(self, query: str, top_k: int = 10) -> List[ADASScenario]:
|
| 86 |
+
"""Simple full-text search (based on keyword matching)"""
|
| 87 |
+
query_lower = query.lower()
|
| 88 |
+
query_words = set(query_lower.split())
|
| 89 |
+
|
| 90 |
+
scored_scenarios = []
|
| 91 |
+
for scenario in self.scenarios.values():
|
| 92 |
+
# Calculate match score
|
| 93 |
+
text = f"{scenario.title} {scenario.description} {' '.join(scenario.tags)}".lower()
|
| 94 |
+
text_words = set(text.split())
|
| 95 |
+
|
| 96 |
+
# Calculate intersection
|
| 97 |
+
matches = len(query_words & text_words)
|
| 98 |
+
if matches > 0:
|
| 99 |
+
score = matches / len(query_words) # Simple match rate
|
| 100 |
+
scored_scenarios.append((score, scenario))
|
| 101 |
+
|
| 102 |
+
# Sort and return top k
|
| 103 |
+
scored_scenarios.sort(key=lambda x: x[0], reverse=True)
|
| 104 |
+
return [s for _, s in scored_scenarios[:top_k]]
|
| 105 |
+
|
modules/scenario_contextualization/database/scenario_models.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Scenario Data Models
|
| 3 |
+
"""
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from typing import List, Optional, Dict
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class BoundaryCondition:
    """A single operating limit of an ADAS feature."""
    condition: str  # Condition description (e.g. "heavy rain")
    impact: str  # How the condition degrades the feature
    details: Optional[str] = None  # Additional details


@dataclass
class AppropriateResponse:
    """A recommended driver action for a scenario."""
    response: str  # Response action
    rationale: str  # Why the action is appropriate
    details: Optional[str] = None  # Additional details


@dataclass
class ScenarioMetadata:
    """Bookkeeping data attached to a scenario.

    created_at/updated_at now default to "" so that scenario dicts
    serialized without a "metadata" block can still be deserialized
    (ADASScenario.from_dict builds this with **data.get("metadata", {})).
    """
    created_at: str = ""
    updated_at: str = ""
    views: int = 0
    quality_score: float = 0.0
    verified: bool = False
    source_urls: List[str] = field(default_factory=list)


@dataclass
class ADASScenario:
    """Complete ADAS scenario data model."""
    # --- Basic information ---
    scenario_id: str
    title: str
    description: str

    # --- Classification information ---
    adas_feature: str  # Related ADAS feature
    scenario_type: str  # "boundary_condition", "historical_incident", "hypothetical_edge_case"
    source: List[str]  # Data sources
    severity_level: str  # "low", "medium", "high"

    # --- Scenario content ---
    boundary_conditions: List[BoundaryCondition]
    appropriate_responses: List[AppropriateResponse]
    educational_principles: List[str]

    # --- Generalization information ---
    manufacturer_specific: bool
    generalization: str  # Generalized universal principle

    # Metadata (must come before fields with default values)
    metadata: ScenarioMetadata

    # --- Related information (fields with default values must come last) ---
    related_features: List[str] = field(default_factory=list)
    related_scenarios: List[str] = field(default_factory=list)
    tags: List[str] = field(default_factory=list)

    @classmethod
    def from_dict(cls, data: dict) -> 'ADASScenario':
        """Create a scenario object from a plain dict (e.g. parsed JSON).

        Required keys: scenario_id, title, description, adas_feature,
        scenario_type. All other keys fall back to sensible defaults;
        a missing "metadata" block yields a default ScenarioMetadata.

        Raises:
            KeyError: If a required key is missing.
            TypeError: If a nested dict contains unexpected keys.
        """
        # Convert boundary conditions (already-constructed objects pass through).
        boundary_conditions = [
            BoundaryCondition(**bc) if isinstance(bc, dict) else bc
            for bc in data.get("boundary_conditions", [])
        ]

        # Convert appropriate responses.
        appropriate_responses = [
            AppropriateResponse(**ar) if isinstance(ar, dict) else ar
            for ar in data.get("appropriate_responses", [])
        ]

        # Convert metadata. ScenarioMetadata has defaults for every field,
        # so a missing "metadata" entry no longer raises TypeError.
        metadata = ScenarioMetadata(**data.get("metadata", {}))

        return cls(
            scenario_id=data["scenario_id"],
            title=data["title"],
            description=data["description"],
            adas_feature=data["adas_feature"],
            scenario_type=data["scenario_type"],
            source=data.get("source", []),
            severity_level=data.get("severity_level", "medium"),
            boundary_conditions=boundary_conditions,
            appropriate_responses=appropriate_responses,
            educational_principles=data.get("educational_principles", []),
            manufacturer_specific=data.get("manufacturer_specific", False),
            generalization=data.get("generalization", ""),
            metadata=metadata,
            related_features=data.get("related_features", []),
            related_scenarios=data.get("related_scenarios", []),
            tags=data.get("tags", [])
        )

    def to_dict(self) -> dict:
        """Convert to a JSON-serializable dictionary (inverse of from_dict)."""
        return {
            "scenario_id": self.scenario_id,
            "title": self.title,
            "description": self.description,
            "adas_feature": self.adas_feature,
            "scenario_type": self.scenario_type,
            "source": self.source,
            "severity_level": self.severity_level,
            "boundary_conditions": [
                {
                    "condition": bc.condition,
                    "impact": bc.impact,
                    "details": bc.details
                }
                for bc in self.boundary_conditions
            ],
            "appropriate_responses": [
                {
                    "response": ar.response,
                    "rationale": ar.rationale,
                    "details": ar.details
                }
                for ar in self.appropriate_responses
            ],
            "educational_principles": self.educational_principles,
            "manufacturer_specific": self.manufacturer_specific,
            "generalization": self.generalization,
            "related_features": self.related_features,
            "related_scenarios": self.related_scenarios,
            "tags": self.tags,
            "metadata": {
                "created_at": self.metadata.created_at,
                "updated_at": self.metadata.updated_at,
                "views": self.metadata.views,
                "quality_score": self.metadata.quality_score,
                "verified": self.metadata.verified,
                "source_urls": self.metadata.source_urls
            }
        }


@dataclass
class RankedScenario:
    """A scenario paired with its retrieval relevance score."""
    scenario: ADASScenario
    relevance_score: float
    match_reasons: List[str] = field(default_factory=list)  # human-readable match reasons
|
| 153 |
+
|
modules/scenario_contextualization/formatting/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Scenario Formatting Module
|
| 3 |
+
"""
|
| 4 |
+
|
modules/scenario_contextualization/formatting/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (249 Bytes). View file
|
|
|
modules/scenario_contextualization/formatting/__pycache__/constructive_formatter.cpython-312.pyc
ADDED
|
Binary file (7.88 kB). View file
|
|
|
modules/scenario_contextualization/formatting/constructive_formatter.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Constructive Scenario Formatter
|
| 3 |
+
Formats scenarios in an educational, constructive way
|
| 4 |
+
"""
|
| 5 |
+
from typing import List
|
| 6 |
+
from ..database.scenario_models import ADASScenario, RankedScenario
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class FormattedScenario:
    """Display-ready scenario content produced by ConstructiveFormatter."""

    def __init__(
        self,
        title: str,
        adas_feature: str,
        boundary_conditions: List[str],
        appropriate_responses: List[str],
        educational_principle: str,
        relevance_hint: str = "",
        full_scenario_link: str = ""
    ):
        # Plain value holder: store every argument verbatim under its own name.
        for attr_name, attr_value in (
            ("title", title),
            ("adas_feature", adas_feature),
            ("boundary_conditions", boundary_conditions),
            ("appropriate_responses", appropriate_responses),
            ("educational_principle", educational_principle),
            ("relevance_hint", relevance_hint),
            ("full_scenario_link", full_scenario_link),
        ):
            setattr(self, attr_name, attr_value)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class ConstructiveFormatter:
    """Renders scenarios with constructive, education-first framing.

    Principles: avoid detailed incident descriptions, emphasize boundary
    conditions and appropriate responses, surface an educational
    principle, and keep the language positive.
    """

    def format_scenario(self, ranked_scenario: RankedScenario) -> FormattedScenario:
        """Turn one ranked scenario into display-ready text fields."""
        sc = ranked_scenario.scenario
        return FormattedScenario(
            title=sc.title,
            adas_feature=sc.adas_feature,
            boundary_conditions=self._format_boundary_conditions(sc),
            appropriate_responses=self._format_responses(sc),
            educational_principle=self._format_principle(sc),
            relevance_hint=self._generate_relevance_hint(ranked_scenario),
            full_scenario_link=f"/scenario/{sc.scenario_id}"
        )

    def _format_boundary_conditions(self, scenario: ADASScenario) -> List[str]:
        """One bullet per boundary condition, phrased constructively ("may", not "failed")."""
        return [
            f"• {bc.condition}: {bc.impact}" + (f" ({bc.details})" if bc.details else "")
            for bc in scenario.boundary_conditions
        ]

    def _format_responses(self, scenario: ADASScenario) -> List[str]:
        """One bullet per appropriate response, emphasizing the correct action."""
        return [
            f"• {ar.response}: {ar.rationale}" + (f" ({ar.details})" if ar.details else "")
            for ar in scenario.appropriate_responses
        ]

    def _format_principle(self, scenario: ADASScenario) -> str:
        """Pick the educational takeaway: principles first, then generalization, then a stock line."""
        principles = scenario.educational_principles
        if principles:
            # A single principle is used as-is; otherwise join the first two.
            return principles[0] if len(principles) == 1 else " ".join(principles[:2])
        if scenario.generalization:
            return scenario.generalization
        return "Understanding system limitations helps ensure safe operation."

    def _generate_relevance_hint(self, ranked_scenario: RankedScenario) -> str:
        """Short "why you are seeing this" hint from at most two match reasons."""
        if not ranked_scenario.match_reasons:
            return ""
        return f"Relevant because: {', '.join(ranked_scenario.match_reasons[:2])}"

    def format_scenarios_for_ui(self, ranked_scenarios: List[RankedScenario]) -> str:
        """
        Format multiple scenarios as HTML cards for UI display.

        Returns:
            str: Concatenated HTML cards, or "" when there are no scenarios.
        """
        if not ranked_scenarios:
            return ""
        return "".join(
            self._card_html(self.format_scenario(rs)) for rs in ranked_scenarios
        )

    def _card_html(self, fs: FormattedScenario) -> str:
        """Build the HTML card for a single formatted scenario."""
        bc_items = ''.join(f'<li style="margin: 5px 0;">{bc}</li>' for bc in fs.boundary_conditions)
        ar_items = ''.join(f'<li style="margin: 5px 0;">{ar}</li>' for ar in fs.appropriate_responses)
        hint_html = (
            f'<p style="margin: 5px 0; font-size: 0.85em; color: #999;">{fs.relevance_hint}</p>'
            if fs.relevance_hint else ''
        )
        return f"""
        <div style="margin-bottom: 20px; padding: 15px; border: 1px solid #ddd; border-radius: 8px; background-color: #f9f9f9;">
            <h4 style="margin-top: 0; color: #333;">{fs.title}</h4>
            <p style="margin: 5px 0; color: #666; font-size: 0.9em;">Related to: <strong>{fs.adas_feature}</strong></p>

            <div style="margin: 15px 0;">
                <strong style="color: #d9534f;">⚠️ Boundary Conditions:</strong>
                <ul style="margin: 5px 0; padding-left: 20px;">
                    {bc_items}
                </ul>
            </div>

            <div style="margin: 15px 0;">
                <strong style="color: #5cb85c;">✅ Appropriate Responses:</strong>
                <ul style="margin: 5px 0; padding-left: 20px;">
                    {ar_items}
                </ul>
            </div>

            <div style="margin: 15px 0;">
                <strong style="color: #337ab7;">📖 Educational Principle:</strong>
                <p style="margin: 5px 0; font-style: italic;">{fs.educational_principle}</p>
            </div>

            {hint_html}
        </div>
        """
|
| 152 |
+
|
modules/scenario_contextualization/integration/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Scenario Integration Module
|
| 3 |
+
"""
|
| 4 |
+
|
modules/scenario_contextualization/integration/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (251 Bytes). View file
|
|
|
modules/scenario_contextualization/integration/__pycache__/enhanced_rag_engine.cpython-312.pyc
ADDED
|
Binary file (3.41 kB). View file
|
|
|
modules/scenario_contextualization/integration/__pycache__/feature_extractor.cpython-312.pyc
ADDED
|
Binary file (5.17 kB). View file
|
|
|
modules/scenario_contextualization/integration/enhanced_rag_engine.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Enhanced RAG Engine
|
| 3 |
+
Integrates scenario contextualization into RAG queries
|
| 4 |
+
"""
|
| 5 |
+
from typing import Tuple, Optional
|
| 6 |
+
from openai import OpenAI
|
| 7 |
+
|
| 8 |
+
from src.rag_query import RAGQueryEngine
|
| 9 |
+
from .feature_extractor import ADASFeatureExtractor
|
| 10 |
+
from ..retrieval.scenario_retriever import ScenarioRetriever
|
| 11 |
+
from ..formatting.constructive_formatter import ConstructiveFormatter
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class EnhancedAnswer:
    """Value object bundling a RAG answer with optional scenario context."""

    def __init__(
        self,
        answer: str,
        sources: str,
        scenarios_html: Optional[str] = None,
        scenario_count: int = 0
    ):
        # Core RAG output.
        self.answer = answer
        self.sources = sources
        # Scenario enrichment; scenarios_html stays None when nothing matched.
        self.scenarios_html = scenarios_html
        self.scenario_count = scenario_count
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class EnhancedRAGEngine:
    """
    RAG engine wrapper that augments base answers with scenario contextualization.
    """
    def __init__(
        self,
        base_rag_engine: RAGQueryEngine,
        scenario_retriever: ScenarioRetriever,
        feature_extractor: ADASFeatureExtractor,
        formatter: ConstructiveFormatter
    ):
        # Collaborators: the base engine answers; the other three enrich.
        self.base_rag = base_rag_engine
        self.scenario_retriever = scenario_retriever
        self.feature_extractor = feature_extractor
        self.formatter = formatter

    def query(
        self,
        query: str,
        user_id: Optional[str] = None,
        user_context: Optional[dict] = None
    ) -> EnhancedAnswer:
        """
        Run the base RAG query, then attach scenario context when available.

        Args:
            query: User query.
            user_id: User ID (optional, for personalization; not used in this method).
            user_context: Optional user context forwarded to the retriever.

        Returns:
            EnhancedAnswer: The standard answer plus any scenario HTML.
        """
        # Standard RAG query always runs first.
        base_answer, sources = self.base_rag.query(query)

        # Scenario lookup is strictly best-effort.
        scenarios_html, scenario_count = self._scenario_context(query, user_context)

        return EnhancedAnswer(
            answer=base_answer,
            sources=sources,
            scenarios_html=scenarios_html,
            scenario_count=scenario_count
        )

    def _scenario_context(self, query: str, user_context: Optional[dict]):
        """Best-effort scenario retrieval; returns (html_or_None, count)."""
        adas_features = self.feature_extractor.extract(query)
        if not adas_features:
            return None, 0
        try:
            ranked = self.scenario_retriever.retrieve(
                query=query,
                adas_features=adas_features,
                max_results=3,
                user_context=user_context
            )
            if ranked:
                return self.formatter.format_scenarios_for_ui(ranked), len(ranked)
        except Exception as e:
            # Scenario failures must never break the standard answer.
            print(f"⚠️ Error retrieving scenarios: {e}")
        return None, 0
|
| 94 |
+
|
modules/scenario_contextualization/integration/feature_extractor.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
ADAS Feature Extractor
|
| 3 |
+
Extracts ADAS features from user queries
|
| 4 |
+
"""
|
| 5 |
+
import json
|
| 6 |
+
from typing import List, Optional
|
| 7 |
+
from openai import OpenAI
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# ADAS feature keywords mapping
|
| 11 |
+
# ADAS feature keywords mapping: canonical feature name -> trigger phrases.
ADAS_FEATURES_KEYWORDS = {
    "DISTRONIC": ["distronic", "distance assist", "adaptive cruise", "acc", "cruise control", "following distance"],
    "Active Lane Change Assist": ["lane change", "lca", "lane change assist", "change lane", "lane switching"],
    "Active Steering Assist": ["steering assist", "lane keeping", "lka", "lane keep", "steering", "lane centering"],
    "Active Stop-and-Go Assist": ["stop and go", "traffic jam", "low speed", "stop-and-go", "traffic assist"]
}


class ADASFeatureExtractor:
    """Extracts the ADAS features a user query refers to.

    Two strategies: fast keyword matching (default) and optional
    LLM-based extraction that falls back to keywords on any failure.
    """

    def __init__(self, use_llm: bool = False, client: Optional["OpenAI"] = None):
        """
        Args:
            use_llm: Whether to use LLM extraction (more accurate but slower).
            client: OpenAI client instance; required when use_llm is True.
        """
        self.use_llm = use_llm
        self.client = client

    def extract(self, query: str) -> List[str]:
        """
        Extract ADAS features from a query.

        Args:
            query: User query text.

        Returns:
            List[str]: Canonical names of the ADAS features mentioned.
        """
        if self.use_llm and self.client:
            return self._extract_with_llm(query)
        return self._extract_with_keywords(query)

    def _extract_with_keywords(self, query: str) -> List[str]:
        """Extract features using keyword matching (fast method).

        Fix: match on word boundaries instead of raw substring containment,
        so short keywords such as "acc" or "lka" no longer fire inside
        unrelated words ("accurate", "according", ...). Multi-word phrases
        still match as whole phrases.
        """
        import re

        query_lower = query.lower()
        matched_features = []

        for feature, keywords in ADAS_FEATURES_KEYWORDS.items():
            for kw in keywords:
                # (?<!\w) / (?!\w): keyword must not be embedded in a longer word.
                if re.search(r"(?<!\w)" + re.escape(kw) + r"(?!\w)", query_lower):
                    matched_features.append(feature)
                    break  # one keyword hit is enough for this feature

        return matched_features

    def _extract_with_llm(self, query: str) -> List[str]:
        """Extract features using the LLM (more accurate); falls back to keywords on error."""
        if not self.client:
            return self._extract_with_keywords(query)

        try:
            available_features = list(ADAS_FEATURES_KEYWORDS.keys())

            prompt = f"""
Extract ADAS features mentioned in this query: "{query}"

Available features:
{chr(10).join(f'- {f}' for f in available_features)}

Return a JSON object with a "features" array containing the feature names.
If no features are mentioned, return an empty array.
"""

            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are an expert in ADAS systems. Extract ADAS features from user queries."},
                    {"role": "user", "content": prompt}
                ],
                response_format={"type": "json_object"},
                temperature=0.1
            )

            result = json.loads(response.choices[0].message.content)
            features = result.get("features", [])

            # Guard against hallucinated names: keep only known features,
            # and fall back to keyword matching when nothing valid remains.
            valid_features = [f for f in features if f in available_features]
            return valid_features if valid_features else self._extract_with_keywords(query)

        except Exception as e:
            print(f"⚠️ Error in LLM feature extraction: {e}")
            return self._extract_with_keywords(query)
|
| 95 |
+
|
modules/scenario_contextualization/retrieval/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Scenario Retrieval Module
|
| 3 |
+
"""
|
| 4 |
+
|
modules/scenario_contextualization/retrieval/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (247 Bytes). View file
|
|
|
modules/scenario_contextualization/retrieval/__pycache__/scenario_retriever.cpython-312.pyc
ADDED
|
Binary file (5.7 kB). View file
|
|
|
modules/scenario_contextualization/retrieval/scenario_retriever.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Scenario Retriever
|
| 3 |
+
Retrieves relevant scenarios using semantic search and feature filtering
|
| 4 |
+
"""
|
| 5 |
+
from typing import List, Optional, Dict
|
| 6 |
+
from openai import OpenAI
|
| 7 |
+
|
| 8 |
+
from ..database.scenario_database import ScenarioDatabase
|
| 9 |
+
from ..database.scenario_models import ADASScenario, RankedScenario
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class ScenarioRetriever:
|
| 13 |
+
"""Scenario retrieval engine"""
|
| 14 |
+
|
| 15 |
+
    def __init__(
        self,
        scenario_database: ScenarioDatabase,
        scenario_vector_store_id: Optional[str] = None,
        client: Optional[OpenAI] = None
    ):
        """
        Initialize the retriever.

        Args:
            scenario_database: Backing scenario database used for feature
                filtering and full-text search.
            scenario_vector_store_id: Scenario vector store ID (optional,
                if using semantic search).
            client: OpenAI client (if using vector search).
        """
        self.database = scenario_database
        # NOTE(review): vector_store_id and client are stored but not used by
        # the keyword-based retrieval visible here — presumably reserved for
        # future semantic/vector search; confirm before removing.
        self.vector_store_id = scenario_vector_store_id
        self.client = client
|
| 30 |
+
|
| 31 |
+
def retrieve(
|
| 32 |
+
self,
|
| 33 |
+
query: str,
|
| 34 |
+
adas_features: List[str],
|
| 35 |
+
max_results: int = 3,
|
| 36 |
+
user_context: Optional[Dict] = None
|
| 37 |
+
) -> List[RankedScenario]:
|
| 38 |
+
"""
|
| 39 |
+
Retrieve relevant scenarios
|
| 40 |
+
|
| 41 |
+
Args:
|
| 42 |
+
query: User query
|
| 43 |
+
adas_features: List of extracted ADAS features
|
| 44 |
+
max_results: Maximum number of results to return
|
| 45 |
+
user_context: User context (optional, for personalization)
|
| 46 |
+
|
| 47 |
+
Returns:
|
| 48 |
+
List[RankedScenario]: Sorted list of scenarios
|
| 49 |
+
"""
|
| 50 |
+
# 1. Feature filtering
|
| 51 |
+
feature_filtered = self.database.filter_by_features(adas_features)
|
| 52 |
+
|
| 53 |
+
# 2. Full-text search (if feature filtering results are insufficient)
|
| 54 |
+
if len(feature_filtered) < max_results:
|
| 55 |
+
text_results = self.database.full_text_search(query, top_k=max_results * 2)
|
| 56 |
+
# Merge results
|
| 57 |
+
all_candidates = list(set(feature_filtered + text_results))
|
| 58 |
+
else:
|
| 59 |
+
all_candidates = feature_filtered
|
| 60 |
+
|
| 61 |
+
# 3. Relevance scoring
|
| 62 |
+
scored = self._score_relevance(all_candidates, query, adas_features)
|
| 63 |
+
|
| 64 |
+
# 4. User context adjustment (if available)
|
| 65 |
+
if user_context:
|
| 66 |
+
scored = self._adjust_for_user_context(scored, user_context)
|
| 67 |
+
|
| 68 |
+
# 5. Sort and return top N
|
| 69 |
+
scored.sort(key=lambda x: x.relevance_score, reverse=True)
|
| 70 |
+
return scored[:max_results]
|
| 71 |
+
|
| 72 |
+
def _score_relevance(
    self,
    scenarios: List[ADASScenario],
    query: str,
    adas_features: List[str]
) -> List[RankedScenario]:
    """
    Score each candidate scenario for relevance to the query.

    Weighted factors:
        1. Feature match        (30%)
        2. Text similarity      (40%)
        3. Scenario type weight (20%)
        4. Quality score        (10%)
    """
    FEATURE_W, TEXT_W, TYPE_W, QUALITY_W = 0.3, 0.4, 0.2, 0.1
    TYPE_SCORES = {
        "boundary_condition": 1.0,
        "historical_incident": 0.8,
        "hypothetical_edge_case": 0.9
    }
    query_tokens = set(query.lower().split())

    results = []
    for item in scenarios:
        reasons = []

        # 1. Feature match (30%): binary hit against the extracted feature list.
        if item.adas_feature in adas_features:
            feature_part = 1.0 * FEATURE_W
            reasons.append(f"Matches feature: {item.adas_feature}")
        else:
            feature_part = 0.0

        # 2. Text similarity (40%): fraction of query words present in the
        # scenario's title/description/tags (simple keyword overlap).
        haystack = f"{item.title} {item.description} {' '.join(item.tags)}".lower()
        overlap = query_tokens & set(haystack.split())
        similarity = len(overlap) / len(query_tokens) if query_tokens else 0.0
        if similarity > 0.1:
            reasons.append(f"Text similarity: {similarity:.2f}")
        text_part = similarity * TEXT_W

        # 3. Scenario type weight (20%); unknown types fall back to 0.5.
        type_part = TYPE_SCORES.get(item.scenario_type, 0.5) * TYPE_W

        # 4. Scenario quality score (10%).
        quality_part = item.metadata.quality_score * QUALITY_W

        results.append(RankedScenario(
            scenario=item,
            relevance_score=feature_part + text_part + type_part + quality_part,
            match_reasons=reasons
        ))

    return results
|
| 140 |
+
|
| 141 |
+
def _adjust_for_user_context(
    self,
    ranked_scenarios: List[RankedScenario],
    user_context: Dict
) -> List[RankedScenario]:
    """
    Adjust relevance scores based on user context.

    Example: if the user is a beginner, prioritize basic scenarios.

    Personalization is not implemented yet; the ranked list is
    returned unchanged.
    """
    # TODO: implement personalization adjustments here (e.g. boost
    # scenarios matching the user's experience level).
    return ranked_scenarios
|
| 154 |
+
|