sproducts committed on
Commit
894f932
·
verified ·
1 Parent(s): f8d8f6c

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +67 -0
  2. requirements.txt +4 -0
  3. roadmap.txt +14 -0
  4. rules.txt +3 -0
app.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer
2
+ import gradio as gr
3
+
4
# --- 1. Load Roadmap and Rules ---
def load_roadmap_and_rules(roadmap_path="roadmap.txt", rules_path="rules.txt"):
    """Parse the roadmap file into named sections and read the rules file.

    A roadmap line beginning with ``#`` starts a new section; the text after
    the ``#`` (stripped) becomes the section key. Every following non-header
    line is appended (stripped, newline-terminated) to the current section
    until the next header. Lines before the first header are ignored.

    Args:
        roadmap_path: Path to the roadmap text file.
        rules_path: Path to the rules text file.

    Returns:
        Tuple ``(roadmap_content, rules_content)``: a dict mapping section
        name -> section body, and the raw text of the rules file.

    Raises:
        OSError: If either file cannot be opened.
    """
    roadmap_content = {}
    # Fix: open with an explicit encoding — the original relied on the
    # locale-dependent platform default, which breaks on non-UTF-8 locales.
    with open(roadmap_path, "r", encoding="utf-8") as f:
        current_section = None
        for line in f:
            line = line.strip()
            if line.startswith("#"):
                current_section = line[1:].strip()
                roadmap_content[current_section] = ""
            elif current_section:
                roadmap_content[current_section] += line + "\n"
    with open(rules_path, "r", encoding="utf-8") as f:
        rules_content = f.read()
    return roadmap_content, rules_content
19
+
20
# Parse the roadmap sections and the rules text once at import time so every
# request reuses the same in-memory copies.
roadmap, rules = load_roadmap_and_rules()

# --- 2. Load the AI Model (Mistral 7B) ---
# Loads tokenizer and weights from the Hugging Face Hub (downloads on first
# run). NOTE(review): no dtype/device_map is specified, so this loads the
# full-precision model on CPU by default — confirm the Space has enough RAM.
model_name = "mistralai/Mistral-7B-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
26
+
27
# --- 3. Function to Get Chatbot Response ---
def get_chatbot_response(user_query, project_phase, roadmap, rules, model, tokenizer):
    """Build a phase-specific prompt and return the model's generated guidance.

    Args:
        user_query: The user's free-text question.
        project_phase: Key into ``roadmap`` selecting the relevant section.
        roadmap: Mapping of section name -> section text (from roadmap.txt).
        rules: Full text of the project rules.
        model: Causal LM exposing a ``generate`` method.
        tokenizer: Callable tokenizer (``return_tensors="pt"``), also used to
            decode the generated token ids.

    Returns:
        The generated answer with the echoed prompt portion removed.
    """
    section_text = roadmap.get(project_phase, "General Guidance")
    context = f"""
Project Roadmap (Phase: {project_phase}):
{section_text}

Project Rules:
{rules}

User Query: {user_query}

---
Provide helpful guidance for this project.
"""
    # Mistral-Instruct chat format: wrap the context in [INST] ... [/INST].
    prompt = f"<s>[INST] {context} [/INST]"
    encoded = tokenizer(prompt, return_tensors="pt")
    generated = model.generate(**encoded, max_new_tokens=300)
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    # The decoded text echoes the prompt; keep only what follows the first
    # [/INST] marker, falling back to the whole text if the marker is absent.
    marker = "[/INST]"
    if marker in decoded:
        answer = decoded.split(marker, 1)[1]
    else:
        answer = decoded
    return answer.strip()
49
+
50
# --- 4. Gradio Interface Function ---
def chatbot_interface(user_query, project_phase):
    """Gradio callback: forward the UI inputs plus the module-level state
    (roadmap, rules, model, tokenizer) to the response generator."""
    answer = get_chatbot_response(
        user_query, project_phase, roadmap, rules, model, tokenizer
    )
    return answer
53
+
54
# --- 5. Gradio Interface Setup ---
# Web UI: a free-text question box plus a dropdown of the section names
# parsed from roadmap.txt (defaulting to the first parsed section).
# NOTE(review): this raises IndexError at import time if roadmap.txt
# contains no "#" headers — confirm the file always ships with sections.
iface = gr.Interface(
    fn=chatbot_interface,
    inputs=[
        gr.Textbox(label="Your Question"),
        gr.Dropdown(list(roadmap.keys()), label="Project Phase", value=list(roadmap.keys())[0])
    ],
    outputs="text",
    title="Project Guidance Chatbot",
    description="Ask questions about your project phase and get guidance."
)
65
+
66
if __name__ == "__main__":
    # Start the Gradio web server when this file is executed directly.
    iface.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ gradio
4
+ accelerate
roadmap.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Base Model Selection
2
+ **What to do:** Choose a pre-trained language model from Hugging Face Hub.
3
+ **Considerations:** Project size, complexity, memory.
4
+ **Example:** Mistral 7B is a good start.
5
+
6
+ # Fine-Tuning
7
+ **What to do:** Train model on your data.
8
+ **Considerations:** Dataset, fine-tuning method.
9
+ **Example:** Use LoRA for efficiency.
10
+
11
+ # Deployment
12
+ **What to do:** Make chatbot accessible.
13
+ **Considerations:** Platform choice.
14
+ **Example:** Hugging Face Spaces is recommended.
rules.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ Rule 1: Follow the project roadmap.
2
+ Rule 2: Verify generated code.
3
+ Rule 3: Ask if unsure.