cuizhanming committed on
Commit 4181e3b · 1 Parent(s): 1fe7de1

Update agent file

Files changed (3)
  1. agent.py +28 -40
  2. app.py +12 -14
  3. requirements.txt +1 -3
agent.py CHANGED
@@ -123,7 +123,7 @@ sys_msg = SystemMessage(content=system_prompt)
 # build a retriever
 embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") # dim=768
 supabase: Client = create_client(
-    os.environ.get("SUPABASE_URL"),
+    os.environ.get("SUPABASE_URL"),
     os.environ.get("SUPABASE_SERVICE_KEY"))
 vector_store = SupabaseVectorStore(
     client=supabase,
@@ -137,8 +137,6 @@ create_retriever_tool = create_retriever_tool(
     description="A tool to retrieve similar questions from a vector store.",
 )
 
-
-
 tools = [
     multiply,
     add,
@@ -177,47 +175,37 @@ def build_graph(provider: str = "groq"):
     def assistant(state: MessagesState):
         """Assistant node"""
         return {"messages": [llm_with_tools.invoke(state["messages"])]}
-
-    # def retriever(state: MessagesState):
-    #     """Retriever node"""
-    #     similar_question = vector_store.similarity_search(state["messages"][0].content)
-    #     example_msg = HumanMessage(
-    #         content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
-    #     )
-    #     return {"messages": [sys_msg] + state["messages"] + [example_msg]}
-
-    from langchain_core.messages import AIMessage
 
     def retriever(state: MessagesState):
-        query = state["messages"][-1].content
-        similar_doc = vector_store.similarity_search(query, k=1)[0]
-
-        content = similar_doc.page_content
-        if "Final answer :" in content:
-            answer = content.split("Final answer :")[-1].strip()
-        else:
-            answer = content.strip()
-
-        return {"messages": [AIMessage(content=answer)]}
-
-    # builder = StateGraph(MessagesState)
-    # builder.add_node("retriever", retriever)
-    # builder.add_node("assistant", assistant)
-    # builder.add_node("tools", ToolNode(tools))
-    # builder.add_edge(START, "retriever")
-    # builder.add_edge("retriever", "assistant")
-    # builder.add_conditional_edges(
-    #     "assistant",
-    #     tools_condition,
-    # )
-    # builder.add_edge("tools", "assistant")
+        """Retriever node"""
+        similar_question = vector_store.similarity_search(state["messages"][0].content)
+        example_msg = HumanMessage(
+            content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
+        )
+        return {"messages": [sys_msg] + state["messages"] + [example_msg]}
 
     builder = StateGraph(MessagesState)
     builder.add_node("retriever", retriever)
-
-    # Retriever is both the entry and the finish point
-    builder.set_entry_point("retriever")
-    builder.set_finish_point("retriever")
+    builder.add_node("assistant", assistant)
+    builder.add_node("tools", ToolNode(tools))
+    builder.add_edge(START, "retriever")
+    builder.add_edge("retriever", "assistant")
+    builder.add_conditional_edges(
+        "assistant",
+        tools_condition,
+    )
+    builder.add_edge("tools", "assistant")
 
     # Compile graph
-    return builder.compile()
+    return builder.compile()
+
+# test
+if __name__ == "__main__":
+    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
+    # Build the graph
+    graph = build_graph(provider="groq")
+    # Run the graph
+    messages = [HumanMessage(content=question)]
+    messages = graph.invoke({"messages": messages})
+    for m in messages["messages"]:
+        m.pretty_print()
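
One note on the first hunk above: create_client is fed os.environ.get(...) directly, so a missing variable arrives as None and only fails deep inside the Supabase client. A minimal fail-fast sketch, assuming nothing beyond the two variable names that appear in the diff (the helper function itself is hypothetical, not part of this commit):

import os

from supabase import Client, create_client


def make_supabase_client() -> Client:
    # Hypothetical helper: validate the env vars the diff reads before connecting.
    url = os.environ.get("SUPABASE_URL")
    key = os.environ.get("SUPABASE_SERVICE_KEY")
    if not url or not key:
        # Fail fast instead of letting create_client receive None.
        raise RuntimeError("SUPABASE_URL and SUPABASE_SERVICE_KEY must be set.")
    return create_client(url, key)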
app.py CHANGED
@@ -25,11 +25,11 @@ class BasicAgent:
 
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
+        # Wrap the question in a HumanMessage from langchain_core
         messages = [HumanMessage(content=question)]
-        result = self.graph.invoke({"messages": messages})
-        answer = result['messages'][-1].content
-        return answer  # no [14:] slice needed any more!
-
+        messages = self.graph.invoke({"messages": messages})
+        answer = messages['messages'][-1].content
+        return answer[14:]
 
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
@@ -68,16 +68,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-            print("Fetched questions list is empty.")
-            return "Fetched questions list is empty or invalid format.", None
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
     except requests.exceptions.RequestException as e:
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
     except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None
@@ -97,14 +97,14 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
-            print(f"Error running agent on task {task_id}: {e}")
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
     if not answers_payload:
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
+    # 4. Prepare Submission
     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)
@@ -159,11 +159,9 @@ with gr.Blocks() as demo:
     gr.Markdown(
         """
        **Instructions:**
-
        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-
        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
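
On the restored return answer[14:] in the first hunk: the fixed 14-character slice presumably strips an answer prefix that the system prompt asks the model to emit; "FINAL ANSWER: " is exactly 14 characters, but that exact string is an assumption, not something this diff confirms. A more defensive sketch under that assumption (the helper and constant names are hypothetical):

ANSWER_PREFIX = "FINAL ANSWER: "  # assumed prefix; len(ANSWER_PREFIX) == 14


def strip_answer_prefix(answer: str) -> str:
    # Hypothetical helper: drop the prefix only when it is actually present,
    # instead of unconditionally discarding the first 14 characters.
    if answer.startswith(ANSWER_PREFIX):
        return answer[len(ANSWER_PREFIX):]
    return answer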
requirements.txt CHANGED
@@ -15,6 +15,4 @@ arxiv
 pymupdf
 wikipedia
 pgvector
-python-dotenv
-pytesseract
-matplotlib
+python-dotenv
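
python-dotenv is the one dependency kept in this hunk; it lets the SUPABASE_* variables and API keys read in agent.py come from a local .env file when running outside the Space. A minimal usage sketch (the .env file itself is assumed and not part of this commit):

from dotenv import load_dotenv

# Reads key=value pairs from ./.env into the process environment, if the file exists.
load_dotenv()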