QuentinL52 committed on
Commit
40dc45b
·
verified ·
1 Parent(s): b339057

Update services/graph_service.py

Browse files
Files changed (1) hide show
  1. services/graph_service.py +39 -69
services/graph_service.py CHANGED
@@ -5,12 +5,12 @@ from typing import TypedDict, Annotated, Sequence, Dict, Any, List
5
 
6
  from langchain_openai import ChatOpenAI
7
  from langchain_core.runnables import Runnable
8
- from langchain_core.messages import BaseMessage, AIMessage, HumanMessage, SystemMessage
9
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
10
  from langgraph.graph import StateGraph, END
11
  from langgraph.prebuilt import ToolNode
12
 
13
- from tools.analysis_tools import trigger_interview_analysis, _final_analysis_node
14
 
15
  class AgentState(TypedDict):
16
  messages: Annotated[Sequence[BaseMessage], lambda x, y: x + y]
@@ -38,7 +38,7 @@ class GraphInterviewProcessor:
38
  self.formatted_cv_str = self._format_cv_for_prompt()
39
  self.skills_summary = self._extract_skills_summary()
40
  self.reconversion_info = self._extract_reconversion_info()
41
-
42
  self.agent_runnable = self._create_agent_runnable()
43
  self.graph = self._build_graph()
44
  logging.info("GraphInterviewProcessor initialisé avec succès.")
@@ -75,34 +75,15 @@ class GraphInterviewProcessor:
75
  llm = ChatOpenAI(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4o-mini", temperature=0.7)
76
  tools = [trigger_interview_analysis]
77
  llm_with_tools = llm.bind_tools(tools)
78
-
79
  return prompt | llm_with_tools
80
 
81
- def _should_continue(self, state: AgentState) -> str:
82
- """
83
- Détermine si l'entretien doit continuer ou se terminer.
84
- """
85
- messages = state.get('messages', [])
86
- last_message = messages[-1]
87
- if hasattr(last_message, 'tool_calls') and last_message.tool_calls:
88
- for tool_call in last_message.tool_calls:
89
- if tool_call.get('name') == 'trigger_interview_analysis':
90
- print("Condition de fin détectée : appel à trigger_interview_analysis.")
91
- return "end"
92
- return "continue"
93
-
94
  def _agent_node(self, state: AgentState):
95
  """Prépare le prompt et appelle le runnable de l'agent."""
96
-
97
- context_header = (
98
- f"--- CONTEXTE TECHNIQUE POUR L'AGENT (ne pas mentionner à l'utilisateur) ---\n"
99
- f"L'ID de l'utilisateur actuel est : {state['user_id']}\n"
100
- f"L'ID de l'offre d'emploi actuelle est : {state['job_offer_id']}\n"
101
- f"Quand tu appelleras l'outil 'trigger_interview_analysis', tu devras OBLIGATOIREMENT utiliser ces IDs exacts.\n"
102
- f"--- FIN DU CONTEXTE TECHNIQUE ---\n\n"
103
- )
104
  job_description_str = json.dumps(self.job_offer, ensure_ascii=False)
 
105
  system_prompt_content = self.system_prompt_template.format(
 
 
106
  entreprise=self.job_offer.get('entreprise', 'notre entreprise'),
107
  poste=self.job_offer.get('poste', 'ce poste'),
108
  mission=self.job_offer.get('mission', 'Non spécifiée'),
@@ -112,60 +93,50 @@ class GraphInterviewProcessor:
112
  cv=self.formatted_cv_str,
113
  skills_analysis=self.skills_summary,
114
  reconversion_analysis=self.reconversion_info,
115
- job_description=job_description_str,
116
- user_id=state['user_id'],
117
- job_offer_id=state['job_offer_id'],
118
  )
119
 
120
- final_system_prompt = context_header + system_prompt_content
121
- job_description = json.dumps(self.job_offer)
122
-
123
  response = self.agent_runnable.invoke({
124
- "system_prompt_content": final_system_prompt,
125
- "messages": state["messages"],
126
- "job_description": job_description
127
  })
128
- return {
129
- "messages": [response],
130
- "job_description": job_description_str
131
- }
132
 
133
  def _router(self, state: AgentState) -> str:
134
- """
135
- Route le flux du graphe en fonction de la dernière réponse de l'agent.
136
- - Si un outil d'analyse final est appelé, termine le graphe.
137
- - Si un autre outil est appelé, va au noeud d'outils.
138
- - Sinon, termine le tour de conversation.
139
- """
140
  last_message = state["messages"][-1]
141
  if hasattr(last_message, 'tool_calls') and last_message.tool_calls:
142
  if any(tool_call.get('name') == 'trigger_interview_analysis' for tool_call in last_message.tool_calls):
143
- print(">>> Routeur : Appel à l'outil final détecté. Terminaison du graphe.")
144
  return "call_final_tool"
145
  return "call_tool"
146
  return "end_turn"
147
- '''
148
- def _build_graph(self) -> any:
149
- """Construit et compile le graphe d'états."""
150
- tool_node = ToolNode([trigger_interview_analysis])
151
- graph = StateGraph(AgentState)
152
- graph.add_node("agent", self._agent_node)
153
- graph.add_node("tools", tool_node)
154
- graph.add_node("final_tool_node", tool_node)
155
- graph.set_entry_point("agent")
156
- graph.add_conditional_edges(
157
- "agent",
158
- self._router,
159
- {
160
- "call_tool": "tools",
161
- "call_final_tool": "final_tool_node",
162
- "end_turn": END
163
- }
164
- )
165
- graph.add_edge("tools", "agent")
166
- graph.add_edge("final_tool_node", END)
167
- return graph.compile()
168
- '''
 
 
 
 
169
  def _build_graph(self) -> any:
170
  """Construit et compile le graphe d'états."""
171
  tool_node = ToolNode([trigger_interview_analysis])
@@ -199,7 +170,7 @@ class GraphInterviewProcessor:
199
  if not langchain_messages:
200
  logging.info("Historique de conversation vide. Ajout d'un message de démarrage interne.")
201
  langchain_messages.append(HumanMessage(content="Bonjour, je suis prêt à commencer l'entretien."))
202
-
203
  initial_state = {
204
  "user_id": self.user_id,
205
  "job_offer_id": self.job_offer_id,
@@ -212,7 +183,6 @@ class GraphInterviewProcessor:
212
  if not final_state or not final_state.get('messages'):
213
  logging.error("L'état final est vide ou ne contient pas de messages.")
214
  return {"response": "Erreur: Impossible de générer une réponse.", "status": "finished"}
215
-
216
  last_message = final_state['messages'][-1]
217
  status = "finished" if hasattr(last_message, 'tool_calls') and last_message.tool_calls else "interviewing"
218
  response_content = last_message.content
 
5
 
6
  from langchain_openai import ChatOpenAI
7
  from langchain_core.runnables import Runnable
8
+ from langchain_core.messages import BaseMessage, AIMessage, HumanMessage
9
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
10
  from langgraph.graph import StateGraph, END
11
  from langgraph.prebuilt import ToolNode
12
 
13
+ from tools.analysis_tools import trigger_interview_analysis
14
 
15
  class AgentState(TypedDict):
16
  messages: Annotated[Sequence[BaseMessage], lambda x, y: x + y]
 
38
  self.formatted_cv_str = self._format_cv_for_prompt()
39
  self.skills_summary = self._extract_skills_summary()
40
  self.reconversion_info = self._extract_reconversion_info()
41
+
42
  self.agent_runnable = self._create_agent_runnable()
43
  self.graph = self._build_graph()
44
  logging.info("GraphInterviewProcessor initialisé avec succès.")
 
75
  llm = ChatOpenAI(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4o-mini", temperature=0.7)
76
  tools = [trigger_interview_analysis]
77
  llm_with_tools = llm.bind_tools(tools)
 
78
  return prompt | llm_with_tools
79
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  def _agent_node(self, state: AgentState):
81
  """Prépare le prompt et appelle le runnable de l'agent."""
 
 
 
 
 
 
 
 
82
  job_description_str = json.dumps(self.job_offer, ensure_ascii=False)
83
+
84
  system_prompt_content = self.system_prompt_template.format(
85
+ user_id=state['user_id'],
86
+ job_offer_id=state['job_offer_id'],
87
  entreprise=self.job_offer.get('entreprise', 'notre entreprise'),
88
  poste=self.job_offer.get('poste', 'ce poste'),
89
  mission=self.job_offer.get('mission', 'Non spécifiée'),
 
93
  cv=self.formatted_cv_str,
94
  skills_analysis=self.skills_summary,
95
  reconversion_analysis=self.reconversion_info,
96
+ job_description=job_description_str
 
 
97
  )
98
 
 
 
 
99
  response = self.agent_runnable.invoke({
100
+ "system_prompt_content": system_prompt_content,
101
+ "messages": state["messages"]
 
102
  })
103
+
104
+ return {"messages": [response]}
 
 
105
 
106
  def _router(self, state: AgentState) -> str:
107
+ """Route le flux du graphe en fonction de la dernière réponse de l'agent."""
 
 
 
 
 
108
  last_message = state["messages"][-1]
109
  if hasattr(last_message, 'tool_calls') and last_message.tool_calls:
110
  if any(tool_call.get('name') == 'trigger_interview_analysis' for tool_call in last_message.tool_calls):
 
111
  return "call_final_tool"
112
  return "call_tool"
113
  return "end_turn"
114
+
115
+ def _final_analysis_node(self, state: AgentState):
116
+ """
117
+ Appelle l'outil d'analyse finale. Construit les arguments manuellement
118
+ à partir de l'état du graphe pour garantir la fiabilité.
119
+ """
120
+ conversation_history = []
121
+ for msg in state["messages"]:
122
+ if isinstance(msg, HumanMessage):
123
+ role = "user"
124
+ elif isinstance(msg, AIMessage):
125
+ role = "assistant"
126
+ else:
127
+ continue
128
+ conversation_history.append({"role": role, "content": msg.content})
129
+
130
+ tool_input = {
131
+ "user_id": state['user_id'],
132
+ "job_offer_id": state['job_offer_id'],
133
+ "job_description": state['job_description'],
134
+ "conversation_history": conversation_history
135
+ }
136
+
137
+ trigger_interview_analysis.invoke(tool_input)
138
+ return {}
139
+
140
  def _build_graph(self) -> any:
141
  """Construit et compile le graphe d'états."""
142
  tool_node = ToolNode([trigger_interview_analysis])
 
170
  if not langchain_messages:
171
  logging.info("Historique de conversation vide. Ajout d'un message de démarrage interne.")
172
  langchain_messages.append(HumanMessage(content="Bonjour, je suis prêt à commencer l'entretien."))
173
+
174
  initial_state = {
175
  "user_id": self.user_id,
176
  "job_offer_id": self.job_offer_id,
 
183
  if not final_state or not final_state.get('messages'):
184
  logging.error("L'état final est vide ou ne contient pas de messages.")
185
  return {"response": "Erreur: Impossible de générer une réponse.", "status": "finished"}
 
186
  last_message = final_state['messages'][-1]
187
  status = "finished" if hasattr(last_message, 'tool_calls') and last_message.tool_calls else "interviewing"
188
  response_content = last_message.content