from doc_processor import DocProcessor from contextual_doc_processor import ContextualDocProcessor from embedding_retriever import EmbeddingRetriever from langchain_community.retrievers import BM25Retriever import numpy as np from huggingface_hub import InferenceClient import torch # from sentence_transformers import CrossEncoder import json import os def get_list_dir(DATA_DIR): list_dir = os.listdir(DATA_DIR) list_dir = [d for d in list_dir if os.path.isdir(os.path.join(DATA_DIR, d))] return list_dir def get_data_paths(DATA_DIR, list_dir): list_files = [] for d in list_dir: start_path = os.path.join(DATA_DIR, d) filenames = os.listdir(start_path) filenames = [f for f in filenames if f.endswith(".json")] paths = [os.path.join(start_path, f) for f in filenames] list_files += paths return list_files def get_chunks(DATA_DIR, list_dir, use_context_wtcontext, use_context, PATH_SAVE_CHUNKS, PATH_SAVE_CONTEXT_CHUNKS): chunks = [] LIST_FILES = get_data_paths(DATA_DIR, list_dir) if use_context_wtcontext: doc_process = ContextualDocProcessor(LIST_FILES, PATH_SAVE_CONTEXT_CHUNKS) doc_process.process_data() chunks = doc_process.chunks doc_process = DocProcessor(LIST_FILES, PATH_SAVE_CHUNKS) doc_process.process_data() chunks += doc_process.chunks else: if use_context: doc_process = ContextualDocProcessor(LIST_FILES, PATH_SAVE_CONTEXT_CHUNKS) else: doc_process = DocProcessor(LIST_FILES, PATH_SAVE_CHUNKS) doc_process.process_data() chunks = doc_process.chunks return chunks def process_data(DATA_DIR, PATH_SAVE_CHUNKS, PATH_SAVE_CONTEXT_CHUNKS, use_context_wtcontext, use_context): list_dir = get_list_dir(DATA_DIR) chunks = get_chunks(DATA_DIR, list_dir, use_context_wtcontext, use_context, PATH_SAVE_CHUNKS, PATH_SAVE_CONTEXT_CHUNKS) return list_dir, chunks def add_embedding_retriever(embedding_models, embedding_model_name, path_idx, chunks, device): embedding_models[path_idx] = EmbeddingRetriever(embedding_model_name, path_idx, chunks, device) return embedding_models def add_BM25_retriever(chunks, TOP_K): return BM25Retriever.from_documents(chunks, k=TOP_K) def process_retrievers(embedding_model_names, chunks, TOP_K, use_context, use_context_wtcontext, PATH_IDX, PATH_IDX_CONTEXT, PATH_IDX_CONTEXT_AND_WT, device): embedding_models = {} if embedding_model_names: path_idx = PATH_IDX_CONTEXT_AND_WT if use_context_wtcontext else PATH_IDX_CONTEXT if use_context else PATH_IDX for embedding_model_name in embedding_model_names: embedding_models = add_embedding_retriever(embedding_models, embedding_model_name, path_idx, device) BM25_retriever = add_BM25_retriever(chunks, TOP_K) return embedding_models, BM25_retriever class Agent: def __init__(self, list_dir, chunks, embedding_models, BM25_retriever, TOP_K, reformulation=False, use_HyDE=False, use_HyDE_cut=False, ask_again=False): # self.DATA_DIR = DATA_DIR # self.agent_name = agent_name self.embedding_models = embedding_models self.BM25_retriever = BM25_retriever self.ranks = {} self.current_query = [] if reformulation else "" self.reformulation = reformulation self.use_HyDE = use_HyDE self.use_HyDE_cut = use_HyDE_cut self.ask_again = ask_again self.history = [] # self.device = device # self.semantic_rerank_model = model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2') # self.list_dir = self.get_list_dir(DATA_DIR) self.list_dir = list_dir self.TOP_K = TOP_K self.chunks = chunks # LIST_FILES = self.get_data_paths(DATA_DIR) # if use_context_wtcontext: # doc_process = ContextualDocProcessor(LIST_FILES, PATH_SAVE_CONTEXT_CHUNKS) # doc_process.process_data() # self.chunks = doc_process.chunks # doc_process = DocProcessor(LIST_FILES, PATH_SAVE_CHUNKS) # doc_process.process_data() # self.chunks += doc_process.chunks # else: # if use_context: # doc_process = ContextualDocProcessor(LIST_FILES, PATH_SAVE_CONTEXT_CHUNKS) # else: # doc_process = DocProcessor(LIST_FILES, PATH_SAVE_CHUNKS) # doc_process.process_data() # self.chunks = doc_process.chunks # if embedding_model_names: # path_idx = PATH_IDX_CONTEXT_AND_WT if use_context_wtcontext else PATH_IDX_CONTEXT if use_context else PATH_IDX # for embedding_model_name in embedding_model_names: # self.add_embedding_retriever(embedding_model_name, path_idx) # self.add_BM25_retriever() # def get_list_dir(self, DATA_DIR): # list_dir = os.listdir(DATA_DIR) # list_dir = [d for d in list_dir if os.path.isdir(os.path.join(DATA_DIR, d))] # return list_dir # def get_data_paths(self, DATA_DIR): # list_files = [] # # for d in os.listdir(DATA_DIR): # # if os.path.isdir(os.path.join(DATA_DIR, d)): # for d in self.list_dir: # start_path = os.path.join(DATA_DIR, d) # filenames = os.listdir(start_path) # filenames = [f for f in filenames if f.endswith(".json")] # paths = [os.path.join(start_path, f) for f in filenames] # list_files += paths # return list_files def select_sub_part_of_data(self, query): list_dept = "\n".join(f"- {dept}" for dept in self.list_dir) prompt = f"""Voici une liste de secteur d'activité: {list_dept} Quel est le secteur d'activité qui correspond à la question suivante : {query} Répondre uniquement avec le nom du secteur d'activité. """ response = InferenceClient().chat.completions.create( model="deepseek-ai/DeepSeek-V3-0324", messages=[ { "role": "user", "content": prompt } ], ) dept = response.choices[0].message.content return dept # def add_embedding_retriever(self, embedding_model_name, PATH_IDX): # self.embedding_models[PATH_IDX] = EmbeddingRetriever(embedding_model_name, PATH_IDX, self.chunks, self.device) def ask_a_question(self, query, nb_reformulation=5): self.ranks = {} # self.dept = self.select_sub_part_of_data(query) # print("Dept:",self.dept) # query = query + "\nLe département correspondant à cette question est " + self.dept if not self.reformulation and not self.use_HyDE and not self.use_HyDE_cut: self.current_query = query else: self.current_query = [query] if self.reformulation: self.reformulation_of_the_query(nb_reformulation) if self.use_HyDE: doc_hyde = self.generate_HyDE(query) self.current_query += [doc_hyde] if self.use_HyDE_cut: doc_hyde = self.generate_HyDE_cut(query) self.current_query += [doc_hyde] # print(self.current_query) def generate_HyDE(self, query): prompt = f"Voila une question d'un utilisateur: '{query}'\n\nGénère une réponse hypothétique en français à cette question qui pourrait être présente sur un site d'information avec 1000 caratères maximum." response = InferenceClient().chat.completions.create( model="deepseek-ai/DeepSeek-V3-0324", messages=[ { "role": "user", "content": prompt } ], ) doc_gen = response.choices[0].message.content return doc_gen def generate_HyDE_cut(self, query): prompt = f"Voila une question d'un utilisateur: '{query}'\n\nGénère une réponse hypothétique en français à cette question qui pourrait être présente sur un site d'information." response = InferenceClient().chat.completions.create( model="deepseek-ai/DeepSeek-V3-0324", messages=[ { "role": "user", "content": prompt } ], ) doc_gen = response.choices[0].message.content[:1000] return doc_gen def reformulation_of_the_query(self, nb_reformulation=5): prompt = f"Reformule en français {nb_reformulation} fois la question '{self.current_query[0]}'" response = InferenceClient().chat.completions.create( model="deepseek-ai/DeepSeek-V3-0324", messages=[ { "role": "user", "content": prompt } ], ) queries = response.choices[0].message.content.split('\n') queries = [q for q in queries if "?" in q and not self.current_query[0] in q] self.current_query += queries def retrieve_data_from_embeddings(self): r = {} if self.reformulation or self.use_HyDE or self.use_HyDE_cut: for i, query in enumerate(self.current_query): for name_model, retriever in self.embedding_models.items(): r[str(i)+"-"+name_model] = retriever.retrieve_data(query, self.TOP_K) else: for name_model, retriever in self.embedding_models.items(): r[name_model] = retriever.retrieve_data(self.current_query, self.TOP_K) self.ranks.update(r) # self.add_dict_to_ranks(r) # def add_dict_to_ranks(self, data): # if self.query in self.ranks: # self.ranks.update(data) # else: # self.ranks = data # def add_BM25_retriever(self): # self.BM25_retriever = BM25Retriever.from_documents(self.chunks, k=self.TOP_K) def retrieve_data_from_BM25(self): if self.reformulation or self.use_HyDE or self.use_HyDE_cut: for i, query in enumerate(self.current_query): self.retrieve_query_from_BM25(query, str(i)+"-BM25") else: self.retrieve_query_from_BM25(self.current_query, "BM25") # self.add_dict_to_ranks(query, {'BM25': np.array(idx_bm25)}) def retrieve_query_from_BM25(self, query, rank_name): top_k_docs = self.BM25_retriever.invoke(query) # filtered_chunks = [d for d in top_k_docs if self.dept in d.metadata['source']] # print("Filtered chunks:",len(filtered_chunks)) idx_bm25 = self.get_idx_from_lists(self.chunks, top_k_docs) self.ranks[rank_name] = np.array(idx_bm25) def get_idx_from_lists(self, main_list, sublist): idx_list = [] for sl in sublist: if sl in main_list: idx_list += [main_list.index(sl)] return idx_list def RRF(self, k=60): idx_score = {} for idx_list in self.ranks.values(): for rank, idx in enumerate(idx_list): if idx not in idx_score: idx_score[idx] = 1 / (k + rank) else: idx_score[idx] += 1 / (k + rank) return sorted(idx_score.items(), key=lambda x:x[1], reverse=True) # merge_l = RRF([idx_bm25, [4,149]]) # print(merge_l) def get_chunks_from_rank(self, idx_list): chunks = [] for e in idx_list: id, score = e chunks += [self.chunks[id]] return chunks def create_prompt_with_context(self, rank): chunks_list = self.get_chunks_from_rank(rank) page_contents = [chunk.page_content + "\n\nSource:" + chunk.metadata['source'] for chunk in chunks_list] context = "\n\n".join(page_contents) query = self.current_query[0] if self.reformulation or self.use_HyDE or self.use_HyDE_cut else self.current_query prompt = f"Répondez à la question suivante en utilisant le contexte ci-dessous:\n\nContexte:\n{context}\n\nQuestion: {query}" self.history += [{ "role": "user", "content": prompt, }] return prompt, page_contents def ask_agent(self, cpt=1): query = self.history[-1]['content'] response = InferenceClient().chat_completion( model="deepseek-ai/DeepSeek-V3-0324", messages = self.history, temperature = 0, seed = 0 ) reply = response.choices[0].message.content self.history += [{ "role": "assistant", "content": reply, }] if self.ask_again: good_reply = self.ask_again_agent(cpt) if not good_reply: start = "Peux-tu donner une meilleur réponse à cette question:" if not query.startswith(start): query = f"Peux-tu donner une meilleur réponse à cette question: \n\n {query}" self.ask_a_question(query) self.history += [{ "role": "user", "content": query, }] reply = self.ask_agent(cpt+1) return reply def ask_again_agent(self, cpt=1): prompt = f"Est ce que la réponse donnée est satisfaisante ? Répondre uniquement par Oui ou Non." self.history += [{ "role": "user", "content": prompt, }] response = InferenceClient().chat.completions.create( model="deepseek-ai/DeepSeek-V3-0324", messages = self.history, options = {'temperature': 0} ) reply = response.choices[0].message.content self.history += [{ "role": "assistant", "content": reply, }] if "Oui" in reply or cpt > 4: return True return False # def semantic_rerank(self, chunks, reply): # # chunks_list = self.get_chunks_from_rank(rank) # # page_contents = [chunk.page_content + "\n\nSource:" + chunk.metadata['source'] for chunk in chunks_list] # pairs = [(chunk, reply) for chunk in chunks] # scores = self.semantic_rerank_model.predict(pairs) # ranked = sorted(zip(chunks, scores), key=lambda x: x[1], reverse=True) # return ranked def get_url_from_paths(self, paths): url = [] for p in paths: with open(p) as json_file: data = json.load(json_file) url += [data['url']] return url def get_a_reply(self, query): self.ask_a_question(query) # self.retrieve_data_from_embeddings() self.retrieve_data_from_BM25() rank = self.RRF() prompt, chunks = self.create_prompt_with_context(rank) reply = self.ask_agent() self.retrieve_query_from_BM25(reply, 'source') sources_BM25 = [self.chunks[r].metadata['source'] for r in self.ranks['source']] sources = list({s for s in sources_BM25 if sources_BM25.count(s) > 1}) if not sources: sources = [sources_BM25[0]] sources = self.get_url_from_paths(sources) # print("=== HISTORY ===") # print(self.history) return (reply, sources) # def ask(self, query, PATH_IDX_CONTEXT_AND_WT, PATH_IDX_CONTEXT, PATH_IDX, embedding_model_name, TOP_K): # path_idx = PATH_IDX_CONTEXT_AND_WT if use_context_and_wt else PATH_IDX_CONTEXT if use_context else PATH_IDX # agent.add_embedding_retriever(embedding_model_name, path_idx) # agent.add_BM25_retriever(TOP_K) # agent.ask_a_question(query) # agent.retrieve_data_from_embeddings(TOP_K) # agent.retrieve_data_from_BM25() # # print(agent.ranks) # rank = agent.RRF() # prompt, chunks = agent.create_prompt_with_context(rank) # reply = agent.ask_agent() # agent.retrieve_query_from_BM25(reply, 'source') # sources_BM25 = [agent.chunks[r].metadata['source'] for r in agent.ranks['source']] # sources = list({s for s in sources_BM25 if sources_BM25.count(s) > 1}) # if not sources: # sources = [sources_BM25[0]] # sources = agent.get_url_from_paths(sources) # # print(sources) # return (reply, sources) if __name__ == "__main__": DATA_DIR = "data_websites" PATH_SAVE_CHUNKS = "chunks_saved.json" PATH_SAVE_CONTEXT = "chunks_with_context.json" PATH_IDX = "index_faiss_data_sh" PATH_IDX_CONTEXT = "index_faiss_context_sh" PATH_IDX_CONTEXT_AND_WT= "index_faiss_context_and_wt_sh" # embedding_model_names = ["Geotrend/distilbert-base-en-fr-cased"] embedding_model_names = [] # embedding_model_name = "almanach/camembertav2-base" agent_name = "Geotrend/distilbert-base-en-fr-cased" TOP_K = 10 # query = "Quelles sont les missions du collège Sciences de l’Homme ?" # query = "Comment reprendre des études supérieures sans baccalauréat?" # query = "**Les débouchés après une licence en informatique** \n\nObtenir une licence en informatique ouvre les portes à de nombreux débouchés professionnels excitants ! Voici quelques-uns des postes et domaines où vous pouvez appliquer vos compétences :\n\n1. **Développeur logiciel** : Créez des applications, des jeux vidéo ou des systèmes d'information pour répondre aux besoins des entreprises ou des particuliers.\n2. **Système administrateur** : Gérez les serveurs, les réseaux et les bases de données pour garantir la sécurité et l'intégrité des systèmes informatiques.\n3. **Analyste de système** : Étudiez les besoins des entreprises pour concevoir et améliorer les systèmes d'information, aidant ainsi à optimiser leurs processus métier.\n4. **Consultant en technologies de l'information** : Aidez les entreprises à choisir et à mettre en œuvre les solutions informatiques qui répondent à leurs besoins spécifiques.\n5. **Ingénieur logiciel** : Conceptionnez, développez et testez des logiciels pour répondre aux exigences des industries telles que l'aéronautique, la défense ou la santé.\n6. **Data Scientist** : Analysez et interprétez les données pour aider les entreprises à prendre des décisions éclairées et à améliorer leurs processus métier.\n7. **Conception de produits** : Utilisez vos compétences en informatique pour concevoir et développer des produits innovants, tels que des jeux vidéo, des applications mobiles ou des appareils connectés.\n8. **Gestionnaire de projet** : Planifiez, coordinatez et contrôliez les projets informatiques pour assurer leur réussite et leur livraison dans les délais impartis.\n9. **Formateur en informatique** : Partagez vos connaissances avec d'autres en créant des cours, des tutoriels ou des formations en informatique.\n10. **Entrepreneur** : Fondez votre propre entreprise de services IT ou développez un produit innovant qui répond à un besoin spécifique sur le marché.\n\nEn résumé, une licence en informatique vous offre une grande variété de débouchés professionnels, allant des technologies de l'information aux industries créatives. Les compétences acquises lors de votre formation vous permettront de faire carrière dans les secteurs les plus diversifiés !" # query = "Comment arriver en licence de sociologie?" # query = "L'université de Bordeaux permet aux étudiants de demander la prise en compte de leur prénom d'usage ?" # query = "Comment prendre rendez-vous à la Clinique du droit ?" query = "Comment se déroule la formation \"Accompagner les personnes âgées, comprendre le vieillissement et ses conséquences\" ?" # use_context = True use_context = False # reformulation = True reformulation = False # use_HyDE = True use_HyDE = False # use_HyDE_cut = True use_HyDE_cut = False use_context_wtcontext= True # use_context_wtcontext= False # ask_again = True ask_again = False device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # device = 'cpu' # print('Using device:', device) list_dir, chunks = process_data(DATA_DIR, PATH_SAVE_CHUNKS, PATH_SAVE_CONTEXT, use_context_wtcontext, use_context) embedding_models, BM25_retriever = process_retrievers(embedding_model_names, chunks, TOP_K, use_context, use_context_wtcontext, PATH_IDX, PATH_IDX_CONTEXT, PATH_IDX_CONTEXT_AND_WT, device) agent = Agent(list_dir, chunks, embedding_models, BM25_retriever, TOP_K, reformulation, use_HyDE, use_HyDE_cut, ask_again) agent.ask_a_question(query) agent.retrieve_data_from_embeddings() agent.retrieve_data_from_BM25() print(agent.ranks) for name, lr in agent.ranks.items(): print("*******",name,"*******") for r in lr: print(r) # if name == PATH_IDX_CONTEXT: # print(agent.doc_process.context[r]) # else: print(agent.chunks[r]) rank = agent.RRF() prompt, chunks = agent.create_prompt_with_context(rank) reply = agent.ask_agent() print("***************") for diag in agent.history: print(diag) # print(agent.history) print("***************") print(reply) # sources = agent.semantic_rerank(chunks, reply) # print("\nSemantic rerank:") # for chunk, score in sources[:10]: # print(f"Score: {score:.3f} | Chunk: {chunk.split('Source:')[-1]}") print("\nRank retrievers:") for chunk in chunks[:10]: print(f"Chunk: {chunk.split('Source:')[-1]}") agent.retrieve_query_from_BM25(reply, 'source') print("\nBM25 rerank:") for r in agent.ranks['source']: print(agent.chunks[r].metadata['source']) ########## Complete usage ########## # agent = Agent(DATA_DIR, PATH_SAVE_CHUNKS, PATH_SAVE_CONTEXT, agent_name) # agent.add_embedding_retriever(embedding_model_name, PATH_IDX) # agent.add_BM25_retriever(TOP_K) # agent.ask_a_question(query) # agent.retrieve_data_from_embeddings(TOP_K) # agent.retrieve_data_from_BM25() # print(agent.ranks) # for lr in agent.ranks.values(): # print("**************") # for r in lr: # print(r) # print(agent.chunks[r]) # rank = agent.RRF() # prompt = agent.create_prompt_with_context(rank) # reply = agent.ask_agent() # print(reply) #################################### ########## TO DO ########## # Chercher modele agent et le mettre en place : OK # Add history : OK a priori # Améliorer chunks # recursive retriever