import requests
import json
import re
from urllib.parse import quote
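
# Return the text that sits between start_tag and end_tag in `text` (the
# snippet markers requested via context_config in submit_query below).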
def extract_between_tags(text, start_tag, end_tag):
    start_index = text.find(start_tag)
    end_index = text.find(end_tag, start_index)
    # end_index is where the end tag begins, so slicing up to it yields exactly
    # the text between the two tags without trimming the snippet itself.
    return text[start_index + len(start_tag):end_index]
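
# Thin client for the Vectara Query API; it keeps the chat conversation id
# across calls so follow-up questions continue the same conversation.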
class VectaraQuery():
    def __init__(self, api_key: str, customer_id: int, corpus_ids: list):
        self.customer_id = customer_id
        self.corpus_ids = corpus_ids
        self.api_key = api_key
        self.conv_id = None
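
    # Run one chat turn: query the corpora, have Vectara summarize the top
    # matches, and return the summary text with its citation markers removed.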
    def submit_query(self, query_str: str):
        corpora_key_list = [{
            'customer_id': str(self.customer_id),
            'corpus_id': str(corpus_id),
            'lexical_interpolation_config': {'lambda': 0.025}
        } for corpus_id in self.corpus_ids]

        endpoint = "https://api.vectara.io/v1/query"
        start_tag = "%START_SNIPPET%"
        end_tag = "%END_SNIPPET%"
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "customer-id": str(self.customer_id),
            "x-api-key": self.api_key,
            "grpc-timeout": "60S"
        }
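
        # Request body: a single query over all configured corpora, with MMR
        # reranking for diversity and a chat-enabled summary; conversationId
        # threads successive turns of the same conversation.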
        body = {
            'query': [
                {
                    'query': query_str,
                    'start': 0,
                    'numResults': 50,
                    'corpusKey': corpora_key_list,
                    'context_config': {
                        'sentences_before': 2,
                        'sentences_after': 2,
                        'start_tag': start_tag,
                        'end_tag': end_tag,
                    },
                    'rerankingConfig': {
                        'rerankerId': 272725718,
                        'mmrConfig': {
                            'diversityBias': 0.3
                        }
                    },
                    'summary': [
                        {
                            'responseLang': 'eng',
                            'maxSummarizedResults': 5,
                            # 'summarizerPromptName': 'vectara-experimental-summary-ext-2023-12-11-sml',
                            'chat': {
                                'store': True,
                                'conversationId': self.conv_id
                            },
                            # 'debug': True,
                        }
                    ]
                }
            ]
        }

        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=headers)
        if response.status_code != 200:
            print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
            return "Sorry, I'm experiencing an error. Please report this and try again later."
        res = response.json()

        top_k = 10
        summary = res['responseSet'][0]['summary'][0]['text']
        responses = res['responseSet'][0]['response'][:top_k]
        docs = res['responseSet'][0]['document']
        chat = res['responseSet'][0]['summary'][0]['chat']

        # if len(chat['status']) <= 0:
        #     st_code = chat['status']
        #     print(f"Chat query failed with code {st_code}")
        #     if st_code == 'RESOURCE_EXHAUSTED':
        #         self.conv_id = None
        #         return 'Sorry, Vectara chat turns exceeds plan limit.'
        #     return 'Sorry, something went wrong in my brain. Please try again later.'
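
        # Remember the conversation id so the next call continues this chat.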
        try:
            self.conv_id = res['responseSet'][0]['summary'][0]['chat']['conversationId']
        except TypeError:
            return "I'm sorry. I am experiencing an error in Vectara API conversationId assignment"
        pattern = r'\[\d{1,2}\]'
        matches = [match.span() for match in re.finditer(pattern, summary)]

        # figure out unique list of references
        refs = []
        for match in matches:
            start, end = match
            response_num = int(summary[start+1:end-1])
            doc_num = responses[response_num-1]['documentIndex']
            metadata = {item['name']: item['value'] for item in docs[doc_num]['metadata']}
            text = extract_between_tags(responses[response_num-1]['text'], start_tag, end_tag)
            # for source citing url = f"{metadata['url']}#:~:text={quote(text)}"
            # if url not in refs:
            #     refs.append(url)

        # replace references with markdown links
        # refs_dict = {url: (inx+1) for inx, url in enumerate(refs)}
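        # With the link-building code commented out, this loop simply strips
        # the "[n]" markers from the summary, iterating in reverse so earlier
        # spans remain valid as text is removed.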
        for match in reversed(matches):
            start, end = match
            response_num = int(summary[start+1:end-1])
            doc_num = responses[response_num-1]['documentIndex']
            metadata = {item['name']: item['value'] for item in docs[doc_num]['metadata']}
            text = extract_between_tags(responses[response_num-1]['text'], start_tag, end_tag)
            # for source citing url = f"{metadata['url']}#:~:text={quote(text)}"
            # citation_inx = refs_dict[url]
            summary = summary[:start] + summary[end:]
        return summary
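

# Example usage (hypothetical values; a real Vectara customer id, API key, and
# corpus ids are required):
#
#   vq = VectaraQuery(api_key="...", customer_id=1234567890, corpus_ids=[3])
#   print(vq.submit_query("What does the documentation say about indexing?"))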