Commit 2c80bdd (1 parent: 72c8535)
Update Index.py
Index.py (CHANGED)

@@ -1,5 +1,5 @@
 from fastapi import FastAPI
-from pydantic import BaseModel
+#from pydantic import BaseModel
 
 # from transformers import pipeline
 from txtai.embeddings import Embeddings
@@ -227,11 +227,11 @@ def _search(query, extractor, question=None):
     return extractor([("answer", query, _prompt(question), False)])[0][1]
 
 
-class ModelOutputEvaluate(BaseModel):
-    question: str
-    answer: str
-    domain: str
-    context: str
+# class ModelOutputEvaluate(BaseModel):
+#     question: str
+#     answer: str
+#     domain: str
+#     context: str
 
 class BasePromptContext:
     def __init__(self):
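With ModelOutputEvaluate commented out, the evaluation payload becomes a plain dict carrying the same four keys as the old pydantic model. A minimal sketch of that shape (the values here are hypothetical):

    # Plain-dict replacement for the commented-out pydantic model;
    # the keys mirror its question/answer/domain/context fields.
    item = {
        "question": "What is txtai?",
        "answer": "txtai is an embeddings database for semantic search.",
        "domain": "docs",
        "context": "Passages retrieved from the docs index.",
    }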
@@ -271,11 +271,11 @@ Please provide your grading for the correctness and explain you gave the particu
 
 
 class Evaluater:
-    def __init__(self, item: ModelOutputEvaluate):
-        self.question = item.question
-        self.answer = item.answer
-        self.domain = item.domain
-        self.context = item.context
+    def __init__(self, item):
+        self.question = item["question"]
+        self.answer = item["answer"]
+        self.domain = item["domain"]
+        self.context = item["context"]
         self.llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":1, "max_length":1000000})
 
     def get_prompt_template(self):
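Evaluater.__init__ switches from attribute access on the pydantic model to dict lookups, so it can be built straight from such a dict. A usage sketch under that assumption (constructing it also instantiates the HuggingFaceHub LLM, which in LangChain typically requires a HUGGINGFACEHUB_API_TOKEN in the environment):

    # Hypothetical usage; evaluate() is the scoring method the next hunk calls.
    evaluator = Evaluater(item)
    score = evaluator.evaluate()  # grades the answer with flan-t5-xxl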
@@ -291,8 +291,7 @@ class Evaluater:
         return score
 
 # Create extractor instance
-
-async def create_evaluation_scenario(item: ModelOutputEvaluate):
+def _create_evaluation_scenario(item):
     output = {
         "input": item,
         "score" : Evaluater(item).evaluate()
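Dropping async and the ModelOutputEvaluate annotation turns create_evaluation_scenario into a private synchronous helper, presumably so rag can call it inline instead of exposing it as a route. How that wiring might look (this call site is an assumption, not shown in the diff):

    # Hypothetical call inside rag(); answer and context would come from
    # the extractor and the retrieved passages earlier in the handler.
    if evaluate:
        output = _create_evaluation_scenario({
            "question": question,
            "answer": answer,
            "domain": domain,
            "context": context,
        })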
@@ -301,7 +300,7 @@ async def create_evaluation_scenario(item: ModelOutputEvaluate):
 
 
 @app.get("/rag")
-def rag(domain: str, question: str):
+def rag(domain: str, question: str, evaluate: bool):
     print()
     db_exists = _check_if_db_exists(db_path=f"{os.getcwd()}/index/{domain}/documents")
     print(db_exists)
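Because the new evaluate query parameter has no default, every call to /rag must now supply it; FastAPI coerces values such as true/false or 1/0 into the bool. A sketch of a client call (host and port are assumptions):

    import httpx  # any HTTP client would do

    resp = httpx.get(
        "http://localhost:8000/rag",
        params={"domain": "docs", "question": "What is txtai?", "evaluate": "true"},
    )
    print(resp.json())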
|