Update app.py
app.py CHANGED
@@ -6,6 +6,18 @@ from huggingface_hub import InferenceClient # add to requirements.txt
 
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
+def format_final_answer(q: str, raw: str) -> str:
+    text = raw.strip().splitlines()[0]
+    if "how many" in q.lower():
+        m = re.search(r"\d+", text)
+        if m:
+            return m.group(0)
+    # As a safeguard, also strip common wrappers:
+    for pre in ("final answer:", "answer:", "final:", "prediction:"):
+        if text.lower().startswith(pre):
+            text = text[len(pre):].strip()
+    return text
+
 # --- provider selection (HF serverless text-generation by default; optional Groq) ---
 def select_model():
     provider = os.getenv("PROVIDER", "hf").lower()
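The added format_final_answer helper normalizes the model's raw completion before it is submitted for exact-match scoring: it keeps only the first line, returns a bare integer for "how many" questions, and strips common "final answer:"-style prefixes. It calls re.search, so app.py needs import re at the top (the import block is outside this hunk). A couple of hypothetical inputs, not from the commit, illustrating the intended behavior:

    # Hypothetical examples; format_final_answer is the helper added above.
    format_final_answer("How many studio albums?", "Final answer: 12 albums")  # -> "12"
    format_final_answer("Who wrote it?", "Answer: Jane Austen\nBecause ...")   # -> "Jane Austen"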
@@ -60,8 +72,7 @@ class BasicAgent:
             chat = self.hf.chat_completion(
                 model=model,
                 messages=[{"role": "user", "content": prompt}],
-                max_tokens=
-                temperature=0.2,
+                temperature=0.0, max_tokens=16, top_p=1.0
             )
             return chat.choices[0].message["content"].strip()
         raise
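Replacing the dangling max_tokens= and the temperature=0.2 line with temperature=0.0, max_tokens=16, top_p=1.0 makes decoding deterministic and caps answers at a few tokens, which suits exact-match scoring. A minimal standalone sketch of the same call, assuming an HF token in the HF_TOKEN env var and a chat-capable model id (both assumptions, not part of this commit):

    import os
    from huggingface_hub import InferenceClient

    client = InferenceClient(token=os.getenv("HF_TOKEN"))
    chat = client.chat_completion(
        model="HuggingFaceH4/zephyr-7b-beta",  # assumed model id; any chat model works
        messages=[{"role": "user", "content": "2 + 2 = ?"}],
        temperature=0.0, max_tokens=16, top_p=1.0,
    )
    print(chat.choices[0].message["content"].strip())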
@@ -92,11 +103,8 @@ class BasicAgent:
         if ctx:
             prompt += f"\nContext:\n{ctx[:2000]}\n"
 
-
-
-        for pre in ("final answer:", "answer:", "final:", "prediction:"):
-            if ans.lower().startswith(pre): ans = ans[len(pre):].strip()
-        return ans
+        raw = self._llm(prompt)
+        return format_final_answer(question, raw)
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
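After the last hunk, the agent's answer path is prompt -> self._llm(prompt) -> format_final_answer(question, raw), so prefix-stripping lives in one place instead of inline in the method. A hedged sketch of that flow, with a hypothetical DummyAgent and a canned completion standing in for the real model call:

    # DummyAgent is a stand-in for BasicAgent, only to show the new answer path.
    class DummyAgent:
        def _llm(self, prompt: str) -> str:
            return "Final answer: 3"  # canned completion instead of a real LLM call

        def answer(self, question: str) -> str:
            prompt = f"Question: {question}\nAnswer concisely."
            raw = self._llm(prompt)
            return format_final_answer(question, raw)

    print(DummyAgent().answer("How many legs does a tripod have?"))  # prints "3"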