Commit 5b5ee28
Parent(s): a99fd50

Removed pycache (hopefully), and fixed assay names

Files changed:
- .gitignore +2 -0
- __pycache__/about.cpython-311.pyc +0 -0
- __pycache__/evaluation.cpython-311.pyc +0 -0
- __pycache__/submit.cpython-311.pyc +0 -0
- __pycache__/utils.cpython-311.pyc +0 -0
- __pycache__/visualize.cpython-311.pyc +0 -0
- about.py +15 -1
- app.py +7 -6
.gitignore
ADDED
@@ -0,0 +1,2 @@
+__pycache__/
+*.pyc
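Note (not part of the commit itself): a .gitignore entry only keeps new, untracked files out of future commits; the .pyc files that were already committed still have to be removed from the index, which the five deletions below do. That is presumably the reason for the "(hopefully)" in the commit message.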
__pycache__/about.cpython-311.pyc
DELETED
Binary file (767 Bytes)

__pycache__/evaluation.cpython-311.pyc
DELETED
Binary file (481 Bytes)

__pycache__/submit.cpython-311.pyc
DELETED
Binary file (4.06 kB)

__pycache__/utils.cpython-311.pyc
DELETED
Binary file (3.2 kB)

__pycache__/visualize.cpython-311.pyc
DELETED
Binary file (679 Bytes)
about.py
CHANGED
@@ -1,7 +1,21 @@
 import os
 from huggingface_hub import HfApi

-ASSAY_LIST = ["AC-SINS_pH7.4", "
+ASSAY_LIST = ["AC-SINS_pH7.4", "PR_CHO", "HIC", "Tm2", "Titer"]
+ASSAY_RENAME = {
+    "AC-SINS_pH7.4": "Self-association",
+    "PR_CHO": "Polyreactivity",
+    "HIC": "Hydrophobicity",
+    "Tm2": "Thermostability",
+    "Titer": "Titer",
+}
+ASSAY_EMOJIS = {
+    "AC-SINS_pH7.4": "🧲",
+    "PR_CHO": "🎯",
+    "HIC": "💧",
+    "Tm2": "🌡️",
+    "Titer": "🧪",
+}
 TOKEN = os.environ.get("HF_TOKEN")
 CACHE_PATH=os.getenv("HF_HOME", ".")
 API = HfApi(token=TOKEN)
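As a quick, hypothetical illustration (not part of the commit) of how the two new lookup tables compose into display labels, mirroring what app.py does below; the dictionary contents are copied from the diff above:

ASSAY_LIST = ["AC-SINS_pH7.4", "PR_CHO", "HIC", "Tm2", "Titer"]
ASSAY_RENAME = {"AC-SINS_pH7.4": "Self-association", "PR_CHO": "Polyreactivity",
                "HIC": "Hydrophobicity", "Tm2": "Thermostability", "Titer": "Titer"}
ASSAY_EMOJIS = {"AC-SINS_pH7.4": "🧲", "PR_CHO": "🎯", "HIC": "💧", "Tm2": "🌡️", "Titer": "🧪"}

for assay in ASSAY_LIST:
    # Same f-string pattern app.py now uses for its tab titles.
    print(f"{ASSAY_EMOJIS[assay]} {ASSAY_RENAME[assay]}")  # e.g. "💧 Hydrophobicity" for "HIC"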
app.py
CHANGED
@@ -7,7 +7,7 @@ from gradio_leaderboard import Leaderboard
 from evaluation import evaluate_problem

 from utils import read_submission_from_hub, write_results
-from about import ASSAY_LIST
+from about import ASSAY_LIST, ASSAY_RENAME, ASSAY_EMOJIS

 def evaluate_boundary(filename):
     print(filename)
@@ -36,8 +36,9 @@ def get_leaderboard_table(assay: str | None = None):
     # to_show['user'] = to_show['user'].apply(lambda x: make_user_clickable(x)).astype(str)

     # Previously hosted on HF hub, local for now
-    column_order = ["model", "feature", "
+    column_order = ["model", "feature", "property", "spearman", "spearman_abs"] # "assay",
     df = pd.read_csv("data/metrics_all.csv")
+    df["property"] = df["assay"].map(ASSAY_RENAME)
     if assay is not None:
         df = df[df['assay'] == assay]
     df = df[column_order]
@@ -48,11 +49,11 @@ def get_leaderboard_object(assay: str | None = None):
     df = get_leaderboard_table(assay=assay)
     filter_columns = ["model"]
     if assay is None:
-        filter_columns.append("
+        filter_columns.append("property")
     Leaderboard(
         value=df,
         datatype=["str", "str", "str", "number"],
-        select_columns=["model", "
+        select_columns=["model", "feature", "property", "spearman"],
         search_columns=["model"],
         # hide_columns=["spearman_abs"],
         filter_columns=filter_columns,
@@ -77,8 +78,8 @@ with gr.Blocks() as demo:

     # Procedurally make these 5 tabs
     for assay in ASSAY_LIST:
-        with gr.TabItem(assay, elem_id=f"abdev-benchmark-tab-table"):
-            gr.Markdown(f"# {assay}")
+        with gr.TabItem(f"{ASSAY_EMOJIS[assay]} {ASSAY_RENAME[assay]}", elem_id=f"abdev-benchmark-tab-table"):
+            gr.Markdown(f"# {ASSAY_RENAME[assay]} (measured by {assay})")
             get_leaderboard_object(assay=assay)

     with gr.TabItem("❔About", elem_id="abdev-benchmark-tab-table"):