Spaces:
Running
Running
Commit
·
4a799a0
1
Parent(s):
1c03709
remove inference api checkbox
Browse files- app_text_classification.py +1 -8
- run_jobs.py +1 -6
- text_classification_ui_helpers.py +5 -7
app_text_classification.py
CHANGED
|
@@ -94,8 +94,6 @@ def get_demo():
|
|
| 94 |
|
| 95 |
with gr.Accordion(label="Model Wrap Advanced Config", open=True):
|
| 96 |
gr.HTML(USE_INFERENCE_API_TIP)
|
| 97 |
-
|
| 98 |
-
run_inference = gr.Checkbox(value=True, label="Run with Inference API")
|
| 99 |
inference_token = gr.Textbox(
|
| 100 |
placeholder="hf_xxxxxxxxxxxxxxxxxxxx",
|
| 101 |
value="",
|
|
@@ -146,7 +144,7 @@ def get_demo():
|
|
| 146 |
logs = gr.Textbox(
|
| 147 |
value=CHECK_LOG_SECTION_RAW,
|
| 148 |
label="Log",
|
| 149 |
-
visible=True,
|
| 150 |
every=0.5,
|
| 151 |
)
|
| 152 |
|
|
@@ -233,7 +231,6 @@ def get_demo():
|
|
| 233 |
dataset_config_input,
|
| 234 |
dataset_split_input,
|
| 235 |
uid_label,
|
| 236 |
-
run_inference,
|
| 237 |
inference_token,
|
| 238 |
],
|
| 239 |
outputs=[
|
|
@@ -257,7 +254,6 @@ def get_demo():
|
|
| 257 |
dataset_id_input,
|
| 258 |
dataset_config_input,
|
| 259 |
dataset_split_input,
|
| 260 |
-
run_inference,
|
| 261 |
inference_token,
|
| 262 |
uid_label,
|
| 263 |
],
|
|
@@ -274,14 +270,12 @@ def get_demo():
|
|
| 274 |
|
| 275 |
gr.on(
|
| 276 |
triggers=[
|
| 277 |
-
run_inference.input,
|
| 278 |
inference_token.input,
|
| 279 |
scanners.input,
|
| 280 |
],
|
| 281 |
fn=enable_run_btn,
|
| 282 |
inputs=[
|
| 283 |
uid_label,
|
| 284 |
-
run_inference,
|
| 285 |
inference_token,
|
| 286 |
model_id_input,
|
| 287 |
dataset_id_input,
|
|
@@ -296,7 +290,6 @@ def get_demo():
|
|
| 296 |
fn=enable_run_btn,
|
| 297 |
inputs=[
|
| 298 |
uid_label,
|
| 299 |
-
run_inference,
|
| 300 |
inference_token,
|
| 301 |
model_id_input,
|
| 302 |
dataset_id_input,
|
|
|
|
| 94 |
|
| 95 |
with gr.Accordion(label="Model Wrap Advanced Config", open=True):
|
| 96 |
gr.HTML(USE_INFERENCE_API_TIP)
|
|
|
|
|
|
|
| 97 |
inference_token = gr.Textbox(
|
| 98 |
placeholder="hf_xxxxxxxxxxxxxxxxxxxx",
|
| 99 |
value="",
|
|
|
|
| 144 |
logs = gr.Textbox(
|
| 145 |
value=CHECK_LOG_SECTION_RAW,
|
| 146 |
label="Log",
|
| 147 |
+
visible=False,
|
| 148 |
every=0.5,
|
| 149 |
)
|
| 150 |
|
|
|
|
| 231 |
dataset_config_input,
|
| 232 |
dataset_split_input,
|
| 233 |
uid_label,
|
|
|
|
| 234 |
inference_token,
|
| 235 |
],
|
| 236 |
outputs=[
|
|
|
|
| 254 |
dataset_id_input,
|
| 255 |
dataset_config_input,
|
| 256 |
dataset_split_input,
|
|
|
|
| 257 |
inference_token,
|
| 258 |
uid_label,
|
| 259 |
],
|
|
|
|
| 270 |
|
| 271 |
gr.on(
|
| 272 |
triggers=[
|
|
|
|
| 273 |
inference_token.input,
|
| 274 |
scanners.input,
|
| 275 |
],
|
| 276 |
fn=enable_run_btn,
|
| 277 |
inputs=[
|
| 278 |
uid_label,
|
|
|
|
| 279 |
inference_token,
|
| 280 |
model_id_input,
|
| 281 |
dataset_id_input,
|
|
|
|
| 290 |
fn=enable_run_btn,
|
| 291 |
inputs=[
|
| 292 |
uid_label,
|
|
|
|
| 293 |
inference_token,
|
| 294 |
model_id_input,
|
| 295 |
dataset_id_input,
|
run_jobs.py
CHANGED
|
@@ -50,7 +50,6 @@ def prepare_env_and_get_command(
|
|
| 50 |
d_id,
|
| 51 |
config,
|
| 52 |
split,
|
| 53 |
-
inference,
|
| 54 |
inference_token,
|
| 55 |
uid,
|
| 56 |
label_mapping,
|
|
@@ -60,10 +59,6 @@ def prepare_env_and_get_command(
|
|
| 60 |
if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
|
| 61 |
leaderboard_dataset = LEADERBOARD
|
| 62 |
|
| 63 |
-
inference_type = "hf_pipeline"
|
| 64 |
-
if inference and inference_token:
|
| 65 |
-
inference_type = "hf_inference_api"
|
| 66 |
-
|
| 67 |
executable = "giskard_scanner"
|
| 68 |
try:
|
| 69 |
# Copy the current requirements (might be changed)
|
|
@@ -100,7 +95,7 @@ def prepare_env_and_get_command(
|
|
| 100 |
"--scan_config",
|
| 101 |
get_submitted_yaml_path(uid),
|
| 102 |
"--inference_type",
|
| 103 |
-
inference_type,
|
| 104 |
"--inference_api_token",
|
| 105 |
inference_token,
|
| 106 |
]
|
|
|
|
| 50 |
d_id,
|
| 51 |
config,
|
| 52 |
split,
|
|
|
|
| 53 |
inference_token,
|
| 54 |
uid,
|
| 55 |
label_mapping,
|
|
|
|
| 59 |
if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
|
| 60 |
leaderboard_dataset = LEADERBOARD
|
| 61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
executable = "giskard_scanner"
|
| 63 |
try:
|
| 64 |
# Copy the current requirements (might be changed)
|
|
|
|
| 95 |
"--scan_config",
|
| 96 |
get_submitted_yaml_path(uid),
|
| 97 |
"--inference_type",
|
| 98 |
+
"hf_inference_api",
|
| 99 |
"--inference_api_token",
|
| 100 |
inference_token,
|
| 101 |
]
|
text_classification_ui_helpers.py
CHANGED
|
@@ -252,7 +252,6 @@ def align_columns_and_show_prediction(
|
|
| 252 |
dataset_config,
|
| 253 |
dataset_split,
|
| 254 |
uid,
|
| 255 |
-
run_inference,
|
| 256 |
inference_token,
|
| 257 |
):
|
| 258 |
model_id = strip_model_id_from_url(model_id)
|
|
@@ -347,7 +346,7 @@ def align_columns_and_show_prediction(
|
|
| 347 |
gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
|
| 348 |
gr.update(value=prediction_response, visible=True),
|
| 349 |
gr.update(visible=True, open=True),
|
| 350 |
-
gr.update(interactive=(run_inference and inference_token != "")),
|
| 351 |
"",
|
| 352 |
*column_mappings,
|
| 353 |
)
|
|
@@ -357,7 +356,7 @@ def align_columns_and_show_prediction(
|
|
| 357 |
gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
|
| 358 |
gr.update(value=prediction_response, visible=True),
|
| 359 |
gr.update(visible=True, open=False),
|
| 360 |
-
gr.update(interactive=(run_inference and inference_token != "")),
|
| 361 |
"",
|
| 362 |
*column_mappings,
|
| 363 |
)
|
|
@@ -375,8 +374,8 @@ def check_column_mapping_keys_validity(all_mappings):
|
|
| 375 |
|
| 376 |
return True
|
| 377 |
|
| 378 |
-
def enable_run_btn(uid, run_inference, inference_token, model_id, dataset_id, dataset_config, dataset_split):
|
| 379 |
-
if not run_inference or inference_token == "":
|
| 380 |
logger.warn("Inference API is not enabled")
|
| 381 |
return gr.update(interactive=False)
|
| 382 |
if model_id == "" or dataset_id == "" or dataset_config == "" or dataset_split == "":
|
|
@@ -422,7 +421,7 @@ def show_hf_token_info(token):
|
|
| 422 |
return gr.update(visible=True)
|
| 423 |
return gr.update(visible=False)
|
| 424 |
|
| 425 |
-
def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
|
| 426 |
all_mappings = read_column_mapping(uid)
|
| 427 |
if not check_column_mapping_keys_validity(all_mappings):
|
| 428 |
return (gr.update(interactive=True), gr.update(visible=False))
|
|
@@ -440,7 +439,6 @@ def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
|
|
| 440 |
d_id,
|
| 441 |
config,
|
| 442 |
split,
|
| 443 |
-
inference,
|
| 444 |
inference_token,
|
| 445 |
uid,
|
| 446 |
label_mapping,
|
|
|
|
| 252 |
dataset_config,
|
| 253 |
dataset_split,
|
| 254 |
uid,
|
|
|
|
| 255 |
inference_token,
|
| 256 |
):
|
| 257 |
model_id = strip_model_id_from_url(model_id)
|
|
|
|
| 346 |
gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
|
| 347 |
gr.update(value=prediction_response, visible=True),
|
| 348 |
gr.update(visible=True, open=True),
|
| 349 |
+
gr.update(interactive=(inference_token != "")),
|
| 350 |
"",
|
| 351 |
*column_mappings,
|
| 352 |
)
|
|
|
|
| 356 |
gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
|
| 357 |
gr.update(value=prediction_response, visible=True),
|
| 358 |
gr.update(visible=True, open=False),
|
| 359 |
+
gr.update(interactive=(inference_token != "")),
|
| 360 |
"",
|
| 361 |
*column_mappings,
|
| 362 |
)
|
|
|
|
| 374 |
|
| 375 |
return True
|
| 376 |
|
| 377 |
+
def enable_run_btn(uid, inference_token, model_id, dataset_id, dataset_config, dataset_split):
|
| 378 |
+
if inference_token == "":
|
| 379 |
logger.warn("Inference API is not enabled")
|
| 380 |
return gr.update(interactive=False)
|
| 381 |
if model_id == "" or dataset_id == "" or dataset_config == "" or dataset_split == "":
|
|
|
|
| 421 |
return gr.update(visible=True)
|
| 422 |
return gr.update(visible=False)
|
| 423 |
|
| 424 |
+
def try_submit(m_id, d_id, config, split, inference_token, uid):
|
| 425 |
all_mappings = read_column_mapping(uid)
|
| 426 |
if not check_column_mapping_keys_validity(all_mappings):
|
| 427 |
return (gr.update(interactive=True), gr.update(visible=False))
|
|
|
|
| 439 |
d_id,
|
| 440 |
config,
|
| 441 |
split,
|
|
|
|
| 442 |
inference_token,
|
| 443 |
uid,
|
| 444 |
label_mapping,
|