import os
import time
from huggingface_hub import HfApi, HfFileSystem
import pandas as pd
import threading
import gradio as gr
from gradio_space_ci import enable_space_ci

from functions import commit

enable_space_ci()

HF_TOKEN = os.getenv('HF_TOKEN')
BOT_HF_TOKEN = os.getenv('BOT_HF_TOKEN')

api = HfApi()
fs = HfFileSystem()
def refresh(how_much=43200):  # default to 12 hours
    time.sleep(how_much)
    try:
        # Restart this Space so it picks up fresh leaderboard data on startup.
        api.restart_space(repo_id="Weyaxi/leaderboard-results-to-modelcard")
    except Exception as e:
        print(f"Error while restarting the space to refresh leaderboard data, trying again... {e}")
        refresh(600)  # retry in 10 minutes if an error happens
gradio_title = "🧐 Open LLM Leaderboard Results PR Opener"

gradio_desc = """🎯 This tool adds [Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) results to your model card.

## 💭 What Does This Tool Do:

- This tool adds your model's [Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) results to the end of your model card.
- This tool also adds the evaluation results to your model's metadata, so they are showcased in a widget.

## 🛠️ Backend

The leaderboard's backend mainly runs on the [Hugging Face Hub API](https://huggingface.co/docs/huggingface_hub/v0.5.1/en/package_reference/hf_api).

## 🤝 Acknowledgements

- Special thanks to [Clémentine Fourrier (clefourrier)](https://huggingface.co/clefourrier) for her help and contributions to the code.
- Special thanks to [Lucain Pouget (Wauplin)](https://huggingface.co/Wauplin) for assisting with the [Hugging Face Hub API](https://huggingface.co/docs/huggingface_hub/v0.5.1/en/package_reference/hf_api).
"""
with gr.Blocks() as demo:
    gr.HTML(f"""<h1 align="center" id="space-title">{gradio_title}</h1>""")
    gr.HTML("""<h1 align="center" style="font-weight: bold; font-size: 40px; color: red;">IMPORTANT NOTE</h1>""")
    gr.HTML("""<h1 align="center" style="font-weight: bold; font-size: 25px; color: red;">
            Open LLM Leaderboard was <a href="https://x.com/clefourrier/status/1900280339613860057">retired</a> on March 13, 2025. This space will likely be retired soon as well!</h1>""")
    gr.Markdown(gradio_desc)

    with gr.Row(equal_height=False):
        with gr.Column():
            model_id = gr.Textbox(label="Model ID or URL", lines=1)
            gr.LoginButton()

        with gr.Column():
            output = gr.Textbox(label="Output", lines=1)

    submit_btn = gr.Button("Submit", variant="primary")
    def validate_model_id(input_id, oauth_token: gr.OAuthToken):
        # Refuse to open PRs against repos that were being spammed.
        if "mattshumer/" in input_id:
            raise gr.Error("DON'T SPAM THOSE MODELS")
        return commit(input_id, oauth_token=oauth_token.token)

    submit_btn.click(validate_model_id, model_id, output)
threading.Thread(target=refresh).start()

demo.launch()
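
# ---------------------------------------------------------------------------
# Note: the commit() helper imported from functions.py is not part of this
# listing. Below is a minimal, hypothetical sketch of what such a helper could
# look like, assuming it appends a results section to the model card, attaches
# the scores as model-index metadata, and opens both changes as a single pull
# request via huggingface_hub. The name commit_sketch, the placeholder section
# text, and the example EvalResult values are illustrative only, not the
# Space's actual implementation.
# ---------------------------------------------------------------------------
from huggingface_hub import EvalResult, ModelCard


def commit_sketch(model_id: str, oauth_token: str) -> str:
    # Illustrative placeholder; the real tool builds these entries from the
    # leaderboard's published results.
    eval_results = [
        EvalResult(
            task_type="text-generation",
            dataset_type="ai2_arc",
            dataset_name="AI2 Reasoning Challenge (25-Shot)",
            metric_type="acc_norm",
            metric_value=0.0,  # placeholder value
            metric_name="normalized accuracy",
        )
    ]

    card = ModelCard.load(model_id, token=oauth_token)

    # Append a results section to the end of the existing model card text...
    card.text += "\n# Open LLM Leaderboard Evaluation Results\n(detailed results would go here)\n"

    # ...and attach the scores to the card metadata so the Hub renders them
    # as an evaluation-results widget.
    card.data.eval_results = eval_results
    card.data.model_name = model_id.split("/")[-1]

    # Open the combined change as one pull request against the model repo.
    card.push_to_hub(model_id, token=oauth_token, create_pr=True)
    return f"Opened a PR with leaderboard results for {model_id}"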