| """ | |
| Live monitor of the website statistics and leaderboard. | |
| Dependency: | |
| sudo apt install pkg-config libicu-dev | |
| pip install pytz gradio gdown plotly polyglot pyicu pycld2 tabulate | |
| """ | |
| import argparse | |
| import ast | |
| import pickle | |
| import os | |
| import threading | |
| import time | |
| import gradio as gr | |
| import numpy as np | |
| import pandas as pd | |
basic_component_values = [None] * 6
leader_component_values = [None] * 5

# map internal model keys to display names
model_name_dict = {
    "dreamfusion": "DreamFusion",
    "mvdream": "MVDream",
    "lucid-dreamer": "LucidDreamer",
    "magic3d": "Magic3D",
    "grm-t": "GRM", "grm-i": "GRM", "grm": "GRM",
    "latent-nerf": "Latent-NeRF",
    "shap-e-t": "Shap-E", "shap-e-i": "Shap-E", "shap-e": "Shap-E",
    "point-e-t": "Point-E", "point-e-i": "Point-E", "point-e": "Point-E",
    "sjc": "SJC",
    "wonder3d": "Wonder3D",
    "openlrm": "OpenLRM",
    "sz123": "Stable Zero123", "stable-zero123": "Stable Zero123",
    "z123": "Zero123-XL", "zero123-xl": "Zero123-XL",
    "magic123": "Magic123",
    "lgm": "LGM",
    "syncdreamer": "SyncDreamer",
    "triplane-gaussian": "TriplaneGaussian",
    "escher-net": "EscherNet",
    "free3d": "Free3D",
    "instant-mesh": "InstantMesh",
}

def replace_model_name(name, rank):
    # fall back to the raw key if a model is missing from the dict
    name = model_name_dict.get(name, name)
    if rank == 0:
        return "🥇 " + name
    elif rank == 1:
        return "🥈 " + name
    elif rank == 2:
        return "🥉 " + name
    else:
        return name
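# Hypothetical usage (not part of the original file), assuming the medal
# emojis above:
#   replace_model_name("lgm", 0)    -> "🥇 LGM"
#   replace_model_name("free3d", 3) -> "Free3D"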

def make_leaderboard_md(elo_results):
    leaderboard_md = f"""
# 🏆 3DGen-Arena Leaderboard
"""
    return leaderboard_md

def make_leaderboard_md_live(elo_results):
    leaderboard_md = f"""
# Leaderboard
Last updated: {elo_results["last_updated_datetime"]}
{elo_results["leaderboard_table"]}
"""
    return leaderboard_md

def model_hyperlink(model_name, link):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline; text-decoration-style: dotted;">{model_name}</a>'
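# Hypothetical example: model_hyperlink("LGM", "https://example.com") yields a
# dotted-underline <a> tag, which Gradio renders inside "markdown"-typed
# dataframe cells.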

def load_leaderboard_table_csv(filename, add_hyperlink=True):
    df = pd.read_csv(filename)
    # drop rows without a model key
    df = df.drop(df[df["Key"].isnull()].index)
    for col in df.columns:
        if "Elo rating" in col:
            df[col] = df[col].apply(lambda x: int(x) if (x != "-" and pd.notna(x)) else np.nan)
        if add_hyperlink and col == "Model":
            df[col] = df.apply(lambda row: model_hyperlink(row[col], row["Link"]), axis=1)
    return df
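# A sketch of the CSV schema this loader assumes, inferred from the column
# names referenced here and in get_full_table / get_arena_table (the row
# values below are hypothetical):
#
#   Key,Model,Link,Organization,License,Arena Elo rating
#   lgm,LGM,https://example.com,SomeOrg,MIT,1100.0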

def build_basic_stats_tab():
    empty = "Loading ..."
    basic_component_values[:] = [empty, None, empty, empty, empty, empty]

    md0 = gr.Markdown(empty)
    gr.Markdown("#### Figure 1: Number of model calls and votes")
    plot_1 = gr.Plot(show_label=False)
    with gr.Row():
        with gr.Column():
            md1 = gr.Markdown(empty)
        with gr.Column():
            md2 = gr.Markdown(empty)
    with gr.Row():
        with gr.Column():
            md3 = gr.Markdown(empty)
        with gr.Column():
            md4 = gr.Markdown(empty)
    return [md0, plot_1, md1, md2, md3, md4]
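# A minimal sketch of how a live updater might refresh basic_component_values,
# assuming a hypothetical compute_basic_stats() helper (this module imports
# threading and time, but the actual update loop lives elsewhere):
#
# def update_worker(interval_sec=300):
#     while True:
#         basic_component_values[:] = compute_basic_stats()  # hypothetical helper
#         time.sleep(interval_sec)
#
# threading.Thread(target=update_worker, daemon=True).start()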

def get_full_table(anony_arena_df, full_arena_df, model_table_df):
    values = []
    for i in range(len(model_table_df)):
        row = []
        model_key = model_table_df.iloc[i]["Key"]
        model_name = model_table_df.iloc[i]["Model"]
        # model display name
        row.append(model_name)
        # anonymous-battle Elo rating
        if model_key in anony_arena_df.index:
            idx = anony_arena_df.index.get_loc(model_key)
            row.append(round(anony_arena_df.iloc[idx]["rating"]))
        else:
            row.append(np.nan)
        # full (anonymous + named) Elo rating
        if model_key in full_arena_df.index:
            idx = full_arena_df.index.get_loc(model_key)
            row.append(round(full_arena_df.iloc[idx]["rating"]))
        else:
            row.append(np.nan)
        # upstream MT-bench / Num Battles / MMLU columns are omitted here
        row.append(model_table_df.iloc[i]["Organization"])
        row.append(model_table_df.iloc[i]["License"])
        values.append(row)
    # sort by anonymous Elo, descending; unrated models (NaN) sink to the bottom
    values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)
    return values

def get_arena_table(arena_dfs, model_table_df):
    # rows follow the order of model_table_df, which is assumed to be
    # pre-sorted by rating
    values = []
    for i in range(len(model_table_df)):
        row = []
        model_name = model_table_df.iloc[i]["Key"]
        # rank
        row.append(i + 1)
        # model display name
        row.append(replace_model_name(model_name, i))
        # per-dimension Elo ratings
        num_battles = 0
        for dim in arena_dfs.keys():
            row.append(round(arena_dfs[dim].loc[model_name]["rating"], 2))
            # the 95% CI column is currently disabled; rating_q975 / rating_q025
            # remain available in arena_dfs if it is re-enabled
            try:
                num_battles += round(arena_dfs[dim].loc[model_name]["num_battles"])
            except (KeyError, ValueError):
                # missing model or NaN battle count
                pass
        # average Elo rating from the model table
        row.append(round(model_table_df.iloc[i]["Arena Elo rating"], 2))
        # total battles across all dimensions
        row.append(num_battles)
        values.append(row)
    return values
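# Shape assumed for each arena_dfs[dim] (inferred from the lookups above, not
# documented in the original file): a DataFrame indexed by model key with at
# least the columns "rating", "rating_q025", "rating_q975", and "num_battles".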

def make_arena_leaderboard_md(elo_results):
    total_votes = 0
    for dim in elo_results.keys():
        arena_df = elo_results[dim]["leaderboard_table_df"]
        last_updated = elo_results[dim]["last_updated_datetime"]
        # each battle increments num_battles for both participating models,
        # so halve the sum to count votes
        total_votes += sum(arena_df["num_battles"].fillna(0)) // 2
    # the last dimension's table is used for the model count and update time
    total_models = len(arena_df)
    leaderboard_md = f"""
Total #models: **{total_models}**. \n
Total #votes: **{int(total_votes)}** (Anonymous Votes only). \n
Last updated: {last_updated}. \n
Contribute your votes 🗳️ at [3DGen-Arena](https://huggingface.co/spaces/ZhangYuhan/3DGen-Arena)!
"""
    return leaderboard_md

def make_full_leaderboard_md(elo_results):
    total_votes = 0
    for dim in elo_results.keys():
        arena_df = elo_results[dim]["leaderboard_table_df"]
        last_updated = elo_results[dim]["last_updated_datetime"]
        # each battle increments num_battles for both participating models,
        # so halve the sum to count votes
        total_votes += sum(arena_df["num_battles"].fillna(0)) // 2
    # the last dimension's table is used for the model count and update time
    total_models = len(arena_df)
    leaderboard_md = f"""
Total #models: **{total_models}**. \n
Total #votes: **{int(total_votes)}** (Anonymous + Named Votes). \n
Last updated: {last_updated}.\n
Contribute your votes 🗳️ at [3DGen-Arena](https://huggingface.co/spaces/ZhangYuhan/3DGen-Arena)!
"""
    return leaderboard_md
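# Shape both markdown builders expect for elo_results (inferred from the keys
# accessed above; "quality" is a hypothetical dimension name):
#
#   {
#       "quality": {
#           "leaderboard_table_df": <pd.DataFrame with a "num_battles" column>,
#           "last_updated_datetime": "2024-05-01 12:00:00",
#       },
#       ...
#   }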

def build_empty_leaderboard_tab():
    leaderboard_md = f"""
# 🗳️ Leaderboard
## The leaderboard is coming soon. We look forward to your votes!
"""
    gr.Markdown(leaderboard_md, elem_id="leaderboard_markdown")

def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=False):
    if elo_results_file is None:  # do live update
        md = "Loading ..."
        p1 = p2 = p3 = p4 = None
    else:
        with open(elo_results_file, "rb") as fin:
            elo_results = pickle.load(fin)

        anony_elo_results, full_elo_results = {}, {}
        anony_arena_dfs, full_arena_dfs = {}, {}
        p1, p2, p3, p4 = {}, {}, {}, {}
        for dim in elo_results.keys():
            anony_elo_results[dim] = elo_results[dim]["anony"]
            full_elo_results[dim] = elo_results[dim]["full"]
            anony_arena_dfs[dim] = anony_elo_results[dim]["leaderboard_table_df"]
            full_arena_dfs[dim] = full_elo_results[dim]["leaderboard_table_df"]
            p1[dim] = anony_elo_results[dim]["win_fraction_heatmap"]
            p2[dim] = anony_elo_results[dim]["battle_count_heatmap"]
            p3[dim] = anony_elo_results[dim]["bootstrap_elo_rating"]
            p4[dim] = anony_elo_results[dim]["average_win_rate_bar"]

        md = make_leaderboard_md(anony_elo_results)

    md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")

    if leaderboard_table_file:
        model_table_df = load_leaderboard_table_csv(leaderboard_table_file)
        model_table_df_full = load_leaderboard_table_csv(str(leaderboard_table_file).replace(".csv", "_full.csv"))
| with gr.Tabs() as tabs: | |
| # arena table | |
| arena_table_vals = get_arena_table(anony_arena_dfs, model_table_df) | |
| with gr.Tab("Anony. Arena", id=0): | |
| md = make_arena_leaderboard_md(anony_elo_results) | |
| gr.Markdown(md, elem_id="leaderboard_markdown") | |
| gr.Dataframe( | |
| # headers=[ | |
| # "Rank", | |
| # "๐ค Model", | |
| # "โญ Arena Elo", | |
| # "๐ 95% CI", | |
| # "๐ณ๏ธ Votes", | |
| # "Organization", | |
| # "License", | |
| # ], | |
| headers=["Rank", "๐ค Model"] + [f"๐ {dim} Elo" for dim in anony_arena_dfs.keys()] + ["โญ Avg. Arena Elo Ranking", "๐ฎ Votes"], | |
| datatype=[ | |
| "str", | |
| "markdown", | |
| "number", | |
| "number", | |
| "number", | |
| "number", | |
| "number", | |
| "number", | |
| "number" | |
| ], | |
| value=arena_table_vals, | |
| # value=model_table_df, | |
| elem_id="arena_leaderboard_dataframe", | |
| height=700, | |
| column_widths=[50, 200, 100, 100, 100, 100, 100, 100, 100], | |
| wrap=True, | |
| ) | |
| with gr.Tab("Full Arena", id=1): | |
| md = make_full_leaderboard_md(full_elo_results) | |
| gr.Markdown(md, elem_id="leaderboard_markdown") | |
| full_table_vals = get_arena_table(full_arena_dfs, model_table_df_full) | |
| gr.Dataframe( | |
| headers=["Rank", "๐ค Model"] + [f"๐ {dim} Elo" for dim in anony_arena_dfs.keys()] + ["โญ Avg. Arena Elo Ranking", "๐ฎ Votes"], | |
| datatype=[ | |
| "str", | |
| "markdown", | |
| "number", | |
| "number", | |
| "number", | |
| "number", | |
| "number", | |
| "number", | |
| "number" | |
| ], | |
| value=full_table_vals, | |
| elem_id="full_leaderboard_dataframe", | |
| column_widths=[50, 200, 100, 100, 100, 100, 100, 100, 100], | |
| height=700, | |
| wrap=True, | |
| ) | |
        if not show_plot:
            gr.Markdown(
                """## We are still collecting votes on more models. The rankings will be updated frequently. Please stay tuned!""",
                elem_id="leaderboard_markdown",
            )
    else:
        pass

    # leader_component_values[:] = [md, p1, p2, p3, p4]

    # the per-figure plot panels below are kept for reference but are
    # currently disabled
    """
    with gr.Row():
        with gr.Column():
            gr.Markdown(
                "#### Figure 1: Fraction of Model A Wins for All Non-tied A vs. B Battles"
            )
            plot_1 = gr.Plot(p1, show_label=False)
        with gr.Column():
            gr.Markdown(
                "#### Figure 2: Battle Count for Each Combination of Models (without Ties)"
            )
            plot_2 = gr.Plot(p2, show_label=False)
    with gr.Row():
        with gr.Column():
            gr.Markdown(
                "#### Figure 3: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)"
            )
            plot_3 = gr.Plot(p3, show_label=False)
        with gr.Column():
            gr.Markdown(
                "#### Figure 4: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)"
            )
            plot_4 = gr.Plot(p4, show_label=False)
    """
    from .utils import acknowledgment_md

    gr.Markdown(acknowledgment_md)
    # return [md_1, plot_1, plot_2, plot_3, plot_4]
    return [md_1]
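
# Minimal standalone usage sketch (not part of the original file). The file
# paths below are hypothetical, and the relative import of acknowledgment_md
# means this module normally runs as part of the larger package:
#
# if __name__ == "__main__":
#     with gr.Blocks() as demo:
#         build_leaderboard_tab("elo_results.pkl", "leaderboard_table.csv")
#     demo.launch()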