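The table below records, for each model repository, a VRAM figure, the auto-generated test scripts (self-contained Python files with PEP 723 inline metadata), and links to the stored script files and execution logs. A minimal sketch of loading the table programmatically, assuming it is published as a Hugging Face dataset; the repo id below is a hypothetical placeholder, since the actual id is not shown on this page:

```python
# Minimal sketch, assuming this table is hosted as a Hugging Face dataset.
# The repo id is a placeholder -- substitute the real one.
from datasets import load_dataset

ds = load_dataset("model-metadata/<this-table>", split="train")  # hypothetical id
for row in ds:
    print(row["model_id"], row["vram"], len(row["scripts"]))
```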
| model_id (string, 16-38 chars) | vram (float64, 0-4.97k) | scripts (list, 0-2 items) | code_urls (list, 0-2 items) | execution_urls (list, 0-2 items) |
|---|---|---|---|---|
PaddlePaddle/PaddleOCR-VL
| 2.32
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # See https://www.paddleocr.ai/latest/version3.x/pipeline_usage/PaddleOCR-VL.html to installation\n \n from paddleocr import PaddleOCRVL\n pipeline = PaddleOCRVL()\n output = pipeline.predict(\"path/to/document_image.png\")\n for res in output:\n \tres.print()\n \tres.save_to_json(save_path=\"output\")\n \tres.save_to_markdown(save_path=\"output\")\n with open('PaddlePaddle_PaddleOCR-VL_0.txt', 'w') as f:\n f.write('Everything was good in PaddlePaddle_PaddleOCR-VL_0.txt')\nexcept Exception as e:\n with open('PaddlePaddle_PaddleOCR-VL_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PaddlePaddle_PaddleOCR-VL_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='PaddlePaddle_PaddleOCR-VL_0.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/PaddlePaddle_PaddleOCR-VL_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/PaddlePaddle_PaddleOCR-VL_0.txt"
] |
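Every script in this table follows the same harness: run the model-specific snippet, write a status file, and upload it in a `finally` block so a log exists whether the snippet succeeded or raised. (Note that the PaddleOCR-VL script above declares only the torch/transformers stack as dependencies, not `paddleocr`, so under a PEP 723 runner its import would land in the `except` branch.) A distilled sketch of the shared pattern, with an illustrative filename:

```python
# Distilled harness pattern shared by the scripts in this table.
# STATUS is an illustrative placeholder, not a row from the table.
import traceback
from huggingface_hub import upload_file

STATUS = "org_model_0.txt"  # placeholder status-file name

try:
    # ... model-specific snippet would run here ...
    with open(STATUS, "w") as f:
        f.write(f"Everything was good in {STATUS}")
except Exception:
    # On any failure, the status file captures the traceback instead.
    with open(STATUS, "w") as f:
        traceback.print_exc(file=f)
finally:
    # Runs on success or failure, so every execution leaves a log behind.
    upload_file(
        path_or_fileobj=STATUS,
        repo_id="model-metadata/custom_code_execution_files",
        path_in_repo=STATUS,
        repo_type="dataset",
    )
```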
nvidia/omnivinci
| 0
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"feature-extraction\", model=\"nvidia/omnivinci\", trust_remote_code=True)\n with open('nvidia_omnivinci_0.txt', 'w') as f:\n f.write('Everything was good in nvidia_omnivinci_0.txt')\nexcept Exception as e:\n with open('nvidia_omnivinci_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_omnivinci_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='nvidia_omnivinci_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"nvidia/omnivinci\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('nvidia_omnivinci_1.txt', 'w') as f:\n f.write('Everything was good in nvidia_omnivinci_1.txt')\nexcept Exception as e:\n with open('nvidia_omnivinci_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_omnivinci_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='nvidia_omnivinci_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/nvidia_omnivinci_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/nvidia_omnivinci_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/nvidia_omnivinci_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/nvidia_omnivinci_1.txt"
] |
deepseek-ai/DeepSeek-OCR
| 8.08
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('deepseek-ai_DeepSeek-OCR_0.txt', 'w') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-OCR_0.txt')\nexcept Exception as e:\n with open('deepseek-ai_DeepSeek-OCR_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-OCR_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-OCR_0.txt',\n repo_type='dataset',\n )"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] |
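For deepseek-ai/DeepSeek-OCR the snippet generator could not identify the model's library (see the comment in the script above), so the `code_urls` and `execution_urls` cells carry `DO NOT EXECUTE` / `WAS NOT EXECUTED` sentinels instead of file URLs.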
inclusionAI/LLaDA2.0-flash-preview
| 249.14
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/LLaDA2.0-flash-preview\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_LLaDA2.0-flash-preview_0.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_LLaDA2.0-flash-preview_0.txt')\nexcept Exception as e:\n with open('inclusionAI_LLaDA2.0-flash-preview_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_LLaDA2.0-flash-preview_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_LLaDA2.0-flash-preview_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/LLaDA2.0-flash-preview\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_LLaDA2.0-flash-preview_1.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_LLaDA2.0-flash-preview_1.txt')\nexcept Exception as e:\n with open('inclusionAI_LLaDA2.0-flash-preview_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_LLaDA2.0-flash-preview_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_LLaDA2.0-flash-preview_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_LLaDA2.0-flash-preview_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_LLaDA2.0-flash-preview_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_LLaDA2.0-flash-preview_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_LLaDA2.0-flash-preview_1.txt"
] |
facebook/MobileLLM-Pro
| 2.63
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"facebook/MobileLLM-Pro\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('facebook_MobileLLM-Pro_0.txt', 'w') as f:\n f.write('Everything was good in facebook_MobileLLM-Pro_0.txt')\nexcept Exception as e:\n with open('facebook_MobileLLM-Pro_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_MobileLLM-Pro_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='facebook_MobileLLM-Pro_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"facebook/MobileLLM-Pro\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"facebook/MobileLLM-Pro\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('facebook_MobileLLM-Pro_1.txt', 'w') as f:\n f.write('Everything was good in facebook_MobileLLM-Pro_1.txt')\nexcept Exception as e:\n with open('facebook_MobileLLM-Pro_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_MobileLLM-Pro_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='facebook_MobileLLM-Pro_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/facebook_MobileLLM-Pro_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/facebook_MobileLLM-Pro_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/facebook_MobileLLM-Pro_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/facebook_MobileLLM-Pro_1.txt"
] |
nvidia/llama-embed-nemotron-8b
| 18.17
|
[] |
[] |
[] |
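nvidia/llama-embed-nemotron-8b has empty `scripts`, `code_urls`, and `execution_urls` lists: no snippet was generated or executed for this model, which the schema permits (list lengths start at 0).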
inclusionAI/LLaDA2.0-mini-preview
| 39.36
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/LLaDA2.0-mini-preview\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_LLaDA2.0-mini-preview_0.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_LLaDA2.0-mini-preview_0.txt')\nexcept Exception as e:\n with open('inclusionAI_LLaDA2.0-mini-preview_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_LLaDA2.0-mini-preview_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_LLaDA2.0-mini-preview_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/LLaDA2.0-mini-preview\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_LLaDA2.0-mini-preview_1.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_LLaDA2.0-mini-preview_1.txt')\nexcept Exception as e:\n with open('inclusionAI_LLaDA2.0-mini-preview_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_LLaDA2.0-mini-preview_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_LLaDA2.0-mini-preview_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_LLaDA2.0-mini-preview_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_LLaDA2.0-mini-preview_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_LLaDA2.0-mini-preview_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_LLaDA2.0-mini-preview_1.txt"
] |
inclusionAI/Ling-1T
| 2,420.73
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/Ling-1T\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_Ling-1T_0.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_Ling-1T_0.txt')\nexcept Exception as e:\n with open('inclusionAI_Ling-1T_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ling-1T_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_Ling-1T_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ling-1T\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_Ling-1T_1.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_Ling-1T_1.txt')\nexcept Exception as e:\n with open('inclusionAI_Ling-1T_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ling-1T_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_Ling-1T_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_Ling-1T_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_Ling-1T_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_Ling-1T_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_Ling-1T_1.txt"
] |
moonshotai/Kimi-K2-Instruct-0905
| 4,971.07
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('moonshotai_Kimi-K2-Instruct-0905_0.txt', 'w') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Instruct-0905_0.txt')\nexcept Exception as e:\n with open('moonshotai_Kimi-K2-Instruct-0905_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Instruct-0905_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Instruct-0905_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('moonshotai_Kimi-K2-Instruct-0905_1.txt', 'w') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Instruct-0905_1.txt')\nexcept Exception as e:\n with open('moonshotai_Kimi-K2-Instruct-0905_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Instruct-0905_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Instruct-0905_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_1.txt"
] |
rednote-hilab/dots.ocr
| 7.36
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # integration status unknown.\n \n # Please clone model and use locally.\n \n # Also feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('rednote-hilab_dots.ocr_0.txt', 'w') as f:\n f.write('Everything was good in rednote-hilab_dots.ocr_0.txt')\nexcept Exception as e:\n with open('rednote-hilab_dots.ocr_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='rednote-hilab_dots.ocr_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='rednote-hilab_dots.ocr_0.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/rednote-hilab_dots.ocr_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/rednote-hilab_dots.ocr_0.txt"
] |
inclusionAI/Ring-1T
| 4,841.45
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/Ring-1T\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_Ring-1T_0.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_Ring-1T_0.txt')\nexcept Exception as e:\n with open('inclusionAI_Ring-1T_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ring-1T_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_Ring-1T_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ring-1T\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_Ring-1T_1.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_Ring-1T_1.txt')\nexcept Exception as e:\n with open('inclusionAI_Ring-1T_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ring-1T_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_Ring-1T_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_Ring-1T_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_Ring-1T_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_Ring-1T_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_Ring-1T_1.txt"
] |
inclusionAI/Ring-flash-linear-2.0-128k
| 504.54
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/Ring-flash-linear-2.0-128k\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_Ring-flash-linear-2.0-128k_0.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_Ring-flash-linear-2.0-128k_0.txt')\nexcept Exception as e:\n with open('inclusionAI_Ring-flash-linear-2.0-128k_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ring-flash-linear-2.0-128k_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_Ring-flash-linear-2.0-128k_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ring-flash-linear-2.0-128k\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_Ring-flash-linear-2.0-128k_1.txt', 'w') as f:\n f.write('Everything was good in inclusionAI_Ring-flash-linear-2.0-128k_1.txt')\nexcept Exception as e:\n with open('inclusionAI_Ring-flash-linear-2.0-128k_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ring-flash-linear-2.0-128k_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='inclusionAI_Ring-flash-linear-2.0-128k_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_Ring-flash-linear-2.0-128k_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/inclusionAI_Ring-flash-linear-2.0-128k_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_Ring-flash-linear-2.0-128k_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/inclusionAI_Ring-flash-linear-2.0-128k_1.txt"
] |
deepseek-ai/DeepSeek-R1
| 1,657.55
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-R1_0.txt', 'w') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-R1_0.txt')\nexcept Exception as e:\n with open('deepseek-ai_DeepSeek-R1_0.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-R1_0.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-R1_0.txt',\n repo_type='dataset',\n )",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"accelerate\",\n# \"peft\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('deepseek-ai_DeepSeek-R1_1.txt', 'w') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-R1_1.txt')\nexcept Exception as e:\n with open('deepseek-ai_DeepSeek-R1_1.txt', 'w') as f:\n import traceback\n traceback.print_exc(file=f)\nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-R1_1.txt',\n repo_id='model-metadata/custom_code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-R1_1.txt',\n repo_type='dataset',\n )"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/deepseek-ai_DeepSeek-R1_0.py",
"https://huggingface.co/datasets/model-metadata/custom_code_py_files/raw/main/deepseek-ai_DeepSeek-R1_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/deepseek-ai_DeepSeek-R1_0.txt",
"https://huggingface.co/datasets/model-metadata/custom_code_execution_files/raw/main/deepseek-ai_DeepSeek-R1_1.txt"
] |
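The raw URLs above can also be fetched through `huggingface_hub`, which resolves the same files from the dataset repos; a minimal sketch using one execution log from the table:

```python
# Minimal sketch: download one execution log listed above.
# hf_hub_download resolves the same file as the raw URL for that row.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="model-metadata/custom_code_execution_files",
    filename="facebook_MobileLLM-Pro_0.txt",
    repo_type="dataset",
)
with open(path) as f:
    print(f.read())
```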