# Note that this requirements file is optimised for Hugging Face Spaces / Python 3.10.
# Please use requirements_no_local.txt for installation without local model inference (the simplest approach to get going).
# Please use requirements_cpu.txt for CPU instances and requirements_gpu.txt for GPU instances using Python 3.11.
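# A minimal install sketch, assuming this file is saved as requirements.txt
# (pip reads the --extra-index-url option on the torch line below from the file itself):
#   pip install -r requirements.txt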
gradio==6.0.2
transformers==4.57.2
spaces==0.42.1
boto3>=1.42.1
pandas>=2.3.3
pyarrow>=21.0.0
openpyxl>=3.1.5
markdown>=3.7
tabulate>=0.9.0
lxml>=5.3.0
google-genai>=1.52.0
openai>=2.8.1
html5lib>=1.1
beautifulsoup4>=4.12.3
rapidfuzz>=3.13.0
python-dotenv>=1.1.0
# GPU (for Hugging Face instance)
# Torch/Unsloth and llama-cpp-python
# Latest versions compatible with CUDA 12.8 (the cu128 wheel index used below)
torch<=2.9.1 --extra-index-url https://download.pytorch.org/whl/cu128
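# A quick post-install sanity check that the CUDA build of torch is active (a sketch;
# torch.version.cuda and torch.cuda.is_available() are standard torch attributes):
#   python -c "import torch; print(torch.version.cuda, torch.cuda.is_available())"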
unsloth[cu128-torch280]<=2025.11.6
unsloth_zoo<=2025.11.6
timm
# llama-cpp-python direct wheel link: GPU-compatible version 0.3.17 for use with Python 3.10 and Hugging Face
https://github.com/JamePeng/llama-cpp-python/releases/download/v0.3.17-cu128-Basic-linux-20251202/llama_cpp_python-0.3.17-cp310-cp310-linux_x86_64.whl
#https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.16-cu124/llama_cpp_python-0.3.16-cp310-cp310-linux_x86_64.whl
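# A hedged sketch to confirm the wheel above can offload to GPU: 'model.gguf' is a
# hypothetical local GGUF path, while n_gpu_layers=-1 (offload all layers) and
# verbose=True are standard llama-cpp-python Llama constructor parameters:
#   python -c "from llama_cpp import Llama; Llama('model.gguf', n_gpu_layers=-1, verbose=True)"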