# Qwen3-4B Tool Calling with llama-cpp-python
# Requirements for running the model locally

# Core dependencies
llama-cpp-python[server]>=0.3.16  # Includes server support for Codex
numpy>=1.20.0
typing-extensions>=4.5.0
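
# Build note: pip installs llama-cpp-python by compiling llama.cpp from
# source unless a prebuilt wheel matches your platform, so a C/C++ toolchain
# and CMake may be required at install time. GPU builds are commonly enabled
# through CMAKE_ARGS, e.g.:
#   CMAKE_ARGS="-DGGML_CUDA=on" pip install 'llama-cpp-python[server]>=0.3.16'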

# API and web dependencies
requests>=2.28.0  # For API examples and Codex integration
jinja2>=2.11.3    # For chat template processing
diskcache>=5.6.1  # For caching (already a llama-cpp-python dependency, pinned here explicitly)
uvicorn>=0.24.0   # For server functionality
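# Note: the [server] extra above already pulls in uvicorn (plus fastapi and
# sse-starlette); the explicit uvicorn pin presumably serves as a floor so
# the file still resolves if the extra is ever dropped.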

# Development dependencies (optional)
pytest>=7.0.0   # For testing
black>=22.0.0   # For code formatting
flake8>=5.0.0   # For linting
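
# Quick-start sketch: one way to install these dependencies and launch the
# OpenAI-compatible server bundled with llama-cpp-python. The GGUF path is a
# placeholder; point it at your local Qwen3-4B quantization.
#   pip install -r requirements.txt
#   python -m llama_cpp.server --model ./models/Qwen3-4B-Q4_K_M.gguf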