# Hosting-page residue ("Spaces: Sleeping / Sleeping") — commented out so this file runs as Python.
| import os | |
| import subprocess | |
| import platform | |
| import shutil | |
# Directory that holds the llama.cpp checkout and the downloaded model file.
MODEL_DIR = "models"
LLAMA_CPP_DIR = os.path.join(MODEL_DIR, "llama.cpp")
# Quantized (Q4_K_M) GGUF build of LLaMA 3 8B Instruct, fetched from Hugging Face.
GGUF_MODEL = "llama-3-8b-instruct.Q4_K_M.gguf"
GGUF_URL = f"https://huggingface.co/TheBloke/Llama-3-8B-Instruct-GGUF/resolve/main/{GGUF_MODEL}"
GGUF_PATH = os.path.join(MODEL_DIR, GGUF_MODEL)
def run_cmd(command, cwd=None):
    """Run a shell command, echoing it first.

    Args:
        command: Full shell command line (may contain pipes / `&&`).
        cwd: Optional working directory for the command.

    Raises:
        subprocess.CalledProcessError: If the command exits non-zero.
    """
    print(f"🔧 Executing: {command}")
    try:
        # shell=True is deliberate: callers pass whole command lines such as
        # "a && b". NOTE(review): never feed untrusted input into this.
        subprocess.run(command, shell=True, check=True, cwd=cwd)
    except subprocess.CalledProcessError:
        print(f"❌ Command failed: {command}")
        raise  # bare raise preserves the original exception and traceback
def install_tools():
    """Best-effort install of git/wget/build tools for the current OS.

    Never raises: on failure it reports the error and leaves manual
    installation to the user.
    """
    system = platform.system()
    print(f"🖥️ Detected OS: {system}")
    try:
        if system == "Linux":
            run_cmd("sudo apt update && sudo apt install -y git wget build-essential")
        elif system == "Darwin":  # macOS
            # "|| true" keeps going when the tools are already installed.
            run_cmd("xcode-select --install || true")
            run_cmd("brew install git wget cmake make || true")
        elif system == "Windows":
            print("⚠️ On Windows, please run this script inside Git Bash or WSL.")
        else:
            print("⚠️ Unsupported OS. Please install git, wget, make manually.")
    except Exception as e:
        # Fix: the original bound `e` but never showed it — surface the
        # actual failure so the user knows what to install manually.
        print("⚠️ Could not install dependencies automatically. Please install manually.")
        print(f"   ({e})")
def clone_llama_cpp():
    """Clone llama.cpp into MODEL_DIR unless a checkout already exists."""
    if os.path.exists(LLAMA_CPP_DIR):
        print("✅ llama.cpp already exists.")
        return
    # Plain string literal — the original used an f-string with no placeholders.
    run_cmd("git clone https://github.com/ggerganov/llama.cpp.git", cwd=MODEL_DIR)
def build_llama_cpp():
    """Compile llama.cpp via its Makefile (optional on Windows)."""
    makefile = os.path.join(LLAMA_CPP_DIR, "Makefile")
    # No Makefile means nothing to build (e.g. incomplete checkout) — skip quietly.
    if not os.path.exists(makefile):
        print("⚠️ Makefile not found, skipping build.")
        return
    try:
        run_cmd("make", cwd=LLAMA_CPP_DIR)
    except Exception:
        # Best-effort: a failed build is reported, not fatal.
        print("⚠️ Failed to compile llama.cpp. You may need build tools installed.")
    else:
        print("✅ llama.cpp built successfully.")
def download_model():
    """Download the GGUF model into MODEL_DIR if it is not already present.

    Fix: a failed or interrupted `wget -O` leaves a partial file at
    GGUF_PATH, which the next run would mistake for a finished download.
    On failure the partial file is removed and the error re-raised.
    """
    if os.path.exists(GGUF_PATH):
        print("✅ Model already exists.")
        return
    print("⬇️ Downloading LLaMA 3 model...")
    try:
        # Quote the arguments so unusual paths/URLs survive the shell.
        run_cmd(f'wget "{GGUF_URL}" -O "{GGUF_PATH}"')
    except Exception:
        # wget -O creates the output file immediately; delete the partial
        # file so a rerun retries the download instead of skipping it.
        if os.path.exists(GGUF_PATH):
            os.remove(GGUF_PATH)
        raise
    print("✅ Model downloaded.")
def setup():
    """Main setup logic: prepare the model dir, tools, llama.cpp, and the model."""
    os.makedirs(MODEL_DIR, exist_ok=True)
    # Run the setup phases in their required order.
    for step in (install_tools, clone_llama_cpp, build_llama_cpp, download_model):
        step()
    print("\n🎉 Setup complete. You are ready to run your local LLaMA agent.")
if __name__ == "__main__":
    # Guarded entry point: importing this module does not trigger the setup.
    setup()