# syntax=docker/dockerfile:1
# Lyra — Dockerfile (from the roll-ai Hugging Face Space; web-page chrome
# from the extraction — breadcrumb, "raw/history/blame", file size — removed).
# Base image with CUDA 12.1 toolchain + cuDNN 8. The `devel` variant is
# required: apex and transformer-engine compile CUDA extensions later on.
FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04

# Build-time only: suppress interactive apt prompts. ARG (not ENV) so the
# variable does not leak into the runtime environment of the final image.
ARG DEBIAN_FRONTEND=noninteractive
# --- 1. SYSTEM DEPS ---
# --no-install-recommends keeps the layer minimal; packages sorted for
# diffability; apt list cache removed in the same layer so it never
# persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    ffmpeg \
    git \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    wget \
    && rm -rf /var/lib/apt/lists/*
# --- 2. INSTALL CONDA ---
ENV CONDA_DIR=/opt/conda
# -f makes curl fail on HTTP errors instead of saving an error page as the
# installer script; download, install, and cache cleanup happen in one layer.
RUN curl -fsLo ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
    bash ~/miniconda.sh -b -p $CONDA_DIR && \
    rm ~/miniconda.sh && \
    $CONDA_DIR/bin/conda clean -afy
ENV PATH=$CONDA_DIR/bin:$PATH
# -o pipefail so a failure upstream of any pipe aborts the RUN step (DL4006).
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# --- 3. COPY FILES ---
# Copies the entire build context. NOTE(review): add a .dockerignore
# (.git, checkpoints, caches, .env) so the context stays small and no
# local secrets are copied in.
COPY . /workspace
WORKDIR /workspace
# Accept the Anaconda channel Terms of Service non-interactively; newer
# conda releases refuse `conda env create` on these channels otherwise.
RUN conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/main && \
conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/r
# --- 4. CREATE CONDA ENV ---
# Environment spec comes from lyra.yaml in the build context (env name: lyra).
RUN conda env create -f lyra.yaml
# key=value form — the space-separated `ENV key value` syntax is deprecated.
# Putting the env's bin dir first makes every later `python`/`pip` resolve to
# the lyra environment without needing `conda activate` in RUN steps.
ENV PATH=/opt/conda/envs/lyra/bin:$PATH
# Convenience for interactive shells only; build steps rely on PATH above.
RUN echo "conda activate lyra" >> ~/.bashrc
# --- 5. INSTALL PIP DEPENDENCIES ---
# --no-cache-dir keeps pip's download cache out of the image layer (DL3042).
# transformer-engine is pinned; the requirements files carry their own pins.
RUN pip install --no-cache-dir -r requirements_gen3c.txt && \
    pip install --no-cache-dir -r requirements_lyra.txt && \
    pip install --no-cache-dir transformer-engine[pytorch]==1.12.0
# --- 6. INSTALL APEX ---
# Builds apex's C++/CUDA extensions (CUDA_HOME pointed at the conda env).
# The clone is deleted in the same layer so the source tree does not bloat
# the image. NOTE(review): the clone is unpinned — consider
# `git clone --depth 1 --branch <tag>` for reproducible builds.
RUN git clone https://github.com/NVIDIA/apex && \
    CUDA_HOME=$CONDA_PREFIX pip install -v --disable-pip-version-check --no-cache-dir \
    --no-build-isolation --config-settings "--build-option=--cpp_ext" \
    --config-settings "--build-option=--cuda_ext" ./apex && \
    rm -rf apex
# --- 7. INSTALL MOGE + MAMBA ---
# mamba-ssm is pinned (v2.2.2) and needs --no-build-isolation so its build
# sees the already-installed torch. NOTE(review): MoGe is unpinned —
# consider pinning a commit SHA for reproducibility.
RUN pip install --no-cache-dir git+https://github.com/microsoft/MoGe.git
RUN pip install --no-cache-dir --no-build-isolation "git+https://github.com/state-spaces/[email protected]"
# --- 8. PATCH HEADERS ---
# Symlink the CUDA headers shipped inside the pip `nvidia-*` wheels into the
# conda include dirs so source builds can locate them.
# NOTE(review): hardcodes python3.10 — must match the lyra env's Python.
# NOTE(review): $CONDA_PREFIX is normally set by `conda activate`, which no
# RUN step performs — confirm it is non-empty in this build shell, otherwise
# these globs resolve against "/".
RUN ln -sf $CONDA_PREFIX/lib/python3.10/site-packages/nvidia/*/include/* $CONDA_PREFIX/include/ && \
ln -sf $CONDA_PREFIX/lib/python3.10/site-packages/nvidia/*/include/* $CONDA_PREFIX/include/python3.10
# --- 9. DOWNLOAD CHECKPOINTS ---
# Never put an HF token in the Dockerfile (or in ARG/ENV) — it would persist
# in image layers and `docker history`. Use a BuildKit secret mount; the
# token exists only for the duration of this RUN and lands in no layer:
#   docker build --secret id=hf_token,src=$HOME/.hf_token .
RUN --mount=type=secret,id=hf_token \
    huggingface-cli login --token "$(cat /run/secrets/hf_token)" && \
    python3 scripts/download_tokenizer_checkpoints.py --checkpoint_dir checkpoints/cosmos_predict1 --tokenizer_types CV8x8x8-720p && \
    CUDA_HOME=$CONDA_PREFIX PYTHONPATH=$(pwd) python scripts/download_gen3c_checkpoints.py --checkpoint_dir checkpoints && \
    CUDA_HOME=$CONDA_PREFIX PYTHONPATH=$(pwd) python scripts/download_lyra_checkpoints.py --checkpoint_dir checkpoints
# --- 10. PORT FOR GRADIO ---
# Documentation only — operators still publish with `docker run -p 7860:7860`.
EXPOSE 7860
# --- 11. RUN GRADIO ---
# Exec form: the app is PID 1 and receives SIGTERM from `docker stop` directly.
# NOTE(review): the container runs as root — consider adding a non-root USER.
CMD ["python", "main_gradio.py"]