kaiku03 commited on
Commit
79267d3
·
verified ·
1 Parent(s): 3302665

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +4 -18
Dockerfile CHANGED
@@ -1,34 +1,20 @@
1
- # Use a minimal Python base image
2
  FROM python:3.10-slim
3
 
4
- # Set working directory
5
  WORKDIR /app
6
 
7
- # Cache env-vars
8
  ENV HF_HOME=/app/.cache/huggingface
9
  ENV XDG_CACHE_HOME=/app/.cache
10
 
11
- # Install curl (and ca-certificates) for Ollama
12
- RUN apt-get update && \
13
- apt-get install -y curl ca-certificates && \
14
- rm -rf /var/lib/apt/lists/*
15
 
16
- # Install Ollama v0.3.1 (CPU-safe)
17
- RUN curl -fsSL https://ollama.com/download/ollama-linux-amd64-v0.3.1.tgz | \
18
- tar -xz -C /usr/local --strip-components=1
19
-
20
- # Python dependencies
21
  COPY requirements.txt .
22
  RUN pip install --no-cache-dir -r requirements.txt
23
 
24
- # Copy the rest of the code
25
  COPY . .
26
 
27
- # Ensure start script is executable
28
- RUN chmod +x /app/start.sh
29
 
30
- # Expose Gradio’s default port
31
  EXPOSE 7860
32
 
33
- # Launch everything
34
- ENTRYPOINT ["bash", "/app/start.sh"]
 
 
1
# syntax=docker/dockerfile:1

FROM python:3.10-slim

WORKDIR /app

# Keep all caches (Hugging Face + XDG) under /app so they survive as a single
# mount point and are writable by the unprivileged runtime user below.
ENV HF_HOME=/app/.cache/huggingface
ENV XDG_CACHE_HOME=/app/.cache

# git: presumably needed so pip can resolve VCS-pinned requirements — confirm
# against requirements.txt. --no-install-recommends keeps the layer minimal;
# the apt list cleanup must stay in the same RUN or the cache persists in the
# layer.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
       git \
    && rm -rf /var/lib/apt/lists/*

# Copy only the dependency manifest first so this layer is cached until
# requirements.txt itself changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Application source (use a .dockerignore to keep .git, caches, and secrets
# out of the build context).
COPY . .

# (Optional) pre-download model to image to avoid runtime download
# RUN python -c "from transformers import AutoModelForCausalLM, AutoTokenizer; m='google/gemma-2-2b-it'; AutoTokenizer.from_pretrained(m); AutoModelForCausalLM.from_pretrained(m)"

# Run as a non-root user; chown /app so runtime model downloads into
# HF_HOME still work. Stable numeric UID for runAsNonRoot-style checks.
RUN useradd --system --uid 10001 --no-create-home --home-dir /app appuser \
    && chown -R appuser:appuser /app
USER appuser

# Gradio's default port (documentation only — publish with `-p 7860:7860`).
EXPOSE 7860

# Exec form: the app is PID 1 and receives SIGTERM from `docker stop`.
CMD ["python", "main.py"]