import torch
import gradio as gr
from diffusers import StableDiffusionXLImg2ImgPipeline
from PIL import Image
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
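# On a CPU-only Space, DEVICE resolves to "cpu" and the pipeline is loaded in
# float32, since float16 inference is not generally supported on CPU.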
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
use_safetensors=True,
)
pipe = pipe.to(DEVICE)
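# Optional (assumption, not part of the original script): attention slicing can
# reduce peak memory on small CPU instances at some speed cost.
# pipe.enable_attention_slicing()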
# Do NOT enable CPU offload here: enable_model_cpu_offload() is meant for GPU
# machines (it parks idle submodules on the CPU) and adds nothing on a
# CPU-only Hugging Face Space.
# pipe.enable_model_cpu_offload()  # <-- removed
def generate(image, prompt):
    # SDXL's native resolution; smaller inputs are upscaled before denoising
    image = image.resize((1024, 1024))
    result = pipe(
        prompt=prompt,
        image=image,
        strength=0.2,  # low denoising strength keeps the output close to the input
        guidance_scale=6,
        num_inference_steps=20,  # keep low for CPU
    )
    return result.images[0]
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Image(type="pil", label="Input Image"),
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Image(type="pil"),
    title="SDXL Image-to-Image (CPU Safe)",
)
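# Assumption: a single SDXL img2img pass can take several minutes on a free CPU
# Space, so enabling Gradio's request queue before launch is usually worthwhile:
# demo.queue(max_size=4)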
demo.launch()