import torch
import gradio as gr
from diffusers import StableDiffusionXLImg2ImgPipeline
from PIL import Image

# Pick the GPU when one is available; otherwise fall back to the CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the SDXL img2img pipeline. float16 halves memory use on the GPU;
# CPUs generally lack fast float16 kernels, so stay in float32 there.
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
    use_safetensors=True,
)
pipe = pipe.to(DEVICE)
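
# Optional memory savers (an assumption, not part of the original script):
# diffusers pipelines also expose pipe.enable_attention_slicing(), and
# pipe.enable_model_cpu_offload() when accelerate is installed; both trade
# speed for a lower peak-memory footprint on constrained GPUs.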


def generate(image: Image.Image, prompt: str) -> Image.Image:
    # SDXL is trained at 1024x1024, so resize the input to its native resolution.
    image = image.resize((1024, 1024))

    result = pipe(
        prompt=prompt,
        image=image,
        strength=0.2,            # low strength keeps the output close to the input image
        guidance_scale=6,
        num_inference_steps=20,  # img2img runs roughly strength * steps denoising steps (~4 here)
    )

    return result.images[0]


# Wire the pipeline into a simple Gradio UI: image + prompt in, image out.
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Image(type="pil", label="Input Image"),
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Image(type="pil"),
    title="SDXL Image-to-Image (CPU Safe)",
)

demo.launch()
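# launch() starts a local web server (Gradio's default address is http://127.0.0.1:7860);
# passing share=True would also create a temporary public link (optional, not used here).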