akhaliq (HF Staff) committed
Commit 2f0e386 · verified · 1 Parent(s): 50ff509

Update app.py

Files changed (1): app.py (+51 −2)
app.py CHANGED
@@ -12,6 +12,44 @@ pipeline = QwenImageEditPlusPipeline.from_pretrained(
 pipeline.to('cuda')
 pipeline.set_progress_bar_config(disable=None)
 
+# AoT Compilation for Qwen Image Edit Plus
+@spaces.GPU(duration=1500)
+def compile_qwen_transformer():
+    import torch
+    from diffusers import QwenImageEditPlusPipeline
+
+    # Create a temporary pipeline for compilation
+    compile_pipe = QwenImageEditPlusPipeline.from_pretrained(
+        "Qwen/Qwen-Image-Edit-2509",
+        torch_dtype=torch.bfloat16
+    )
+    compile_pipe.to('cuda')
+
+    # Capture inputs for the transformer
+    with spaces.aoti_capture(compile_pipe.transformer) as call:
+        # Create dummy inputs that match what Qwen expects
+        dummy_image = torch.randn(1, 3, 256, 256, device='cuda', dtype=torch.bfloat16)
+        dummy_prompt = "test prompt for compilation"
+        compile_pipe(dummy_image, dummy_prompt)
+
+    # Export the transformer model
+    exported = torch.export.export(
+        compile_pipe.transformer,
+        args=call.args,
+        kwargs=call.kwargs,
+    )
+
+    # Compile the exported model
+    return spaces.aoti_compile(exported)
+
+# Apply AoT compilation
+try:
+    compiled_transformer = compile_qwen_transformer()
+    spaces.aoti_apply(compiled_transformer, pipeline.transformer)
+    print("✅ Qwen transformer successfully compiled with AoT")
+except Exception as e:
+    print(f"⚠️ AoT compilation failed: {e}")
+
 @spaces.GPU(duration=120)
 def edit_images(image1, image2, prompt, seed, true_cfg_scale, negative_prompt, num_steps, guidance_scale):
     if image1 is None or image2 is None:
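For context on the AoT hunk above: it follows the ZeroGPU ahead-of-time compilation recipe from the `spaces` package — run the model once under `spaces.aoti_capture` to record the transformer's real call arguments, export the module with `torch.export.export` using those captured arguments, compile the exported program with `spaces.aoti_compile`, and graft the result back onto the live pipeline with `spaces.aoti_apply`. A minimal sketch of that four-step pattern (the PIL capture image and prompt here are illustrative, not from the commit):

import spaces
import torch
from PIL import Image
from diffusers import QwenImageEditPlusPipeline

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16
).to('cuda')

@spaces.GPU(duration=1500)  # compilation gets a long GPU window
def compile_transformer():
    # 1. Record the exact args/kwargs the transformer sees during one real call.
    with spaces.aoti_capture(pipe.transformer) as call:
        pipe(image=[Image.new("RGB", (512, 512))], prompt="capture pass")
    # 2. Export the transformer with the captured inputs.
    exported = torch.export.export(
        pipe.transformer, args=call.args, kwargs=call.kwargs
    )
    # 3. Compile the exported program ahead of time.
    return spaces.aoti_compile(exported)

# 4. Patch the compiled graph into the live pipeline's transformer.
spaces.aoti_apply(compile_transformer(), pipe.transformer)

Note that the committed version drives the capture with a raw `torch.randn` tensor passed positionally; if `torch.export` trips on that input, capturing with real PIL images as in the sketch may be the safer variant.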
 
@@ -39,7 +77,7 @@ def edit_images(image1, image2, prompt, seed, true_cfg_scale, negative_prompt, num_steps, guidance_scale):
     output = pipeline(**inputs)
     return output.images[0]
 
-# Example prompts
+# Example prompts and images
 example_prompts = [
     "The magician bear is on the left, the alchemist bear is on the right, facing each other in the central park square.",
     "Two characters standing side by side in a beautiful garden with flowers blooming",
 
@@ -47,6 +85,11 @@ example_prompts = [
     "Two friends sitting together on a park bench, enjoying the sunset",
 ]
 
+# Example image paths
+example_images = [
+    ["bear1.jpg", "bear2.jpg", "The magician bear is on the left, the alchemist bear is on the right, facing each other in the central park square."],
+]
+
 with gr.Blocks(css="footer {visibility: hidden}") as demo:
     gr.Markdown(
         """
 
@@ -86,10 +129,16 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
         lines=3
     )
 
+    gr.Examples(
+        examples=example_images,
+        inputs=[image1_input, image2_input, prompt_input],
+        label="Example Images and Prompts"
+    )
+
     gr.Examples(
         examples=[[p] for p in example_prompts],
         inputs=[prompt_input],
-        label="Example Prompts"
+        label="Example Prompts Only"
     )
 
     with gr.Accordion("Advanced Settings", open=False):
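On the two `gr.Examples` blocks: each row in `examples` is matched positionally against `inputs`, so the three-column rows in `example_images` populate both image components and the prompt box in one click, while the one-column rows in `example_prompts` fill only the prompt. The referenced files (`bear1.jpg`, `bear2.jpg`) must exist in the Space repository for the image examples to load. A minimal sketch of the same wiring (component names are illustrative):

import gradio as gr

with gr.Blocks() as demo:
    img_a = gr.Image(type="pil")   # first input image
    img_b = gr.Image(type="pil")   # second input image
    prompt = gr.Textbox(lines=3)   # edit instruction

    # Three columns per row -> fills both images and the prompt.
    gr.Examples(
        examples=[["bear1.jpg", "bear2.jpg", "Two bears facing each other"]],
        inputs=[img_a, img_b, prompt],
        label="Example Images and Prompts",
    )

    # One column per row -> fills only the prompt.
    gr.Examples(
        examples=[["Two friends on a park bench at sunset"]],
        inputs=[prompt],
        label="Example Prompts Only",
    )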