seawolf2357 committed on
Commit
8554f37
·
verified ·
1 Parent(s): e75a609

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -73
app.py CHANGED
@@ -73,16 +73,12 @@ aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
73
  aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')
74
 
75
 
76
- default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
77
- default_negative_prompt = "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, 最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, 畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"
78
 
79
  def resize_image(image: Image.Image) -> Image.Image:
80
- """
81
- Resizes an image to fit within the model's constraints, preserving aspect ratio as much as possible.
82
- """
83
  width, height = image.size
84
 
85
- # Handle square case
86
  if width == height:
87
  return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)
88
 
@@ -94,22 +90,20 @@ def resize_image(image: Image.Image) -> Image.Image:
94
  image_to_resize = image
95
 
96
  if aspect_ratio > MAX_ASPECT_RATIO:
97
- # Very wide image -> crop width to fit 832x480 aspect ratio
98
  target_w, target_h = MAX_DIM, MIN_DIM
99
  crop_width = int(round(height * MAX_ASPECT_RATIO))
100
  left = (width - crop_width) // 2
101
  image_to_resize = image.crop((left, 0, left + crop_width, height))
102
  elif aspect_ratio < MIN_ASPECT_RATIO:
103
- # Very tall image -> crop height to fit 480x832 aspect ratio
104
  target_w, target_h = MIN_DIM, MAX_DIM
105
  crop_height = int(round(width / MIN_ASPECT_RATIO))
106
  top = (height - crop_height) // 2
107
  image_to_resize = image.crop((0, top, width, top + crop_height))
108
  else:
109
- if width > height: # Landscape
110
  target_w = MAX_DIM
111
  target_h = int(round(target_w / aspect_ratio))
112
- else: # Portrait
113
  target_h = MAX_DIM
114
  target_w = int(round(target_h * aspect_ratio))
115
 
@@ -163,48 +157,8 @@ def generate_video(
163
  randomize_seed = False,
164
  progress=gr.Progress(track_tqdm=True),
165
  ):
166
- """
167
- Generate a video from an input image using the Wan 2.2 14B I2V model with Lightning LoRA.
168
-
169
- This function takes an input image and generates a video animation based on the provided
170
- prompt and parameters. It uses an FP8 quantized Wan 2.2 14B Image-to-Video model with Lightning LoRA
171
- for fast generation in 4-8 steps.
172
-
173
- Args:
174
- input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
175
- prompt (str): Text prompt describing the desired animation or motion.
176
- steps (int, optional): Number of inference steps. More steps = higher quality but slower.
177
- Defaults to 4. Range: 1-30.
178
- negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
179
- Defaults to default_negative_prompt (contains unwanted visual artifacts).
180
- duration_seconds (float, optional): Duration of the generated video in seconds.
181
- Defaults to 2. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
182
- guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
183
- Defaults to 1.0. Range: 0.0-20.0.
184
- guidance_scale_2 (float, optional): Controls adherence to the prompt. Higher values = more adherence.
185
- Defaults to 1.0. Range: 0.0-20.0.
186
- seed (int, optional): Random seed for reproducible results. Defaults to 42.
187
- Range: 0 to MAX_SEED (2147483647).
188
- randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
189
- Defaults to False.
190
- progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).
191
-
192
- Returns:
193
- tuple: A tuple containing:
194
- - video_path (str): Path to the generated video file (.mp4)
195
- - current_seed (int): The seed used for generation (useful when randomize_seed=True)
196
-
197
- Raises:
198
- gr.Error: If input_image is None (no image uploaded).
199
-
200
- Note:
201
- - Frame count is calculated as duration_seconds * FIXED_FPS (24)
202
- - Output dimensions are adjusted to be multiples of MOD_VALUE (32)
203
- - The function uses GPU acceleration via the @spaces.GPU decorator
204
- - Generation time varies based on steps and duration (see get_duration function)
205
- """
206
  if input_image is None:
207
- raise gr.Error("Please upload an input image.")
208
 
209
  num_frames = get_num_frames(duration_seconds)
210
  current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
@@ -230,26 +184,35 @@ def generate_video(
230
 
231
  return video_path, current_seed
232
 
233
- with gr.Blocks() as demo:
234
- gr.Markdown("# Fast 4 steps Wan 2.2 I2V (14B) with Lightning LoRA")
235
- gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
 
 
236
  with gr.Row():
237
- with gr.Column():
238
- input_image_component = gr.Image(type="pil", label="Input Image")
239
- prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
240
- duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
 
 
 
 
 
 
241
 
242
- with gr.Accordion("Advanced Settings", open=False):
243
- negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
244
- seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
245
- randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
246
- steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
247
- guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
248
- guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")
249
-
250
- generate_button = gr.Button("Generate Video", variant="primary")
251
- with gr.Column():
252
- video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
 
253
 
254
  ui_inputs = [
255
  input_image_component, prompt_input, steps_slider,
@@ -262,21 +225,24 @@ with gr.Blocks() as demo:
262
  examples=[
263
  [
264
  "wan_i2v_input.JPG",
265
- "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
266
  4,
267
  ],
268
  [
269
  "wan22_input_2.jpg",
270
- "A sleek lunar vehicle glides into view from left to right, kicking up moon dust as astronauts in white spacesuits hop aboard with characteristic lunar bouncing movements. In the distant background, a VTOL craft descends straight down and lands silently on the surface. Throughout the entire scene, ethereal aurora borealis ribbons dance across the star-filled sky, casting shimmering curtains of green, blue, and purple light that bathe the lunar landscape in an otherworldly, magical glow.",
271
  4,
272
  ],
273
  [
274
  "kill_bill.jpeg",
275
- "Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. Suddenly, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. The blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen. The transformation starts subtly at first - a slight bend in the blade - then accelerates as the metal becomes increasingly fluid. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. Her breathing quickens slightly as she witnesses this impossible transformation. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft metallic impacts. Her expression shifts from calm readiness to bewilderment and concern as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented.",
276
  6,
277
  ],
278
  ],
279
- inputs=[input_image_component, prompt_input, steps_slider], outputs=[video_output, seed_input], fn=generate_video, cache_examples="lazy"
 
 
 
280
  )
281
 
282
  if __name__ == "__main__":
 
73
  aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')
74
 
75
 
76
+ default_prompt_i2v = "이 이미지에 생동감을 부여하고, 영화 같은 움직임과 부드러운 애니메이션을 적용"
77
+ default_negative_prompt = "색조 선명, 과다 노출, 정적, 세부 흐림, 자막, 스타일, 작품, 그림, 화면, 정지, 회색조, 최악 품질, 저품질, JPEG 압축, 추함, 불완전, 추가 손가락, 잘못 그려진 손, 잘못 그려진 얼굴, 기형, 변형, 형태 불량 사지, 손가락 융합, 정지 화면, 지저분한 배경, 세 개의 다리, 배경 사람 많음, 뒤로 걷기"
78
 
79
  def resize_image(image: Image.Image) -> Image.Image:
 
 
 
80
  width, height = image.size
81
 
 
82
  if width == height:
83
  return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)
84
 
 
90
  image_to_resize = image
91
 
92
  if aspect_ratio > MAX_ASPECT_RATIO:
 
93
  target_w, target_h = MAX_DIM, MIN_DIM
94
  crop_width = int(round(height * MAX_ASPECT_RATIO))
95
  left = (width - crop_width) // 2
96
  image_to_resize = image.crop((left, 0, left + crop_width, height))
97
  elif aspect_ratio < MIN_ASPECT_RATIO:
 
98
  target_w, target_h = MIN_DIM, MAX_DIM
99
  crop_height = int(round(width / MIN_ASPECT_RATIO))
100
  top = (height - crop_height) // 2
101
  image_to_resize = image.crop((0, top, width, top + crop_height))
102
  else:
103
+ if width > height:
104
  target_w = MAX_DIM
105
  target_h = int(round(target_w / aspect_ratio))
106
+ else:
107
  target_h = MAX_DIM
108
  target_w = int(round(target_h * aspect_ratio))
109
 
 
157
  randomize_seed = False,
158
  progress=gr.Progress(track_tqdm=True),
159
  ):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
  if input_image is None:
161
+ raise gr.Error("이미지를 업로드해주세요.")
162
 
163
  num_frames = get_num_frames(duration_seconds)
164
  current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
 
184
 
185
  return video_path, current_seed
186
 
187
+ # ์„ธ๋ จ๋œ ํ•œ๊ธ€ UI
188
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
189
+ gr.Markdown("# 🎬 이미지를 영상으로 변환")
190
+ gr.Markdown("**Wan 2.2 Lightning** - 4~8단계로 빠른 영상 생성")
191
+
192
  with gr.Row():
193
+ with gr.Column(scale=1):
194
+ input_image_component = gr.Image(type="pil", label="입력 이미지")
195
+ prompt_input = gr.Textbox(label="프롬프트", value=default_prompt_i2v, lines=2)
196
+ duration_seconds_input = gr.Slider(
197
+ minimum=MIN_DURATION,
198
+ maximum=MAX_DURATION,
199
+ step=0.1,
200
+ value=3.5,
201
+ label="영상 길이 (초)"
202
+ )
203
 
204
+ with gr.Accordion("고급 설정", open=False):
205
+ negative_prompt_input = gr.Textbox(label="네거티브 프롬프트", value=default_negative_prompt, lines=2)
206
+ steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="생성 단계")
207
+ guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="가이던스 스케일 1")
208
+ guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="가이던스 스케일 2")
209
+ seed_input = gr.Slider(label="시드", minimum=0, maximum=MAX_SEED, step=1, value=42)
210
+ randomize_seed_checkbox = gr.Checkbox(label="랜덤 시드 사용", value=True)
211
+
212
+ generate_button = gr.Button("🎥 영상 생성", variant="primary", size="lg")
213
+
214
+ with gr.Column(scale=1):
215
+ video_output = gr.Video(label="생성된 영상", autoplay=True, interactive=False)
216
 
217
  ui_inputs = [
218
  input_image_component, prompt_input, steps_slider,
 
225
  examples=[
226
  [
227
  "wan_i2v_input.JPG",
228
+ "POV 셀카 영상, 선글라스 낀 흰 고양이가 서핑보드에 서서 편안한 미소. 배경에 열대 해변(맑은 물, 녹색 언덕, 구름 낀 푸른 하늘). 서핑보드가 기울어지고 고양이가 바다로 떨어지며 카메라가 거품과 햇빛과 함께 물속으로 빠짐. 잠깐 물속에서 고양이 얼굴 보이다가 다시 수면 위로 올라와 셀카 촬영 계속, 즐거운 여름 휴가 분위기.",
229
  4,
230
  ],
231
  [
232
  "wan22_input_2.jpg",
233
+ "์„ธ๋ จ๋œ ๋‹ฌ ํƒ์‚ฌ ์ฐจ๋Ÿ‰์ด ์™ผ์ชฝ์—์„œ ์˜ค๋ฅธ์ชฝ์œผ๋กœ ๋ฏธ๋„๋Ÿฌ์ง€๋“ฏ ์ด๋™ํ•˜๋ฉฐ ๋‹ฌ ๋จผ์ง€๋ฅผ ์ผ์œผํ‚ด. ํฐ ์šฐ์ฃผ๋ณต์„ ์ž…์€ ์šฐ์ฃผ์ธ๋“ค์ด ๋‹ฌ ํŠน์œ ์˜ ๋›ฐ๋Š” ๋™์ž‘์œผ๋กœ ํƒ‘์Šน. ๋จผ ๋ฐฐ๊ฒฝ์—์„œ VTOL ๋น„ํ–‰์ฒด๊ฐ€ ์ˆ˜์ง์œผ๋กœ ํ•˜๊ฐ•ํ•˜์—ฌ ํ‘œ๋ฉด์— ์กฐ์šฉํžˆ ์ฐฉ๋ฅ™. ์žฅ๋ฉด ์ „์ฒด์— ๊ฑธ์ณ ์ดˆํ˜„์‹ค์ ์ธ ์˜ค๋กœ๋ผ๊ฐ€ ๋ณ„์ด ๊ฐ€๋“ํ•œ ํ•˜๋Š˜์„ ๊ฐ€๋กœ์ง€๋ฅด๋ฉฐ ์ถค์ถ”๊ณ , ๋…น์ƒ‰, ํŒŒ๋ž€์ƒ‰, ๋ณด๋ผ์ƒ‰ ๋น›์˜ ์ปคํŠผ์ด ๋‹ฌ ํ’๊ฒฝ์„ ์‹ ๋น„๋กญ๊ณ  ๋งˆ๋ฒ• ๊ฐ™์€ ๋น›์œผ๋กœ ๊ฐ์Œˆ.",
234
  4,
235
  ],
236
  [
237
  "kill_bill.jpeg",
238
+ "우마 서먼의 캐릭터 베아트릭스 키도가 영화 같은 조명 속에서 날카로운 카타나 검을 안정적으로 들고 있음. 갑자기 광택 나는 강철이 부드러워지고 왜곡되기 시작하며 가열된 금속처럼 구조적 완전성을 잃기 시작. 검날의 완벽한 끝이 천천히 휘어지고 늘어지며, 녹은 강철이 은빛 물줄기로 아래로 흘러내림. 변형은 처음에는 미묘하게 시작되다가 금속이 점점 더 유동적이 되면서 가속화. 카메라는 그녀의 얼굴을 고정하고 날카로운 눈빛이 점차 좁아지는데, 치명적인 집중이 아니라 무기가 눈앞에서 녹는 것을 보며 혼란과 경악. 호흡이 약간 빨라지며 이 불가능한 변형을 목격. 녹는 현상이 강화되고 카타나의 완벽한 형태가 점점 추상적이 되며 손에서 수은처럼 떨어짐. 녹은 방울이 부드러운 금속 충격음과 함께 바닥에 떨어짐. 표정이 차분한 준비에서 당혹감과 우려로 바뀌며 전설적인 복수의 도구가 손에서 문자 그대로 액화되어 무방비 상태가 됨.",
239
  6,
240
  ],
241
  ],
242
+ inputs=[input_image_component, prompt_input, steps_slider],
243
+ outputs=[video_output, seed_input],
244
+ fn=generate_video,
245
+ cache_examples="lazy"
246
  )
247
 
248
  if __name__ == "__main__":