CARLEXsX committed
Commit 8341849 · verified · 1 Parent(s): 3515a22

Update ltx_manager_helpers.py

Files changed (1)
  1. ltx_manager_helpers.py +87 -1
ltx_manager_helpers.py CHANGED
@@ -111,4 +111,90 @@ class LtxPoolManager:
         motion_prompt: str, conditioning_items_data: list,
         width: int, height: int, seed: int, cfg: float, video_total_frames: int,
         video_fps: int, num_inference_steps: int, use_attention_slicing: bool,
-        decode_timestep: float, im
+        decode_timestep: float, image_cond_noise_scale: float,
+        current_fragment_index: int, output_path: str, progress
+    ):
+        worker_to_use = None
+        try:
+            with self.lock:
+                if self.last_cleanup_thread and self.last_cleanup_thread.is_alive():
+                    print("LTX POOL MANAGER: Waiting for the previous GPU cleanup...")
+                    self.last_cleanup_thread.join()
+                    print("LTX POOL MANAGER: Previous cleanup finished.")
+
+                worker_to_use = self.workers[self.current_worker_index]
+                previous_worker_index = (self.current_worker_index - 1 + len(self.workers)) % len(self.workers)
+                worker_to_cleanup = self.workers[previous_worker_index]
+
+                cleanup_thread = threading.Thread(target=self._cleanup_worker, args=(worker_to_cleanup,))
+                cleanup_thread.start()
+                self.last_cleanup_thread = cleanup_thread
+
+                worker_to_use.to_gpu()
+
+                self.current_worker_index = (self.current_worker_index + 1) % len(self.workers)
+
+            target_device = worker_to_use.device
+
+            if use_attention_slicing:
+                worker_to_use.pipeline.enable_attention_slicing()
+
+            media_paths = [item[0] for item in conditioning_items_data]
+            start_frames = [item[1] for item in conditioning_items_data]
+            strengths = [item[2] for item in conditioning_items_data]
+
+            padded_h, padded_w = ((height - 1) // 32 + 1) * 32, ((width - 1) // 32 + 1) * 32
+            padding_vals = calculate_padding(height, width, padded_h, padded_w)
+
+            conditioning_items = prepare_conditioning(
+                conditioning_media_paths=media_paths, conditioning_strengths=strengths,
+                conditioning_start_frames=start_frames, height=height, width=width,
+                num_frames=video_total_frames, padding=padding_vals, pipeline=worker_to_use.pipeline,
+            )
+
+            for item in conditioning_items:
+                item.media_item = item.media_item.to(target_device)
+
+            first_pass_config = worker_to_use.config.get("first_pass", {}).copy()
+            first_pass_config['num_inference_steps'] = int(num_inference_steps)
+
+            kwargs = {
+                "prompt": motion_prompt, "negative_prompt": "blurry, distorted, bad quality, artifacts",
+                "height": padded_h, "width": padded_w, "num_frames": video_total_frames,
+                "frame_rate": video_fps,
+                "generator": torch.Generator(device=target_device).manual_seed(int(seed) + current_fragment_index),
+                "output_type": "pt", "guidance_scale": float(cfg),
+                "timesteps": first_pass_config.get("timesteps"),
+                "conditioning_items": conditioning_items,
+                "decode_timestep": decode_timestep,
+                "decode_noise_scale": worker_to_use.config.get("decode_noise_scale"),
+                "image_cond_noise_scale": image_cond_noise_scale,
+                "stochastic_sampling": worker_to_use.config.get("stochastic_sampling"),
+                "is_video": True, "vae_per_channel_normalize": True,
+                "mixed_precision": (worker_to_use.config.get("precision") == "mixed_precision"),
+                "enhance_prompt": False, "decode_every": 4, "num_inference_steps": int(num_inference_steps)
+            }
+
+            # --- Set TeaCache's dynamic parameters before generation ---
+            if hasattr(worker_to_use.pipeline.transformer, 'enable_teacache') and worker_to_use.pipeline.transformer.enable_teacache:
+                print(f"LTX POOL MANAGER on {worker_to_use.device}: Configuring TeaCache with num_steps={int(num_inference_steps)}.")
+                worker_to_use.pipeline.transformer.num_steps = int(num_inference_steps)
+                worker_to_use.pipeline.transformer.cnt = 0
+
+            progress(0.1, desc=f"[LTX camera on {worker_to_use.device}] Filming scene {current_fragment_index}...")
+            result_tensor = worker_to_use.generate_video_fragment_internal(**kwargs).images
+
+            pad_l, pad_r, pad_t, pad_b = map(int, padding_vals); slice_h = -pad_b if pad_b > 0 else None; slice_w = -pad_r if pad_r > 0 else None
+            cropped_tensor = result_tensor[:, :, :video_total_frames, pad_t:slice_h, pad_l:slice_w]
+            video_np = (cropped_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy() * 255).astype(np.uint8)
+
+            with imageio.get_writer(output_path, fps=video_fps, codec='libx264', quality=8) as writer:
+                for frame in video_np: writer.append_data(frame)
+
+            return output_path, video_total_frames
+
+        finally:
+            if use_attention_slicing and worker_to_use and worker_to_use.pipeline:
+                worker_to_use.pipeline.disable_attention_slicing()
+
+ltx_manager_singleton = LtxPoolManager(device_ids=['cuda:0', 'cuda:1', 'cuda:2', 'cuda:3'])
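
Two pieces of this hunk benefit from a standalone illustration. First, the scheduling pattern: under the pool lock, the method joins any still-running cleanup, claims the current worker, kicks off a background unload of the previous one, loads the new worker onto its GPU, and advances the index round-robin. A minimal runnable sketch of that pattern, with an illustrative Worker stand-in (its to_gpu/to_cpu methods are placeholders, not the repository's API):

import threading

class Worker:
    """Illustrative stand-in for a pipeline worker pinned to one device."""
    def __init__(self, device: str):
        self.device = device
    def to_gpu(self):
        print(f"loading weights onto {self.device}")
    def to_cpu(self):
        print(f"unloading weights from {self.device}")

class RoundRobinPool:
    def __init__(self, devices):
        self.workers = [Worker(d) for d in devices]
        self.current_worker_index = 0
        self.lock = threading.Lock()
        self.last_cleanup_thread = None

    def acquire(self) -> Worker:
        with self.lock:
            # Join any still-running unload before claiming a new GPU.
            if self.last_cleanup_thread and self.last_cleanup_thread.is_alive():
                self.last_cleanup_thread.join()

            worker = self.workers[self.current_worker_index]
            # Python's % handles the negative operand, so this matches
            # the (i - 1 + len) % len form used in the diff.
            previous = self.workers[(self.current_worker_index - 1) % len(self.workers)]

            # Unload the previously used worker on a background thread.
            self.last_cleanup_thread = threading.Thread(target=previous.to_cpu)
            self.last_cleanup_thread.start()

            worker.to_gpu()
            self.current_worker_index = (self.current_worker_index + 1) % len(self.workers)
            return worker

pool = RoundRobinPool(['cuda:0', 'cuda:1'])
w = pool.acquire()  # loads cuda:0, unloads cuda:1 in the background

The point of the background thread is that the previous worker's GPU-to-CPU transfer overlaps with the next fragment's generation instead of blocking it; the join at the top of the next call is what keeps two cleanups from racing.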
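
Second, the padding arithmetic: ((x - 1) // 32 + 1) * 32 rounds a dimension up to the next multiple of 32, and the final slice crops the generated frames back to the requested size. A small sketch, assuming calculate_padding returns (pad_left, pad_right, pad_top, pad_bottom) to match how padding_vals is unpacked above; the pad split below is a hypothetical example, not the helper's actual output:

# Round a dimension up to the next multiple of 32, as in the hunk above.
def round_up(x: int, m: int = 32) -> int:
    return ((x - 1) // m + 1) * m

assert round_up(704) == 704   # already aligned: unchanged
assert round_up(710) == 736   # 710 -> next multiple of 32
assert round_up(500) == 512

# Cropping back after generation: an end index of None keeps the full
# extent, mirroring `slice_h = -pad_b if pad_b > 0 else None` above.
# (Hypothetical split: all extra rows/cols on the bottom/right.)
height, width = 500, 710
pad_t, pad_b = 0, round_up(height) - height   # 0, 12
pad_l, pad_r = 0, round_up(width) - width     # 0, 26
slice_h = -pad_b if pad_b > 0 else None
slice_w = -pad_r if pad_r > 0 else None
# padded_frame[pad_t:slice_h, pad_l:slice_w] recovers the original 500x710.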