chingshuai committed
Commit: 24367f5 · Parent(s): fa80dfd
fix bug of example

Files changed:
- gradio_app.py +4 -0
- hymotion/utils/gradio_runtime.py +9 -15
gradio_app.py
CHANGED
@@ -64,6 +64,9 @@ def ensure_examples_generated(model_inference_obj) -> List[str]:
         # Generate the example
         print(f">>> Generating example motion: {example['prompt']}")
         try:
+            # Force CPU device for example generation at startup
+            # This is necessary for Hugging Face Zero GPU environment where GPU
+            # is only available inside @spaces.GPU decorated functions
             html_content, fbx_files = model_inference_obj.run_inference(
                 text=example["prompt"],
                 seeds_csv=example["seeds"],
@@ -73,6 +76,7 @@ def ensure_examples_generated(model_inference_obj) -> List[str]:
                 original_text=example["prompt"],
                 output_dir=example_dir,
                 output_filename=example_filename,
+                device="cpu",  # Force CPU for startup example generation
             )
             print(f">>> Example '{example_filename}' generated successfully!")
             generated_examples.append(example_filename)
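For context, a minimal sketch of the two call paths this change separates under Hugging Face Zero GPU. The handler name handle_user_request and its output_dir/output_filename values are assumptions for illustration only; the keyword arguments to run_inference are taken from the diff above.

import spaces  # Hugging Face Spaces helper; on Zero GPU, CUDA is attached only inside @spaces.GPU calls


@spaces.GPU
def handle_user_request(model_inference_obj, prompt: str, seeds_csv: str):
    # Hypothetical Gradio callback: inside an @spaces.GPU function a GPU is
    # available, so run_inference can auto-detect the device (device=None).
    return model_inference_obj.run_inference(
        text=prompt,
        seeds_csv=seeds_csv,
        original_text=prompt,
        output_dir="outputs",
        output_filename="user_request",
    )


# At startup, ensure_examples_generated runs outside any @spaces.GPU context,
# so the commit forces CPU explicitly:
#     model_inference_obj.run_inference(..., device="cpu")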
hymotion/utils/gradio_runtime.py
CHANGED
@@ -300,27 +300,21 @@ class ModelInference:
 
         return _MODEL_CACHE
 
-    def run_inference(self, *args, **kwargs):
+    def run_inference(self, *args, device: str = None, **kwargs):
         """
-        Run
+        Run model inference for motion generation.
         Args:
-
-
-            mask_edges: Whether to mask edges
-            filter_black_bg: Whether to filter black background
-            filter_white_bg: Whether to filter white background
-            process_res_method: Method for resizing input images
-            show_camera: Whether to show camera in 3D view
-            selected_first_frame: Selected first frame filename
-            save_percentage: Percentage of points to save (0-100)
-            infer_gs: Whether to infer 3D Gaussian Splatting
+            device: Device to run inference on. If None, auto-detect.
+                Use "cpu" to force CPU inference (e.g., when not in @spaces.GPU context).
         Returns:
-            Tuple of (
+            Tuple of (html_content, fbx_files)
         """
         print(f"[{self.__class__.__name__}] Running inference...")
-        # Device check
-
+        # Device check - allow explicit override for Zero GPU compatibility
+        if device is None:
+            device = "cuda" if torch.cuda.is_available() else "cpu"
         device = torch.device(device)
+        print(f"[{self.__class__.__name__}] Using device: {device}")
 
         # Initialize model if needed - get model instance (not stored in self)
         model = self.initialize_model(device)