import os

# Run ComfyUI strictly on the CPU: hide CUDA devices before torch and
# comfy.model_management perform their device detection.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = ""

import torch

import comfy.model_management
from comfy.model_management import CPUState

comfy.model_management.cpu_state = CPUState.CPU

import random
import subprocess
import sys
from typing import Sequence, Mapping, Any, Union

from PIL import Image
from huggingface_hub import hf_hub_download
import spaces

import gradio
import gradio_client
import gradio as gr

print("gradio version:", gradio.__version__)
print("gradio_client version:", gradio_client.__version__)

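# Download every model the ReActor face-swap workflow needs. The local_dir targets
# mirror the folder layout the ComfyUI ReActor node appears to expect: swap models
# under models/insightface and models/hyperswap, the GPEN face restorer under
# models/facerestore_models, detection/parsing weights under models/facedetection,
# and the InsightFace "buffalo_l" analysis models under
# models/insightface/models/buffalo_l.
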
# Face swap and face restoration models
hf_hub_download(repo_id="ezioruan/inswapper_128.onnx", filename="inswapper_128.onnx", local_dir="models/insightface")
hf_hub_download(repo_id="martintomov/comfy", filename="facerestore_models/GPEN-BFR-512.onnx", local_dir="models")

# Hyperswap face swap models
hf_hub_download(repo_id="facefusion/models-3.3.0", filename="hyperswap_1a_256.onnx", local_dir="models/hyperswap")
hf_hub_download(repo_id="facefusion/models-3.3.0", filename="hyperswap_1b_256.onnx", local_dir="models/hyperswap")
hf_hub_download(repo_id="facefusion/models-3.3.0", filename="hyperswap_1c_256.onnx", local_dir="models/hyperswap")

# Face detection and parsing models
hf_hub_download(repo_id="martintomov/comfy", filename="facedetection/yolov5l-face.pth", local_dir="models")
hf_hub_download(repo_id="gmk123/GFPGAN", filename="parsing_parsenet.pth", local_dir="models/facedetection")

# InsightFace buffalo_l face analysis models
hf_hub_download(repo_id="MonsterMMORPG/tools", filename="1k3d68.onnx", local_dir="models/insightface/models/buffalo_l")
hf_hub_download(repo_id="MonsterMMORPG/tools", filename="2d106det.onnx", local_dir="models/insightface/models/buffalo_l")
hf_hub_download(repo_id="maze/faceX", filename="det_10g.onnx", local_dir="models/insightface/models/buffalo_l")
hf_hub_download(repo_id="typhoon01/aux_models", filename="genderage.onnx", local_dir="models/insightface/models/buffalo_l")
hf_hub_download(repo_id="maze/faceX", filename="w600k_r50.onnx", local_dir="models/insightface/models/buffalo_l")

# Full buffalo_l bundle as a zip, alongside the individual files above
hf_hub_download(repo_id="vladmandic/insightface-faceanalysis", filename="buffalo_l.zip", local_dir="models/insightface/models/buffalo_l")

def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Returns the value at the given index of a sequence or mapping.

    If the object is a sequence (like a list or string), returns the value at the given index.
    If the object is a mapping (like a dictionary), returns the value at the index-th key.

    Some nodes return a dictionary; in those cases, we look up the "result" key.

    Args:
        obj (Union[Sequence, Mapping]): The object to retrieve the value from.
        index (int): The index of the value to retrieve.

    Returns:
        Any: The value at the given index.

    Raises:
        IndexError: If the index is out of bounds for the object and the object is not a mapping.
    """
    try:
        return obj[index]
    except KeyError:
        return obj["result"][index]

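# Example: ComfyUI node methods return their outputs as an indexable container, so
# get_value_at_index(loadimage.load_image(image=path), 0) yields the first output of
# the LoadImage node (its IMAGE tensor).
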
def find_path(name: str, path: str = None) -> str:
    """
    Recursively looks at parent folders starting from the given path until it finds the given name.
    Returns the path as a string if found, or None otherwise.
    """
    # If no path is given, start from the current working directory
    if path is None:
        path = os.getcwd()

    # If the name is in the current directory, return its full path
    if name in os.listdir(path):
        path_name = os.path.join(path, name)
        print(f"{name} found: {path_name}")
        return path_name

    # If the parent directory is the same as the current one, we have reached the filesystem root
    parent_directory = os.path.dirname(path)
    if parent_directory == path:
        return None

    # Otherwise, keep searching in the parent directory
    return find_path(name, parent_directory)

def add_comfyui_directory_to_sys_path() -> None:
    """
    Add 'ComfyUI' to the sys.path
    """
    comfyui_path = find_path("ComfyUI")
    if comfyui_path is not None and os.path.isdir(comfyui_path):
        sys.path.append(comfyui_path)
        print(f"'{comfyui_path}' added to sys.path")

def add_extra_model_paths() -> None:
    """
    Parse the optional extra_model_paths.yaml file and register the extra model folders with ComfyUI.
    """
    try:
        from main import load_extra_path_config
    except ImportError:
        print(
            "Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead."
        )
        from utils.extra_config import load_extra_path_config

    extra_model_paths = find_path("extra_model_paths.yaml")

    if extra_model_paths is not None:
        load_extra_path_config(extra_model_paths)
    else:
        print("Could not find the extra_model_paths config file.")

add_comfyui_directory_to_sys_path()
add_extra_model_paths()

def import_custom_nodes() -> None:
    """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS

    This function sets up a new asyncio event loop, initializes the PromptServer,
    creates a PromptQueue, and initializes the custom nodes.
    """
    import asyncio
    import execution
    from nodes import init_extra_nodes
    import server

    # Create a new event loop and set it as the default loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    # Create a PromptServer instance bound to the loop, and a PromptQueue bound to the server
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)

    # Initialize the custom nodes, which registers them in NODE_CLASS_MAPPINGS
    loop.run_until_complete(init_extra_nodes())

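# Register all custom nodes (including ReActorFaceSwap) once at startup. A PromptServer
# and PromptQueue are created first because custom nodes generally expect a server
# instance to exist while they initialize (an assumption; this mirrors ComfyUI's
# standard workflow-to-script export).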
import_custom_nodes()
from nodes import NODE_CLASS_MAPPINGS

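# The workflow's node objects are created once at module load and reused for every
# Gradio request, so each call to generate_image only has to execute them.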
loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
reactorfaceswap = NODE_CLASS_MAPPINGS["ReActorFaceSwap"]()
saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()

def generate_image(source_image, target_image, target_index, swap_model, face_restore_model, restore_strength):
    with torch.inference_mode():
        # Load the source (face) and target (body) images from their file paths
        loadimage_2 = loadimage.load_image(image=source_image)
        loadimage_3 = loadimage.load_image(image=target_image)

        # Run the ReActor face swap: take the face from the source image and
        # place it onto the selected face index of the target image
        reactorfaceswap_76 = reactorfaceswap.execute(
            enabled=True,
            swap_model=swap_model,
            facedetection="YOLOv5l",
            face_restore_model=face_restore_model,
            face_restore_visibility=restore_strength,
            codeformer_weight=0.5,
            detect_gender_input="no",
            detect_gender_source="no",
            input_faces_index=str(target_index),
            source_faces_index="0",
            console_log_level=1,
            input_image=get_value_at_index(loadimage_3, 0),
            source_image=get_value_at_index(loadimage_2, 0),
        )

        # Save the swapped image to ComfyUI's output folder
        saveimage_77 = saveimage.save_images(
            filename_prefix="ComfyUI",
            images=get_value_at_index(reactorfaceswap_76, 0),
        )

        # Return the saved file path so Gradio can display the result
        saved_path = f"output/{saveimage_77['ui']['images'][0]['filename']}"
        return saved_path

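# Gradio UI: the left column holds the source face, swap/restore settings, the target
# image with its face index, and the Generate button; the right column shows the
# generated image. Usage notes are rendered below the main row.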
if __name__ == "__main__":
    with gr.Blocks() as app:
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    with gr.Group():
                        source_image = gr.Image(label="Source (Face)", type="filepath")
                        swap_model = gr.Dropdown(
                            choices=["inswapper_128.onnx", "hyperswap_1a_256.onnx", "hyperswap_1b_256.onnx", "hyperswap_1c_256.onnx"],
                            value="hyperswap_1b_256.onnx",
                            label="Swap Model"
                        )
                        face_restore_model = gr.Dropdown(
                            choices=["none", "GPEN-BFR-512.onnx"],
                            value="none",
                            label="Face Restore Model"
                        )
                        restore_strength = gr.Slider(
                            minimum=0,
                            maximum=1,
                            step=0.05,
                            value=0.7,
                            label="Face Restore Strength"
                        )

                    with gr.Group():
                        target_image = gr.Image(label="Target (Body)", type="filepath")
                        target_index = gr.Dropdown(
                            choices=[0, 1, 2, 3, 4],
                            value=0,
                            label="Target Face Index"
                        )
                        gr.Markdown("Index 0 = the largest face. To swap a different face, choose Index 1, Index 2, etc.")

                generate_btn = gr.Button("Generate")

            with gr.Column():
                output_image = gr.Image(label="Generated Image")

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("*** hyperswap_1b_256.onnx gives the best result in most cases, but it sometimes fails to swap at all (the output is unchanged). This is a known bug in the model.")
                gr.Markdown("*** Hyperswap models do not need a face restorer - use them with 'none'. inswapper_128 does need one - use GPEN-BFR-512 at a strength of 0.7-0.8.")
                gr.Markdown("*** This Space runs on CPU only, and CPU usage on HF Spaces is unlimited.")
                gr.Markdown("*** To avoid the queue, duplicate this Space to your account (it's free): top right corner - three dots - Duplicate this Space. Make it private. Enjoy!")
                gr.Markdown(
                    "*** ComfyUI ReActor fast face swap (Hyperswap) running directly on Gradio - "
                    "[How to convert any ComfyUI workflow to Gradio]"
                    "(https://huggingface.co/blog/run-comfyui-workflows-on-spaces)"
                )

        generate_btn.click(
            fn=generate_image,
            inputs=[source_image, target_image, target_index, swap_model, face_restore_model, restore_strength],
            outputs=[output_image]
        )

    app.launch(share=True)