Spaces: Running on Zero

Upload 2 files
app.py
CHANGED

@@ -381,12 +381,12 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css) as demo:
     )
 
     model_name.change(update_task_options, [model_name, task], [task], queue=False, show_api=False)\
-    .success(update_task_options, [model_name, task_sel], [task_sel], queue=False, show_api=False)
+    .success(update_task_options, [model_name, task_sel], [task_sel], queue=False, show_api=False)\
+    .success(update_textual_inversion, [active_textual_inversion, model_name], [use_textual_inversion], queue=False, show_api=False)
     task_sel.select(lambda x: x, [task_sel], [task], queue=False, show_api=False)
     task.change(change_preprocessor_choices, [task], [preprocessor_name], queue=False, show_api=False)\
     .success(lambda x: x, [task], [task_sel], queue=False, show_api=False)
     active_textual_inversion.change(update_textual_inversion, [active_textual_inversion, model_name], [use_textual_inversion], queue=False, show_api=False)
-    model_name.change(update_textual_inversion, [active_textual_inversion, model_name], [use_textual_inversion], queue=False, show_api=False)
     use_textual_inversion.change(set_textual_inversion_prompt, [use_textual_inversion, prompt, negative_prompt, prompt_syntax], [prompt, negative_prompt])
     result_to_cm_button.click(lambda x: x, [result], [image_base], queue=False, show_api=False)
     result_to_ic_button.click(lambda x: x, [result], [image_control], queue=False, show_api=False)
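The app.py change folds the separate model_name.change(update_textual_inversion, ...) listener into the existing event chain, so the textual-inversion refresh is guaranteed to run after the task options have been updated instead of racing them as an independent listener. A minimal sketch of that chaining pattern, with hypothetical step_one/step_two handlers (each .success() step fires only if the previous handler finished without raising):

import gradio as gr

def step_one(x):
    # First handler in the chain; its output feeds the next step.
    return x.upper()

def step_two(x):
    # Runs only after step_one completes successfully.
    return f"{x}!"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="input")
    mid = gr.Textbox(label="intermediate")
    out = gr.Textbox(label="final")
    inp.change(step_one, [inp], [mid], queue=False, show_api=False)\
        .success(step_two, [mid], [out], queue=False, show_api=False)

demo.launch()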
dc.py
CHANGED

@@ -672,7 +672,7 @@ def dummy_gpu():
     return None
 
 
-def sd_gen_generate_pipeline(*args):
+def sd_gen_generate_pipeline(sd_gen, *args):
     gpu_duration_arg = int(args[-1]) if args[-1] else 59
     verbose_arg = int(args[-2])
    load_lora_cpu = args[-3]
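With the module-level sd_gen instance removed (see the now-commented-out global at line 786 below), the generator receives its pipeline as an explicit first argument while the remaining UI values still arrive as a variadic tuple read from the tail. A small sketch of that calling convention, using hypothetical names; only the tail indices mirror the code above:

def generate(state, *args):
    # Control flags sit at fixed positions at the end of the tuple...
    gpu_duration = int(args[-1]) if args[-1] else 59  # default to 59 s when falsy
    verbose = int(args[-2])
    load_lora_cpu = args[-3]
    # ...and everything before them is the ordinary UI payload.
    ui_values = args[:-3]
    return state, ui_values, gpu_duration, verbose, load_lora_cpu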
@@ -783,7 +783,7 @@ def process_upscale(image, upscaler_name, upscaler_size):
 # https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
 # dynamic_gpu_duration.zerogpu = True
 # sd_gen_generate_pipeline.zerogpu = True
-sd_gen = GuiSD()
+#sd_gen = GuiSD()
 
 
 from pathlib import Path
@@ -794,6 +794,8 @@ import random
 import json
 import shutil
 import gc
+import threading
+from collections import defaultdict
 from tagger.tagger import insert_model_recom_prompt
 from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path, valid_model_name, set_textual_inversion_prompt,
     get_local_model_list, get_model_pipeline, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
@@ -803,6 +805,21 @@ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path, valid_model_name, set_textual_inversion_prompt,
     apply_lora_prompt, update_loras, search_civitai_lora, search_civitai_lora_json, update_civitai_selection, select_civitai_lora)
 
 
+_PIPELINES = {}
+_LOCKS = defaultdict(threading.Lock)
+
+
+def get_gsd(repo_id, vae, task, controlnet_model):
+    with _LOCKS[repo_id]:
+        gsd = _PIPELINES.get(repo_id)
+        if gsd is None:
+            gsd = GuiSD()
+            for _ in gsd.load_new_model(repo_id, vae, task, controlnet_model):
+                pass
+            _PIPELINES[repo_id] = gsd
+        return gsd
+
+
 #@spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
           model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
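The new get_gsd helper replaces the single global GuiSD instance with a lazily populated per-checkpoint cache: _LOCKS hands out one lock per repo_id, so two requests for the same model serialize its slow load while requests for different models can proceed independently, and every later call returns the already-loaded pipeline. A self-contained sketch of the same keyed-memoization pattern, with a stand-in class instead of GuiSD:

import threading
from collections import defaultdict

_CACHE = {}                           # repo_id -> loaded pipeline
_LOCKS = defaultdict(threading.Lock)  # one lock per repo_id

class StubPipeline:
    # Stand-in for GuiSD; assume construction + load is slow and non-reentrant.
    def __init__(self, repo_id):
        self.repo_id = repo_id

def get_pipeline(repo_id):
    with _LOCKS[repo_id]:
        pipe = _CACHE.get(repo_id)
        if pipe is None:
            pipe = StubPipeline(repo_id)  # expensive load happens once per repo
            _CACHE[repo_id] = pipe
        return pipe

One caveat of this shape: defaultdict(threading.Lock) creates the per-key lock on first access without synchronization of its own, so two threads hitting a brand-new key at the same instant can, in principle, end up with different locks; guarding lock creation with a single global lock is the stricter variant if that window matters.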
@@ -877,13 +894,11 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
     lora6 = get_valid_lora_path(lora6)
     lora7 = get_valid_lora_path(lora7)
     progress(1, desc="Preparation completed. Starting inference...")
-
     progress(0, desc="Loading model...")
-
-    pass
+    gsd = get_gsd(valid_model_name(model_name), vae, task, controlnet_model)
     progress(1, desc="Model loaded.")
     progress(0, desc="Starting Inference...")
-    for info_state, stream_images, info_images in sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
+    for info_state, stream_images, info_images in sd_gen_generate_pipeline(gsd, prompt, negative_prompt, 1, num_inference_steps,
         guidance_scale, clip_skip, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
         lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt, sampler, schedule_type, schedule_prediction_type,
         height, width, model_name, vae, task, image_control, preprocessor_name, preprocess_resolution, image_resolution,
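Net effect in infer: the pipeline is now resolved through the cache and passed to the generator explicitly, so switching back to a previously used checkpoint skips the reload entirely; the trade-off is that each distinct checkpoint stays resident in memory once loaded. Continuing the sketch above, with a hypothetical repo id:

# Repeated lookups return the identical cached object; only the first pays the load cost.
a = get_pipeline("stabilityai/stable-diffusion-xl-base-1.0")
b = get_pipeline("stabilityai/stable-diffusion-xl-base-1.0")
assert a is b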