Spaces:
Running
on
Zero
Running
on
Zero
Upload 2 files
Browse files
- dc.py +6 -4
- modutils.py +4 -3
dc.py
CHANGED
|
@@ -316,7 +316,7 @@ class GuiSD:
|
|
| 316 |
retain_task_model_in_cache=False,
|
| 317 |
#device="cpu",
|
| 318 |
)
|
| 319 |
-
self.model.device = torch.device("cpu")
|
| 320 |
|
| 321 |
def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
|
| 322 |
progress(0, desc="Start inference...")
|
|
@@ -700,7 +700,7 @@ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_pat
|
|
| 700 |
normalize_prompt_list, get_civitai_info, search_lora_on_civitai)
|
| 701 |
|
| 702 |
sd_gen = GuiSD()
|
| 703 |
-
|
| 704 |
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
|
| 705 |
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
|
| 706 |
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
|
|
@@ -748,7 +748,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
|
|
| 748 |
return output_image
|
| 749 |
|
| 750 |
|
| 751 |
-
|
| 752 |
def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
|
| 753 |
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
|
| 754 |
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
|
|
@@ -809,7 +809,9 @@ def get_t2i_model_info(repo_id: str):
|
|
| 809 |
info = []
|
| 810 |
url = f"https://huggingface.co/{repo_id}/"
|
| 811 |
if not 'diffusers' in tags: return ""
|
| 812 |
-
if 'diffusers:StableDiffusionXLPipeline' in tags:
|
|
|
|
|
|
|
| 813 |
info.append("SDXL")
|
| 814 |
elif 'diffusers:StableDiffusionPipeline' in tags:
|
| 815 |
info.append("SD1.5")
|
|
|
|
| 316 |
retain_task_model_in_cache=False,
|
| 317 |
#device="cpu",
|
| 318 |
)
|
| 319 |
+
self.model.device = torch.device("cpu") #
|
| 320 |
|
| 321 |
def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
|
| 322 |
progress(0, desc="Start inference...")
|
|
|
|
| 700 |
normalize_prompt_list, get_civitai_info, search_lora_on_civitai)
|
| 701 |
|
| 702 |
sd_gen = GuiSD()
|
| 703 |
+
#@spaces.GPU
|
| 704 |
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
|
| 705 |
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
|
| 706 |
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
|
|
|
|
| 748 |
return output_image
|
| 749 |
|
| 750 |
|
| 751 |
+
#@spaces.GPU
|
| 752 |
def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
|
| 753 |
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
|
| 754 |
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
|
|
|
|
| 809 |
info = []
|
| 810 |
url = f"https://huggingface.co/{repo_id}/"
|
| 811 |
if not 'diffusers' in tags: return ""
|
| 812 |
+
if 'diffusers:FluxPipeline' in tags:
|
| 813 |
+
info.append("FLUX.1")
|
| 814 |
+
elif 'diffusers:StableDiffusionXLPipeline' in tags:
|
| 815 |
info.append("SDXL")
|
| 816 |
elif 'diffusers:StableDiffusionPipeline' in tags:
|
| 817 |
info.append("SD1.5")
|
modutils.py
CHANGED
|
@@ -39,7 +39,6 @@ def get_local_model_list(dir_path):
|
|
| 39 |
|
| 40 |
def download_things(directory, url, hf_token="", civitai_api_key=""):
|
| 41 |
url = url.strip()
|
| 42 |
-
|
| 43 |
if "drive.google.com" in url:
|
| 44 |
original_dir = os.getcwd()
|
| 45 |
os.chdir(directory)
|
|
@@ -200,8 +199,8 @@ def get_model_id_list():
|
|
| 200 |
anime_models = []
|
| 201 |
real_models = []
|
| 202 |
for model in models_ex:
|
| 203 |
-
if not model.private:
|
| 204 |
-
anime_models.append(model.id) if "anime" in model.tags else real_models.append(model.id)
|
| 205 |
model_ids.extend(anime_models)
|
| 206 |
model_ids.extend(real_models)
|
| 207 |
model_id_list = model_ids.copy()
|
|
@@ -252,6 +251,8 @@ def get_tupled_model_list(model_list):
|
|
| 252 |
tags = model.tags
|
| 253 |
info = []
|
| 254 |
if not 'diffusers' in tags: continue
|
|
|
|
|
|
|
| 255 |
if 'diffusers:StableDiffusionXLPipeline' in tags:
|
| 256 |
info.append("SDXL")
|
| 257 |
elif 'diffusers:StableDiffusionPipeline' in tags:
|
|
|
|
| 39 |
|
| 40 |
def download_things(directory, url, hf_token="", civitai_api_key=""):
|
| 41 |
url = url.strip()
|
|
|
|
| 42 |
if "drive.google.com" in url:
|
| 43 |
original_dir = os.getcwd()
|
| 44 |
os.chdir(directory)
|
|
|
|
| 199 |
anime_models = []
|
| 200 |
real_models = []
|
| 201 |
for model in models_ex:
|
| 202 |
+
if not model.private and not model.gated and "diffusers:FluxPipeline" not in model.tags:
|
| 203 |
+
anime_models.append(model.id) if "anime" in model.tags else real_models.append(model.id)
|
| 204 |
model_ids.extend(anime_models)
|
| 205 |
model_ids.extend(real_models)
|
| 206 |
model_id_list = model_ids.copy()
|
|
|
|
| 251 |
tags = model.tags
|
| 252 |
info = []
|
| 253 |
if not 'diffusers' in tags: continue
|
| 254 |
+
if 'diffusers:FluxPipeline' in tags:
|
| 255 |
+
info.append("FLUX.1")
|
| 256 |
if 'diffusers:StableDiffusionXLPipeline' in tags:
|
| 257 |
info.append("SDXL")
|
| 258 |
elif 'diffusers:StableDiffusionPipeline' in tags:
|