Files changed (9)
  1. .gitignore +0 -207
  2. README.md +1 -1
  3. app.py +245 -440
  4. constants.py +46 -185
  5. image_processor.py +2 -2
  6. packages.txt +1 -1
  7. pre-requirements.txt +0 -1
  8. requirements.txt +3 -11
  9. utils.py +485 -714
.gitignore DELETED
@@ -1,207 +0,0 @@
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[codz]
- *$py.class
-
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- share/python-wheels/
- *.egg-info/
- .installed.cfg
- *.egg
- MANIFEST
-
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .nox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- *.py.cover
- .hypothesis/
- .pytest_cache/
- cover/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
- db.sqlite3
- db.sqlite3-journal
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- .pybuilder/
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # IPython
- profile_default/
- ipython_config.py
-
- # pyenv
- # For a library or package, you might want to ignore these files since the code is
- # intended to run in multiple environments; otherwise, check them in:
- # .python-version
-
- # pipenv
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
- # install all needed dependencies.
- #Pipfile.lock
-
- # UV
- # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- #uv.lock
-
- # poetry
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
- #poetry.lock
- #poetry.toml
-
- # pdm
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
- # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
- # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
- #pdm.lock
- #pdm.toml
- .pdm-python
- .pdm-build/
-
- # pixi
- # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
- #pixi.lock
- # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
- # in the .venv directory. It is recommended not to include this directory in version control.
- .pixi
-
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
- __pypackages__/
-
- # Celery stuff
- celerybeat-schedule
- celerybeat.pid
-
- # SageMath parsed files
- *.sage.py
-
- # Environments
- .env
- .envrc
- .venv
- env/
- venv/
- ENV/
- env.bak/
- venv.bak/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
- .dmypy.json
- dmypy.json
-
- # Pyre type checker
- .pyre/
-
- # pytype static type analyzer
- .pytype/
-
- # Cython debug symbols
- cython_debug/
-
- # PyCharm
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
- # and can be added to the global gitignore or merged into this file. For a more nuclear
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
- #.idea/
-
- # Abstra
- # Abstra is an AI-powered process automation framework.
- # Ignore directories containing user credentials, local state, and settings.
- # Learn more at https://abstra.io/docs
- .abstra/
-
- # Visual Studio Code
- # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
- # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
- # and can be added to the global gitignore or merged into this file. However, if you prefer,
- # you could uncomment the following to ignore the entire vscode folder
- # .vscode/
-
- # Ruff stuff:
- .ruff_cache/
-
- # PyPI configuration file
- .pypirc
-
- # Cursor
- # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
- # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
- # refer to https://docs.cursor.com/context/ignore-files
- .cursorignore
- .cursorindexingignore
-
- # Marimo
- marimo/_static/
- marimo/_lsp/
- __marimo__/
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🧩🖼️
  colorFrom: red
  colorTo: pink
  sdk: gradio
- sdk_version: 5.44.1
+ sdk_version: 4.31.3
  app_file: app.py
  pinned: true
  license: mit
app.py CHANGED
@@ -1,22 +1,17 @@
1
  import spaces
2
  import os
3
- from argparse import ArgumentParser
4
  from stablepy import (
5
  Model_Diffusers,
6
  SCHEDULE_TYPE_OPTIONS,
7
  SCHEDULE_PREDICTION_TYPE_OPTIONS,
8
  check_scheduler_compatibility,
9
  TASK_AND_PREPROCESSORS,
10
- FACE_RESTORATION_MODELS,
11
- PROMPT_WEIGHT_OPTIONS_PRIORITY,
12
- scheduler_names,
13
  )
14
  from constants import (
15
  DIRECTORY_MODELS,
16
  DIRECTORY_LORAS,
17
  DIRECTORY_VAES,
18
  DIRECTORY_EMBEDS,
19
- DIRECTORY_UPSCALERS,
20
  DOWNLOAD_MODEL,
21
  DOWNLOAD_VAE,
22
  DOWNLOAD_LORA,
@@ -40,16 +35,16 @@ from constants import (
40
  EXAMPLES_GUI,
41
  RESOURCES,
42
  DIFFUSERS_CONTROLNET_MODEL,
43
- IP_MODELS,
44
- MODE_IP_OPTIONS,
45
- CACHE_HF_ROOT,
46
- CACHE_HF,
47
  )
48
  from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
49
  import torch
50
  import re
 
 
 
 
 
51
  import time
52
- import threading
53
  from PIL import ImageFile
54
  from utils import (
55
  download_things,
@@ -65,7 +60,6 @@ from utils import (
65
  progress_step_bar,
66
  html_template_message,
67
  escape_html,
68
- clear_hf_cache,
69
  )
70
  from image_processor import preprocessor_tab
71
  from datetime import datetime
@@ -76,36 +70,31 @@ import warnings
76
  from stablepy import logger
77
  from diffusers import FluxPipeline
78
  # import urllib.parse
79
- import subprocess
80
-
81
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
82
- HIDE_API = bool(os.getenv("HIDE_API"))
83
- if IS_ZERO_GPU:
84
- subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
85
- IS_GPU_MODE = True if IS_ZERO_GPU else (True if torch.cuda.is_available() else False)
86
- img_path = "./images/"
87
- allowed_path = os.path.abspath(img_path)
88
- delete_cache_time = (9600, 9600) if IS_ZERO_GPU else (86400, 86400)
89
 
90
  ImageFile.LOAD_TRUNCATED_IMAGES = True
91
  torch.backends.cuda.matmul.allow_tf32 = True
92
  # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
 
93
 
94
- directories = [DIRECTORY_MODELS, DIRECTORY_LORAS, DIRECTORY_VAES, DIRECTORY_EMBEDS, DIRECTORY_UPSCALERS]
95
  for directory in directories:
96
  os.makedirs(directory, exist_ok=True)
97
 
98
  # Download stuffs
99
  for url in [url.strip() for url in DOWNLOAD_MODEL.split(',')]:
100
- download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
 
101
  for url in [url.strip() for url in DOWNLOAD_VAE.split(',')]:
102
- download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
 
103
  for url in [url.strip() for url in DOWNLOAD_LORA.split(',')]:
104
- download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
 
105
 
106
  # Download Embeddings
107
  for url_embed in DOWNLOAD_EMBEDS:
108
- download_things(DIRECTORY_EMBEDS, url_embed, HF_TOKEN, CIVITAI_API_KEY)
 
109
 
110
  # Build list models
111
  embed_list = get_model_list(DIRECTORY_EMBEDS)
@@ -123,16 +112,15 @@ vae_model_list.insert(0, "None")
123
 
124
  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
125
 
126
- components = None
127
- if IS_ZERO_GPU:
128
- flux_repo = "camenduru/FLUX.1-dev-diffusers"
129
- flux_pipe = FluxPipeline.from_pretrained(
130
- flux_repo,
131
- transformer=None,
132
- torch_dtype=torch.bfloat16,
133
- ).to("cuda")
134
- components = flux_pipe.components
135
- delete_model(flux_repo)
136
 
137
  #######################
138
  # GUI
@@ -142,17 +130,7 @@ diffusers.utils.logging.set_verbosity(40)
142
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
143
  warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
144
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
145
-
146
- parser = ArgumentParser(description='DiffuseCraft: Create images from text prompts.', add_help=True)
147
- parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
148
- parser.add_argument('--theme', type=str, default="NoCrypt/miku", help='Set the theme (default: NoCrypt/miku)')
149
- parser.add_argument("--ssr", action="store_true", help="Enable SSR (Server-Side Rendering)")
150
- parser.add_argument("--log-level", type=str, default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], help="Set logging level (default: INFO)")
151
- args = parser.parse_args()
152
-
153
- logger.setLevel(
154
- "INFO" if IS_ZERO_GPU else getattr(logging, args.log_level.upper())
155
- )
156
 
157
  CSS = """
158
  .contain { display: flex; flex-direction: column; }
@@ -162,12 +140,6 @@ CSS = """
162
  """
163
 
164
 
165
- def lora_chk(lora_):
166
- if isinstance(lora_, str) and lora_.strip() not in ["", "None"]:
167
- return lora_
168
- return None
169
-
170
-
171
  class GuiSD:
172
  def __init__(self, stream=True):
173
  self.model = None
@@ -176,26 +148,13 @@ class GuiSD:
176
  self.last_load = datetime.now()
177
  self.inventory = []
178
 
179
- # Avoid duplicate downloads
180
- self.active_downloads = set()
181
- self.download_lock = threading.Lock()
182
-
183
- def update_storage_models(self, storage_floor_gb=30, required_inventory_for_purge=3):
184
  while get_used_storage_gb() > storage_floor_gb:
185
  if len(self.inventory) < required_inventory_for_purge:
186
  break
187
  removal_candidate = self.inventory.pop(0)
188
  delete_model(removal_candidate)
189
 
190
- # Cleanup after 60 seconds of inactivity
191
- lowPrioCleanup = max((datetime.now() - self.last_load).total_seconds(), 0) > 120
192
- if lowPrioCleanup and (len(self.inventory) >= required_inventory_for_purge - 1) and not self.status_loading and get_used_storage_gb(CACHE_HF_ROOT) > (storage_floor_gb * 2):
193
- print("Cleaning up Hugging Face cache...")
194
- clear_hf_cache()
195
- self.inventory = [
196
- m for m in self.inventory if os.path.exists(m)
197
- ]
198
-
199
  def update_inventory(self, model_name):
200
  if model_name not in single_file_model_list:
201
  self.inventory = [
@@ -205,48 +164,23 @@ class GuiSD:
205
 
206
  def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
207
 
208
- lock_key = model_name
209
-
210
- while True:
211
- with self.download_lock:
212
- if lock_key not in self.active_downloads:
213
- self.active_downloads.add(lock_key)
214
- break
215
-
216
- yield f"Waiting for existing download to finish: {model_name}..."
217
- time.sleep(1)
218
 
219
- try:
220
- # download link model > model_name
221
- if model_name.startswith("http"):
222
- yield f"Downloading model: {model_name}"
223
- model_name = download_things(DIRECTORY_MODELS, model_name, HF_TOKEN, CIVITAI_API_KEY)
224
- if not model_name:
225
- raise ValueError("Error retrieving model information from URL")
226
-
227
- if IS_ZERO_GPU:
228
- self.update_storage_models()
229
-
230
- vae_model = vae_model if vae_model != "None" else None
231
- model_type = get_model_type(model_name)
232
- dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
233
-
234
- if not os.path.exists(model_name):
235
- logger.debug(f"model_name={model_name}, vae_model={vae_model}, task={task}, controlnet_model={controlnet_model}")
236
- _ = download_diffuser_repo(
237
- repo_name=model_name,
238
- model_type=model_type,
239
- revision="main",
240
- token=True,
241
- )
242
 
243
- self.update_inventory(model_name)
 
 
 
 
 
 
 
 
 
 
244
 
245
- except Exception as e:
246
- raise e
247
- finally:
248
- with self.download_lock:
249
- self.active_downloads.discard(lock_key)
250
 
251
  for i in range(68):
252
  if not self.status_loading:
@@ -264,7 +198,10 @@ class GuiSD:
264
  yield f"Loading model: {model_name}"
265
 
266
  if vae_model == "BakedVAE":
267
- vae_model = model_name
 
 
 
268
  elif vae_model:
269
  vae_type = "SDXL" if "sdxl" in vae_model.lower() else "SD 1.5"
270
  if model_type != vae_type:
@@ -283,10 +220,10 @@ class GuiSD:
283
  type_model_precision=dtype_model,
284
  retain_task_model_in_cache=False,
285
  controlnet_model=controlnet_model,
286
- device="cpu" if IS_ZERO_GPU else None,
287
  env_components=components,
288
  )
289
- self.model.advanced_params(image_preprocessor_cuda_active=IS_GPU_MODE)
290
  else:
291
  if self.model.base_model_id != model_name:
292
  load_now_time = datetime.now()
@@ -296,8 +233,7 @@ class GuiSD:
296
  print("Waiting for the previous model's time ops...")
297
  time.sleep(9 - elapsed_time)
298
 
299
- if IS_ZERO_GPU:
300
- self.model.device = torch.device("cpu")
301
  self.model.load_pipe(
302
  model_name,
303
  task_name=TASK_STABLEPY[task],
@@ -374,8 +310,8 @@ class GuiSD:
374
  syntax_weights,
375
  upscaler_model_path,
376
  upscaler_increases_size,
377
- upscaler_tile_size,
378
- upscaler_tile_overlap,
379
  hires_steps,
380
  hires_denoising_strength,
381
  hires_sampler,
@@ -400,7 +336,7 @@ class GuiSD:
400
  t2i_adapter_preprocessor,
401
  t2i_adapter_conditioning_scale,
402
  t2i_adapter_conditioning_factor,
403
- enable_live_preview,
404
  freeu,
405
  generator_in_cpu,
406
  adetailer_inpaint_only,
@@ -439,9 +375,6 @@ class GuiSD:
439
  mode_ip2,
440
  scale_ip2,
441
  pag_scale,
442
- face_restoration_model,
443
- face_restoration_visibility,
444
- face_restoration_weight,
445
  ):
446
  info_state = html_template_message("Navigating latent space...")
447
  yield info_state, gr.update(), gr.update()
@@ -451,7 +384,7 @@ class GuiSD:
451
  vae_msg = f"VAE: {vae_model}" if vae_model else ""
452
  msg_lora = ""
453
 
454
- logger.debug(f"Config model: {model_name}, {vae_model}, {loras_list}")
455
 
456
  task = TASK_STABLEPY[task]
457
 
@@ -480,20 +413,23 @@ class GuiSD:
480
  self.model.stream_config(concurrency=concurrency, latent_resize_by=1, vae_decoding=False)
481
 
482
  if task != "txt2img" and not image_control:
483
- raise ValueError("Reference image is required. Please upload one in 'Image ControlNet/Inpaint/Img2img'.")
484
 
485
- if task in ["inpaint", "repaint"] and not image_mask:
486
- raise ValueError("Mask image not found. Upload one in 'Image Mask' to proceed.")
487
 
488
- if "https://" not in str(UPSCALER_DICT_GUI[upscaler_model_path]):
489
  upscaler_model = upscaler_model_path
490
  else:
 
 
 
491
  url_upscaler = UPSCALER_DICT_GUI[upscaler_model_path]
492
 
493
- if not os.path.exists(f"./{DIRECTORY_UPSCALERS}/{url_upscaler.split('/')[-1]}"):
494
- download_things(DIRECTORY_UPSCALERS, url_upscaler, HF_TOKEN)
495
 
496
- upscaler_model = f"./{DIRECTORY_UPSCALERS}/{url_upscaler.split('/')[-1]}"
497
 
498
  logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
499
 
@@ -549,26 +485,26 @@ class GuiSD:
549
  "distance_threshold": distance_threshold,
550
  "recolor_gamma_correction": float(recolor_gamma_correction),
551
  "tile_blur_sigma": int(tile_blur_sigma),
552
- "lora_A": lora_chk(lora1),
553
  "lora_scale_A": lora_scale1,
554
- "lora_B": lora_chk(lora2),
555
  "lora_scale_B": lora_scale2,
556
- "lora_C": lora_chk(lora3),
557
  "lora_scale_C": lora_scale3,
558
- "lora_D": lora_chk(lora4),
559
  "lora_scale_D": lora_scale4,
560
- "lora_E": lora_chk(lora5),
561
  "lora_scale_E": lora_scale5,
562
- "lora_F": lora_chk(lora6),
563
  "lora_scale_F": lora_scale6,
564
- "lora_G": lora_chk(lora7),
565
  "lora_scale_G": lora_scale7,
566
  "textual_inversion": embed_list if textual_inversion else [],
567
  "syntax_weights": syntax_weights, # "Classic"
568
  "sampler": sampler,
569
  "schedule_type": schedule_type,
570
  "schedule_prediction_type": schedule_prediction_type,
571
- "xformers_memory_efficient_attention": False,
572
  "gui_active": True,
573
  "loop_generation": loop_generation,
574
  "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
@@ -583,7 +519,7 @@ class GuiSD:
583
  "leave_progress_bar": leave_progress_bar,
584
  "disable_progress_bar": disable_progress_bar,
585
  "image_previews": image_previews,
586
- "display_images": False,
587
  "save_generated_images": save_generated_images,
588
  "filename_pattern": filename_pattern,
589
  "image_storage_location": image_storage_location,
@@ -595,8 +531,8 @@ class GuiSD:
595
  "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
596
  "upscaler_model_path": upscaler_model,
597
  "upscaler_increases_size": upscaler_increases_size,
598
- "upscaler_tile_size": upscaler_tile_size,
599
- "upscaler_tile_overlap": upscaler_tile_overlap,
600
  "hires_steps": hires_steps,
601
  "hires_denoising_strength": hires_denoising_strength,
602
  "hires_prompt": hires_prompt,
@@ -611,19 +547,16 @@ class GuiSD:
611
  "ip_adapter_model": params_ip_model,
612
  "ip_adapter_mode": params_ip_mode,
613
  "ip_adapter_scale": params_ip_scale,
614
- "face_restoration_model": face_restoration_model,
615
- "face_restoration_visibility": face_restoration_visibility,
616
- "face_restoration_weight": face_restoration_weight,
617
  }
618
 
619
  # kwargs for diffusers pipeline
620
  if guidance_rescale:
621
  pipe_params["guidance_rescale"] = guidance_rescale
622
- if IS_ZERO_GPU:
623
- self.model.device = torch.device("cuda:0")
624
- if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
625
- self.model.pipe.transformer.to(self.model.device)
626
- logger.debug("transformer to cuda")
627
 
628
  actual_progress = 0
629
  info_images = gr.update()
@@ -653,20 +586,15 @@ class GuiSD:
653
 
654
  download_links = "<br>".join(
655
  [
656
- f'<a href="{path.replace("/images/", f"/gradio_api/file={allowed_path}/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
657
  for i, path in enumerate(image_path)
658
  ]
659
  )
660
  if save_generated_images:
661
  info_images += f"<br>{download_links}"
662
 
663
- if not display_images:
664
- img = gr.update()
665
  info_state = "COMPLETE"
666
 
667
- elif not enable_live_preview:
668
- img = gr.update()
669
-
670
  yield info_state, img, info_images
671
 
672
 
@@ -766,27 +694,22 @@ def sd_gen_generate_pipeline(*args):
766
 
767
 
768
  @spaces.GPU(duration=15)
769
- def process_upscale(image, upscaler_name, upscaler_size):
770
- if image is None:
771
- return None
772
 
773
  from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
774
- from stablepy import load_upscaler_model
775
 
776
- image = image.convert("RGB")
777
  exif_image = extract_exif_data(image)
778
 
779
- name_upscaler = UPSCALER_DICT_GUI[upscaler_name]
780
-
781
- if "https://" in str(name_upscaler):
 
 
782
 
783
- if not os.path.exists(f"./{DIRECTORY_UPSCALERS}/{name_upscaler.split('/')[-1]}"):
784
- download_things(DIRECTORY_UPSCALERS, name_upscaler, HF_TOKEN)
785
-
786
- name_upscaler = f"./{DIRECTORY_UPSCALERS}/{name_upscaler.split('/')[-1]}"
787
-
788
- scaler_beta = load_upscaler_model(model=name_upscaler, tile=(0 if IS_ZERO_GPU else 192), tile_overlap=8, device=("cuda" if IS_GPU_MODE else "cpu"), half=IS_GPU_MODE)
789
- image_up = scaler_beta.upscale(image, upscaler_size, True)
790
 
791
  image_path = save_pil_image_with_metadata(image_up, f'{os.getcwd()}/up_images', exif_image)
792
 
@@ -794,11 +717,11 @@ def process_upscale(image, upscaler_name, upscaler_size):
794
 
795
 
796
  # https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
797
- # dynamic_gpu_duration.zerogpu = True
798
- # sd_gen_generate_pipeline.zerogpu = True
799
  sd_gen = GuiSD()
800
 
801
- with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
802
  gr.Markdown("# 🧩 DiffuseCraft")
803
  gr.Markdown(SUBTITLE_GUI)
804
  with gr.Tab("Generation"):
@@ -814,14 +737,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
814
 
815
  return gr.update(value=task_name, choices=new_choices)
816
 
817
- with gr.Accordion("Model and Task", open=True, visible=True):
818
- task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
819
- model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
820
  prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
821
-
822
- with gr.Accordion("Negative prompt", open=False, visible=True):
823
- neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="bad anatomy, ((many hands, bad hands, missing fingers)), anatomical nonsense, ugly, deformed, bad proportions, bad shadow, extra limbs, missing limbs, floating limbs, disconnected limbs, malformed hands, poorly drawn, mutation, mutated hands and fingers, extra legs, interlocked fingers, extra arms, disfigured face, long neck, asymmetrical eyes, lowres, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, duplicate, bad composition, text, worst quality, normal quality, low quality, very displeasing, desaturated, low contrast, muted tones, washed out, unfinished, incomplete, draft, logo, backlighting")
824
-
825
  with gr.Row(equal_height=False):
826
  set_params_gui = gr.Button(value="↙️", variant="secondary", size="sm")
827
  clear_prompt_gui = gr.Button(value="🗑️", variant="secondary", size="sm")
@@ -851,180 +770,138 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
851
 
852
  actual_task_info = gr.HTML()
853
 
854
- with gr.Row(equal_height=False, variant="default", visible=IS_ZERO_GPU):
855
  gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
856
  with gr.Column():
857
  verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
858
  load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")
859
 
860
  with gr.Column(scale=1):
861
- with gr.Accordion("Generation settings", open=True, visible=True):
862
- steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
863
- cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
864
- sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
865
- schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
866
- img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=832, label="Img Width")
867
- img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1216, label="Img Height")
868
- seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
869
- pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
870
- with gr.Row():
871
- clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
872
- free_u_gui = gr.Checkbox(value=True, label="FreeU")
873
-
874
- with gr.Row(equal_height=False):
875
- num_images_gui = gr.Slider(minimum=1, maximum=(16 if IS_ZERO_GPU else 20), step=1, value=1, label="Images")
876
- prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[2][1])
877
- vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
878
-
879
-
880
- def run_set_params_gui(base_prompt, name_model):
881
- valid_receptors = { # default values
882
- "prompt": gr.update(value=base_prompt),
883
- "neg_prompt": gr.update(value=""),
884
- "Steps": gr.update(value=30),
885
- "width": gr.update(value=1024),
886
- "height": gr.update(value=1024),
887
- "Seed": gr.update(value=-1),
888
- "Sampler": gr.update(value="Euler"),
889
- "CFG scale": gr.update(value=7.), # cfg
890
- "Clip skip": gr.update(value=True),
891
- "Model": gr.update(value=name_model),
892
- "Schedule type": gr.update(value="Automatic"),
893
- "PAG": gr.update(value=.0),
894
- "FreeU": gr.update(value=False),
895
- "Hires upscaler": gr.update(),
896
- "Hires upscale": gr.update(),
897
- "Hires steps": gr.update(),
898
- "Hires denoising strength": gr.update(),
899
- "Hires CFG": gr.update(),
900
- "Hires sampler": gr.update(),
901
- "Hires schedule type": gr.update(),
902
- "Image resolution": gr.update(value=1024),
903
- "Strength": gr.update(),
904
- "Prompt emphasis": gr.update(),
905
- }
906
-
907
- # Generate up to 7 LoRAs
908
- for i in range(1, 8):
909
- valid_receptors[f"Lora_{i}"] = gr.update()
910
- valid_receptors[f"Lora_scale_{i}"] = gr.update()
911
-
912
- valid_keys = list(valid_receptors.keys())
913
-
914
- parameters = extract_parameters(base_prompt)
915
- # print(parameters)
916
-
917
- if "Sampler" in parameters:
918
- value_sampler = parameters["Sampler"]
919
- for s_type in SCHEDULE_TYPE_OPTIONS:
920
- if s_type in value_sampler:
921
- value_sampler = value_sampler.replace(s_type, "").strip()
922
- parameters["Sampler"] = value_sampler
923
- parameters["Schedule type"] = s_type
924
-
925
- params_lora = []
926
- if ">" in parameters["prompt"] and "<" in parameters["prompt"]:
927
- params_lora = re.findall(r'<lora:[^>]+>', parameters["prompt"])
928
- if "Loras" in parameters:
929
- params_lora += re.findall(r'<lora:[^>]+>', parameters["Loras"])
930
-
931
- if params_lora:
932
- parsed_params = []
933
- for tag_l in params_lora:
934
- try:
935
- inner = tag_l.strip("<>") # remove < >
936
- _, data_l = inner.split(":", 1) # remove the "lora:" part
937
- parts_l = data_l.split(":")
938
-
939
- name_l = parts_l[0]
940
- weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0 # default weight = 1.0
941
-
942
- parsed_params.append((name_l, weight_l))
943
- except Exception as e:
944
- print(f"Error parsing LoRA tag {tag_l}: {e}")
945
-
946
- new_lora_model_list = get_model_list(DIRECTORY_LORAS)
947
- new_lora_model_list.insert(0, "None")
948
-
949
- num_lora = 1
950
- for parsed_l, parsed_s in parsed_params:
951
- filtered_loras = [m for m in new_lora_model_list if parsed_l in m]
952
- if filtered_loras:
953
- parameters[f"Lora_{num_lora}"] = filtered_loras[0]
954
- parameters[f"Lora_scale_{num_lora}"] = parsed_s
955
- num_lora += 1
956
-
957
- # continue = discard new value
958
- for key, val in parameters.items():
959
- # print(val)
960
- if key in valid_keys:
961
- try:
962
- if key == "Sampler":
963
- if val not in scheduler_names:
964
- continue
965
- if key in ["Schedule type", "Hires schedule type"]:
966
- if val not in SCHEDULE_TYPE_OPTIONS:
967
- continue
968
- if key == "Hires sampler":
969
- if val not in POST_PROCESSING_SAMPLER:
970
- continue
971
- if key == "Prompt emphasis":
972
- if val not in PROMPT_WEIGHT_OPTIONS_PRIORITY:
973
- continue
974
- elif key == "Clip skip":
975
- if "," in str(val):
976
- val = val.replace(",", "")
977
- if int(val) >= 2:
978
  val = True
979
- if key == "prompt":
980
- if ">" in val and "<" in val:
981
- val = re.sub(r'<[^>]+>', '', val) # Delete html and loras
982
- print("Removed LoRA written in the prompt")
983
- if key in ["prompt", "neg_prompt"]:
984
- val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
985
- if key in ["Steps", "width", "height", "Seed", "Hires steps", "Image resolution"]:
986
- val = int(val)
987
- if key == "FreeU":
988
- val = True
989
- if key in ["CFG scale", "PAG", "Hires upscale", "Hires denoising strength", "Hires CFG", "Strength"]:
990
- val = float(val)
991
- if key == "Model":
992
- filtered_models = [m for m in model_list if val in m]
993
- if filtered_models:
994
- val = filtered_models[0]
995
- else:
996
- val = name_model
997
- if key == "Hires upscaler":
998
- if val not in UPSCALER_KEYS:
999
  continue
1000
- if key == "Seed":
1001
- continue
1002
-
1003
- valid_receptors[key] = gr.update(value=val)
1004
- # print(val, type(val))
1005
- # print(valid_receptors)
1006
- except Exception as e:
1007
- print(str(e))
1008
- return [value for value in valid_receptors.values()]
1009
-
1010
- def run_clear_prompt_gui():
1011
- return gr.update(value=""), gr.update(value="")
1012
- clear_prompt_gui.click(
1013
- run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
1014
- )
 
 
 
 
 
 
 
 
 
1015
 
1016
- def run_set_random_seed():
1017
- return -1
1018
- set_random_seed.click(
1019
- run_set_random_seed, [], seed_gui
1020
- )
 
 
 
 
 
 
 
 
 
 
1021
 
1022
  with gr.Accordion("Hires fix", open=False, visible=True):
1023
 
1024
  upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
1025
  upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
1026
- upscaler_tile_size_gui = gr.Slider(minimum=0, maximum=512, step=16, value=(0 if IS_ZERO_GPU else 192), label="Upscaler Tile Size", info="0 = no tiling")
1027
- upscaler_tile_overlap_gui = gr.Slider(minimum=0, maximum=48, step=1, value=8, label="Upscaler Tile Overlap")
1028
  hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
1029
  hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
1030
  hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
@@ -1040,8 +917,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1040
  return gr.Dropdown(label=label, choices=lora_model_list, value="None", allow_custom_value=True, visible=visible)
1041
 
1042
  def lora_scale_slider(label, visible=True):
1043
- val_lora = 8 if IS_ZERO_GPU else 10
1044
- return gr.Slider(minimum=-val_lora, maximum=val_lora, step=0.01, value=0.33, label=label, visible=visible)
1045
 
1046
  lora1_gui = lora_dropdown("Lora1")
1047
  lora_scale_1_gui = lora_scale_slider("Lora Scale 1")
@@ -1053,10 +929,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1053
  lora_scale_4_gui = lora_scale_slider("Lora Scale 4")
1054
  lora5_gui = lora_dropdown("Lora5")
1055
  lora_scale_5_gui = lora_scale_slider("Lora Scale 5")
1056
- lora6_gui = lora_dropdown("Lora6")
1057
- lora_scale_6_gui = lora_scale_slider("Lora Scale 6")
1058
- lora7_gui = lora_dropdown("Lora7")
1059
- lora_scale_7_gui = lora_scale_slider("Lora Scale 7")
1060
 
1061
  with gr.Accordion("From URL", open=False, visible=True):
1062
  text_lora = gr.Textbox(
@@ -1065,7 +941,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1065
  lines=1,
1066
  info="It has to be .safetensors files, and you can also download them from Hugging Face.",
1067
  )
1068
- romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=(not IS_ZERO_GPU))
1069
  button_lora = gr.Button("Get and Refresh the LoRA Lists")
1070
  new_lora_status = gr.HTML()
1071
  button_lora.click(
@@ -1074,16 +950,11 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1074
  [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, lora6_gui, lora7_gui, new_lora_status]
1075
  )
1076
 
1077
- with gr.Accordion("Face restoration", open=False, visible=True):
1078
-
1079
- face_rest_options = [None] + FACE_RESTORATION_MODELS
1080
-
1081
- face_restoration_model_gui = gr.Dropdown(label="Face restoration model", choices=face_rest_options, value=face_rest_options[0])
1082
- face_restoration_visibility_gui = gr.Slider(minimum=0., maximum=1., step=0.001, value=1., label="Visibility")
1083
- face_restoration_weight_gui = gr.Slider(minimum=0., maximum=1., step=0.001, value=.5, label="Weight", info="(0 = maximum effect, 1 = minimum effect)")
1084
-
1085
  with gr.Accordion("IP-Adapter", open=False, visible=True):
1086
 
 
 
 
1087
  with gr.Accordion("IP-Adapter 1", open=False, visible=True):
1088
  image_ip1 = gr.Image(label="IP Image", type="filepath")
1089
  mask_ip1 = gr.Image(label="IP Mask", type="filepath")
@@ -1102,13 +973,13 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1102
  image_mask_gui = gr.Image(label="Image Mask", type="filepath")
1103
  strength_gui = gr.Slider(
1104
  minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
1105
- info="This option adjusts the level of changes for img2img, repaint and inpaint."
1106
  )
1107
  image_resolution_gui = gr.Slider(
1108
  minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
1109
  info="The maximum proportional size of the generated image based on the uploaded image."
1110
  )
1111
- controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0], allow_custom_value=True)
1112
  control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
1113
  control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
1114
  control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
@@ -1130,8 +1001,8 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1130
  preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
1131
  low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
1132
  high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
1133
- value_threshold_gui = gr.Slider(minimum=0.0, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
1134
- distance_threshold_gui = gr.Slider(minimum=0.0, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
1135
  recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
1136
  tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
1137
 
@@ -1166,7 +1037,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1166
  gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
1167
  return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
1168
 
1169
- style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
1170
 
1171
  with gr.Accordion("Textual inversion", open=False, visible=False):
1172
  active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
@@ -1212,67 +1083,23 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1212
  schedule_prediction_type_gui = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
1213
  guidance_rescale_gui = gr.Number(label="CFG rescale:", value=0., step=0.01, minimum=0., maximum=1.5)
1214
  save_generated_images_gui = gr.Checkbox(value=True, label="Create a download link for the images")
1215
- enable_live_preview_gui = gr.Checkbox(value=True, label="Enable live previews")
1216
- display_images_gui = gr.Checkbox(value=True, label="Show final results")
1217
  filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
1218
  hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
1219
  hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
1220
  generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
1221
- with gr.Column(visible=(not IS_ZERO_GPU)):
1222
- image_storage_location_gui = gr.Textbox(value=img_path, label="Image Storage Location")
1223
- disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
1224
- leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
1225
 
1226
  with gr.Accordion("More settings", open=False, visible=False):
1227
  loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
1228
  retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
1229
-
1230
- image_previews_gui = gr.Checkbox(value=True, label="Image Previews (alt)")
 
 
 
1231
  retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1232
  retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
1233
  retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
1234
-
1235
- set_params_gui.click(
1236
- run_set_params_gui, [prompt_gui, model_name_gui], [
1237
- prompt_gui,
1238
- neg_prompt_gui,
1239
- steps_gui,
1240
- img_width_gui,
1241
- img_height_gui,
1242
- seed_gui,
1243
- sampler_gui,
1244
- cfg_gui,
1245
- clip_skip_gui,
1246
- model_name_gui,
1247
- schedule_type_gui,
1248
- pag_scale_gui,
1249
- free_u_gui,
1250
- upscaler_model_path_gui,
1251
- upscaler_increases_size_gui,
1252
- hires_steps_gui,
1253
- hires_denoising_strength_gui,
1254
- hires_guidance_scale_gui,
1255
- hires_sampler_gui,
1256
- hires_schedule_type_gui,
1257
- image_resolution_gui,
1258
- strength_gui,
1259
- prompt_syntax_gui,
1260
- lora1_gui,
1261
- lora_scale_1_gui,
1262
- lora2_gui,
1263
- lora_scale_2_gui,
1264
- lora3_gui,
1265
- lora_scale_3_gui,
1266
- lora4_gui,
1267
- lora_scale_4_gui,
1268
- lora5_gui,
1269
- lora_scale_5_gui,
1270
- lora6_gui,
1271
- lora_scale_6_gui,
1272
- lora7_gui,
1273
- lora_scale_7_gui,
1274
- ],
1275
- )
1276
 
1277
  with gr.Accordion("Examples and help", open=False, visible=True):
1278
  gr.Markdown(HELP_GUI)
@@ -1329,21 +1156,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1329
  # "hsl(360, 120, 120)" # in fact any valid colorstring
1330
  ]
1331
  ),
1332
- eraser=gr.Eraser(default_size="16"),
1333
- render=True,
1334
- visible=False,
1335
- interactive=False,
1336
  )
1337
-
1338
- show_canvas = gr.Button("SHOW INPAINT CANVAS")
1339
-
1340
- def change_visibility_canvas():
1341
- return gr.update(visible=True, interactive=True), gr.update(visible=False)
1342
- show_canvas.click(change_visibility_canvas, [], [image_base, show_canvas])
1343
-
1344
  invert_mask = gr.Checkbox(value=False, label="Invert mask")
1345
  btn = gr.Button("Create mask")
1346
-
1347
  with gr.Column(scale=1):
1348
  img_source = gr.Image(interactive=False)
1349
  img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
@@ -1374,11 +1190,8 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1374
 
1375
  with gr.Row():
1376
  with gr.Column():
1377
-
1378
- USCALER_TAB_KEYS = [name for name in UPSCALER_KEYS[9:]]
1379
-
1380
  image_up_tab = gr.Image(label="Image", type="pil", sources=["upload"])
1381
- upscaler_tab = gr.Dropdown(label="Upscaler", choices=USCALER_TAB_KEYS, value=USCALER_TAB_KEYS[5])
1382
  upscaler_size_tab = gr.Slider(minimum=1., maximum=4., step=0.1, value=1.1, label="Upscale by")
1383
  generate_button_up_tab = gr.Button(value="START UPSCALE", variant="primary")
1384
 
@@ -1386,7 +1199,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1386
  result_up_tab = gr.Image(label="Result", type="pil", interactive=False, format="png")
1387
 
1388
  generate_button_up_tab.click(
1389
- fn=process_upscale,
1390
  inputs=[image_up_tab, upscaler_tab, upscaler_size_tab],
1391
  outputs=[result_up_tab],
1392
  )
@@ -1405,7 +1218,6 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1405
  outputs=[load_model_gui],
1406
  queue=True,
1407
  show_progress="minimal",
1408
- api_name=(False if HIDE_API else None),
1409
  ).success(
1410
  fn=sd_gen_generate_pipeline, # fn=sd_gen.generate_pipeline,
1411
  inputs=[
@@ -1459,8 +1271,8 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1459
  prompt_syntax_gui,
1460
  upscaler_model_path_gui,
1461
  upscaler_increases_size_gui,
1462
- upscaler_tile_size_gui,
1463
- upscaler_tile_overlap_gui,
1464
  hires_steps_gui,
1465
  hires_denoising_strength_gui,
1466
  hires_sampler_gui,
@@ -1485,7 +1297,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1485
  t2i_adapter_preprocessor_gui,
1486
  adapter_conditioning_scale_gui,
1487
  adapter_conditioning_factor_gui,
1488
- enable_live_preview_gui,
1489
  free_u_gui,
1490
  generator_in_cpu_gui,
1491
  adetailer_inpaint_only_gui,
@@ -1524,9 +1336,6 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1524
  mode_ip2,
1525
  scale_ip2,
1526
  pag_scale_gui,
1527
- face_restoration_model_gui,
1528
- face_restoration_visibility_gui,
1529
- face_restoration_weight_gui,
1530
  load_lora_cpu_gui,
1531
  verbose_info_gui,
1532
  gpu_duration_gui,
@@ -1534,16 +1343,12 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
1534
  outputs=[load_model_gui, result_images, actual_task_info],
1535
  queue=True,
1536
  show_progress="minimal",
1537
- # api_name=(False if HIDE_API else None),
1538
  )
1539
 
1540
- if __name__ == "__main__":
1541
- app.queue()
1542
- app.launch(
1543
- show_error=True,
1544
- share=args.share_enabled,
1545
- debug=True,
1546
- ssr_mode=args.ssr,
1547
- allowed_paths=[allowed_path],
1548
- show_api=(not HIDE_API),
1549
- )
 
1
  import spaces
2
  import os
 
3
  from stablepy import (
4
  Model_Diffusers,
5
  SCHEDULE_TYPE_OPTIONS,
6
  SCHEDULE_PREDICTION_TYPE_OPTIONS,
7
  check_scheduler_compatibility,
8
  TASK_AND_PREPROCESSORS,
 
 
 
9
  )
10
  from constants import (
11
  DIRECTORY_MODELS,
12
  DIRECTORY_LORAS,
13
  DIRECTORY_VAES,
14
  DIRECTORY_EMBEDS,
 
15
  DOWNLOAD_MODEL,
16
  DOWNLOAD_VAE,
17
  DOWNLOAD_LORA,
 
35
  EXAMPLES_GUI,
36
  RESOURCES,
37
  DIFFUSERS_CONTROLNET_MODEL,
 
 
 
 
38
  )
39
  from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
40
  import torch
41
  import re
42
+ from stablepy import (
43
+ scheduler_names,
44
+ IP_ADAPTERS_SD,
45
+ IP_ADAPTERS_SDXL,
46
+ )
47
  import time
 
48
  from PIL import ImageFile
49
  from utils import (
50
  download_things,
 
60
  progress_step_bar,
61
  html_template_message,
62
  escape_html,
 
63
  )
64
  from image_processor import preprocessor_tab
65
  from datetime import datetime
 
70
  from stablepy import logger
71
  from diffusers import FluxPipeline
72
  # import urllib.parse
 
 
 
 
 
 
 
 
 
 
73
 
74
  ImageFile.LOAD_TRUNCATED_IMAGES = True
75
  torch.backends.cuda.matmul.allow_tf32 = True
76
  # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
77
+ print(os.getenv("SPACES_ZERO_GPU"))
78
 
79
+ directories = [DIRECTORY_MODELS, DIRECTORY_LORAS, DIRECTORY_VAES, DIRECTORY_EMBEDS]
80
  for directory in directories:
81
  os.makedirs(directory, exist_ok=True)
82
 
83
  # Download stuffs
84
  for url in [url.strip() for url in DOWNLOAD_MODEL.split(',')]:
85
+ if not os.path.exists(f"./models/{url.split('/')[-1]}"):
86
+ download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
87
  for url in [url.strip() for url in DOWNLOAD_VAE.split(',')]:
88
+ if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
89
+ download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
90
  for url in [url.strip() for url in DOWNLOAD_LORA.split(',')]:
91
+ if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
92
+ download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
93
 
94
  # Download Embeddings
95
  for url_embed in DOWNLOAD_EMBEDS:
96
+ if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
97
+ download_things(DIRECTORY_EMBEDS, url_embed, HF_TOKEN, CIVITAI_API_KEY)
98
 
99
  # Build list models
100
  embed_list = get_model_list(DIRECTORY_EMBEDS)
 
112
 
113
  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
114
 
115
+ flux_repo = "camenduru/FLUX.1-dev-diffusers"
116
+ flux_pipe = FluxPipeline.from_pretrained(
117
+ flux_repo,
118
+ transformer=None,
119
+ torch_dtype=torch.bfloat16,
120
+ ).to("cuda")
121
+ components = flux_pipe.components
122
+ components.pop("transformer", None)
123
+ delete_model(flux_repo)
 
124
 
125
  #######################
126
  # GUI
 
130
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
131
  warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
132
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
133
+ logger.setLevel(logging.DEBUG)
 
 
 
 
 
 
 
 
 
 
134
 
135
  CSS = """
136
  .contain { display: flex; flex-direction: column; }
 
140
  """
141
 
142
 
 
 
 
 
 
 
143
  class GuiSD:
144
  def __init__(self, stream=True):
145
  self.model = None
 
148
  self.last_load = datetime.now()
149
  self.inventory = []
150
 
151
+ def update_storage_models(self, storage_floor_gb=24, required_inventory_for_purge=3):
 
 
 
 
152
  while get_used_storage_gb() > storage_floor_gb:
153
  if len(self.inventory) < required_inventory_for_purge:
154
  break
155
  removal_candidate = self.inventory.pop(0)
156
  delete_model(removal_candidate)
157
 
 
 
 
 
 
 
 
 
 
158
  def update_inventory(self, model_name):
159
  if model_name not in single_file_model_list:
160
  self.inventory = [
 
164
 
165
  def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
166
 
167
+ # download link model > model_name
 
 
 
 
 
 
 
 
 
168
 
169
+ self.update_storage_models()
 
 
 
 
 
 
170
 
171
+ vae_model = vae_model if vae_model != "None" else None
172
+ model_type = get_model_type(model_name)
173
+ dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
174
+
175
+ if not os.path.exists(model_name):
176
+ _ = download_diffuser_repo(
177
+ repo_name=model_name,
178
+ model_type=model_type,
179
+ revision="main",
180
+ token=True,
181
+ )
182
 
183
+ self.update_inventory(model_name)
 
 
 
 
184
 
185
  for i in range(68):
186
  if not self.status_loading:
 
198
  yield f"Loading model: {model_name}"
199
 
200
  if vae_model == "BakedVAE":
201
+ if not os.path.exists(model_name):
202
+ vae_model = model_name
203
+ else:
204
+ vae_model = None
205
  elif vae_model:
206
  vae_type = "SDXL" if "sdxl" in vae_model.lower() else "SD 1.5"
207
  if model_type != vae_type:
 
220
  type_model_precision=dtype_model,
221
  retain_task_model_in_cache=False,
222
  controlnet_model=controlnet_model,
223
+ device="cpu",
224
  env_components=components,
225
  )
226
+ self.model.advanced_params(image_preprocessor_cuda_active=True)
227
  else:
228
  if self.model.base_model_id != model_name:
229
  load_now_time = datetime.now()
 
233
  print("Waiting for the previous model's time ops...")
234
  time.sleep(9 - elapsed_time)
235
 
236
+ self.model.device = torch.device("cpu")
 
237
  self.model.load_pipe(
238
  model_name,
239
  task_name=TASK_STABLEPY[task],
 
310
  syntax_weights,
311
  upscaler_model_path,
312
  upscaler_increases_size,
313
+ esrgan_tile,
314
+ esrgan_tile_overlap,
315
  hires_steps,
316
  hires_denoising_strength,
317
  hires_sampler,
 
336
  t2i_adapter_preprocessor,
337
  t2i_adapter_conditioning_scale,
338
  t2i_adapter_conditioning_factor,
339
+ xformers_memory_efficient_attention,
340
  freeu,
341
  generator_in_cpu,
342
  adetailer_inpaint_only,
 
375
  mode_ip2,
376
  scale_ip2,
377
  pag_scale,
 
 
 
378
  ):
379
  info_state = html_template_message("Navigating latent space...")
380
  yield info_state, gr.update(), gr.update()
 
384
  vae_msg = f"VAE: {vae_model}" if vae_model else ""
385
  msg_lora = ""
386
 
387
+ print("Config model:", model_name, vae_model, loras_list)
388
 
389
  task = TASK_STABLEPY[task]
390
 
 
413
  self.model.stream_config(concurrency=concurrency, latent_resize_by=1, vae_decoding=False)
414
 
415
  if task != "txt2img" and not image_control:
416
+ raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")
417
 
418
+ if task == "inpaint" and not image_mask:
419
+ raise ValueError("No mask image found: Specify one in 'Image Mask'")
420
 
421
+ if upscaler_model_path in UPSCALER_KEYS[:9]:
422
  upscaler_model = upscaler_model_path
423
  else:
424
+ directory_upscalers = 'upscalers'
425
+ os.makedirs(directory_upscalers, exist_ok=True)
426
+
427
  url_upscaler = UPSCALER_DICT_GUI[upscaler_model_path]
428
 
429
+ if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
430
+ download_things(directory_upscalers, url_upscaler, HF_TOKEN)
431
 
432
+ upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"
433
 
434
  logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
435
 
 
485
  "distance_threshold": distance_threshold,
486
  "recolor_gamma_correction": float(recolor_gamma_correction),
487
  "tile_blur_sigma": int(tile_blur_sigma),
488
+ "lora_A": lora1 if lora1 != "None" else None,
489
  "lora_scale_A": lora_scale1,
490
+ "lora_B": lora2 if lora2 != "None" else None,
491
  "lora_scale_B": lora_scale2,
492
+ "lora_C": lora3 if lora3 != "None" else None,
493
  "lora_scale_C": lora_scale3,
494
+ "lora_D": lora4 if lora4 != "None" else None,
495
  "lora_scale_D": lora_scale4,
496
+ "lora_E": lora5 if lora5 != "None" else None,
497
  "lora_scale_E": lora_scale5,
498
+ "lora_F": lora6 if lora6 != "None" else None,
499
  "lora_scale_F": lora_scale6,
500
+ "lora_G": lora7 if lora7 != "None" else None,
501
  "lora_scale_G": lora_scale7,
502
  "textual_inversion": embed_list if textual_inversion else [],
503
  "syntax_weights": syntax_weights, # "Classic"
504
  "sampler": sampler,
505
  "schedule_type": schedule_type,
506
  "schedule_prediction_type": schedule_prediction_type,
507
+ "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
508
  "gui_active": True,
509
  "loop_generation": loop_generation,
510
  "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
 
519
  "leave_progress_bar": leave_progress_bar,
520
  "disable_progress_bar": disable_progress_bar,
521
  "image_previews": image_previews,
522
+ "display_images": display_images,
523
  "save_generated_images": save_generated_images,
524
  "filename_pattern": filename_pattern,
525
  "image_storage_location": image_storage_location,
 
531
  "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
532
  "upscaler_model_path": upscaler_model,
533
  "upscaler_increases_size": upscaler_increases_size,
534
+ "esrgan_tile": esrgan_tile,
535
+ "esrgan_tile_overlap": esrgan_tile_overlap,
536
  "hires_steps": hires_steps,
537
  "hires_denoising_strength": hires_denoising_strength,
538
  "hires_prompt": hires_prompt,
 
547
  "ip_adapter_model": params_ip_model,
548
  "ip_adapter_mode": params_ip_mode,
549
  "ip_adapter_scale": params_ip_scale,
 
 
 
550
  }
551
 
552
  # kwargs for diffusers pipeline
553
  if guidance_rescale:
554
  pipe_params["guidance_rescale"] = guidance_rescale
555
+
556
+ self.model.device = torch.device("cuda:0")
557
+ if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
558
+ self.model.pipe.transformer.to(self.model.device)
559
+ print("transformer to cuda")
560
 
561
  actual_progress = 0
562
  info_images = gr.update()
 
586
 
587
  download_links = "<br>".join(
588
  [
589
+ f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
590
  for i, path in enumerate(image_path)
591
  ]
592
  )
593
  if save_generated_images:
594
  info_images += f"<br>{download_links}"
595
 
 
 
596
  info_state = "COMPLETE"
597
 
 
 
 
598
  yield info_state, img, info_images
599
 
600
 
 
694
 
695
 
696
  @spaces.GPU(duration=15)
697
+ def esrgan_upscale(image, upscaler_name, upscaler_size):
698
+ if image is None: return None
 
699
 
700
  from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
701
+ from stablepy import UpscalerESRGAN
702
 
 
703
  exif_image = extract_exif_data(image)
704
 
705
+ url_upscaler = UPSCALER_DICT_GUI[upscaler_name]
706
+ directory_upscalers = 'upscalers'
707
+ os.makedirs(directory_upscalers, exist_ok=True)
708
+ if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
709
+ download_things(directory_upscalers, url_upscaler, HF_TOKEN)
710
 
711
+ scaler_beta = UpscalerESRGAN(0, 0)
712
+ image_up = scaler_beta.upscale(image, upscaler_size, f"./upscalers/{url_upscaler.split('/')[-1]}")
 
 
 
 
 
713
 
714
  image_path = save_pil_image_with_metadata(image_up, f'{os.getcwd()}/up_images', exif_image)
715
 
 
717
 
718
 
719
  # https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
720
+ dynamic_gpu_duration.zerogpu = True
721
+ sd_gen_generate_pipeline.zerogpu = True
722
  sd_gen = GuiSD()
723
 
724
+ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
725
  gr.Markdown("# 🧩 DiffuseCraft")
726
  gr.Markdown(SUBTITLE_GUI)
727
  with gr.Tab("Generation"):
 
737
 
738
  return gr.update(value=task_name, choices=new_choices)
739
 
740
+ task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
741
+ model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
 
742
  prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
743
+ neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, worst quality, low quality, very displeasing, (bad)")
 
 
 
744
  with gr.Row(equal_height=False):
745
  set_params_gui = gr.Button(value="↙️", variant="secondary", size="sm")
746
  clear_prompt_gui = gr.Button(value="🗑️", variant="secondary", size="sm")
 
770
 
771
  actual_task_info = gr.HTML()
772
 
773
+ with gr.Row(equal_height=False, variant="default"):
774
  gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
775
  with gr.Column():
776
  verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
777
  load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")
778
 
779
  with gr.Column(scale=1):
780
+ steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=28, label="Steps")
781
+ cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
782
+ sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
783
+ schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
784
+ img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Width")
785
+ img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
786
+ seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
787
+ pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
788
+ with gr.Row():
789
+ clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
790
+ free_u_gui = gr.Checkbox(value=False, label="FreeU")
791
+
792
+ with gr.Row(equal_height=False):
793
+
794
+ def run_set_params_gui(base_prompt, name_model):
795
+ valid_receptors = { # default values
796
+ "prompt": gr.update(value=base_prompt),
797
+ "neg_prompt": gr.update(value=""),
798
+ "Steps": gr.update(value=30),
799
+ "width": gr.update(value=1024),
800
+ "height": gr.update(value=1024),
801
+ "Seed": gr.update(value=-1),
802
+ "Sampler": gr.update(value="Euler"),
803
+ "CFG scale": gr.update(value=7.), # cfg
804
+ "Clip skip": gr.update(value=True),
805
+ "Model": gr.update(value=name_model),
806
+ "Schedule type": gr.update(value="Automatic"),
807
+ "PAG": gr.update(value=.0),
808
+ "FreeU": gr.update(value=False),
809
+ }
810
+ valid_keys = list(valid_receptors.keys())
811
+
812
+ parameters = extract_parameters(base_prompt)
813
+ # print(parameters)
814
+
815
+ if "Sampler" in parameters:
816
+ value_sampler = parameters["Sampler"]
817
+ for s_type in SCHEDULE_TYPE_OPTIONS:
818
+ if s_type in value_sampler:
819
+ value_sampler = value_sampler.replace(s_type, "").strip()
820
+ parameters["Sampler"] = value_sampler
821
+ parameters["Schedule type"] = s_type
822
+
823
+ for key, val in parameters.items():
824
+ # print(val)
825
+ if key in valid_keys:
826
+ try:
827
+ if key == "Sampler":
828
+ if val not in scheduler_names:
829
+ continue
830
+ if key == "Schedule type":
831
+ if val not in SCHEDULE_TYPE_OPTIONS:
832
+ val = "Automatic"
833
+ elif key == "Clip skip":
834
+ if "," in str(val):
835
+ val = val.replace(",", "")
836
+ if int(val) >= 2:
837
+ val = True
838
+ if key == "prompt":
839
+ if ">" in val and "<" in val:
840
+ val = re.sub(r'<[^>]+>', '', val)
841
+ print("Removed LoRA written in the prompt")
842
+ if key in ["prompt", "neg_prompt"]:
843
+ val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
844
+ if key in ["Steps", "width", "height", "Seed"]:
845
+ val = int(val)
846
+ if key == "FreeU":
 
 
847
  val = True
848
+ if key in ["CFG scale", "PAG"]:
849
+ val = float(val)
850
+ if key == "Model":
851
+ filtered_models = [m for m in model_list if val in m]
852
+ if filtered_models:
853
+ val = filtered_models[0]
854
+ else:
855
+ val = name_model
856
+ if key == "Seed":
 
 
857
  continue
858
+ valid_receptors[key] = gr.update(value=val)
859
+ # print(val, type(val))
860
+ # print(valid_receptors)
861
+ except Exception as e:
862
+ print(str(e))
863
+ return [value for value in valid_receptors.values()]
864
+
865
+ set_params_gui.click(
866
+ run_set_params_gui, [prompt_gui, model_name_gui], [
867
+ prompt_gui,
868
+ neg_prompt_gui,
869
+ steps_gui,
870
+ img_width_gui,
871
+ img_height_gui,
872
+ seed_gui,
873
+ sampler_gui,
874
+ cfg_gui,
875
+ clip_skip_gui,
876
+ model_name_gui,
877
+ schedule_type_gui,
878
+ pag_scale_gui,
879
+ free_u_gui,
880
+ ],
881
+ )
882
 
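For reference, the Sampler/Schedule type split performed above can be reduced to this small sketch; SCHEDULE_TYPE_OPTIONS is a stand-in subset here, the real list comes from constants.py:

SCHEDULE_TYPE_OPTIONS = ["Automatic", "Karras", "Exponential"]  # illustrative subset

def split_sampler(value_sampler):
    # strip a trailing schedule type (e.g. "Karras") out of an A1111-style sampler name
    schedule_type = "Automatic"
    for s_type in SCHEDULE_TYPE_OPTIONS:
        if s_type in value_sampler:
            value_sampler = value_sampler.replace(s_type, "").strip()
            schedule_type = s_type
    return value_sampler, schedule_type

print(split_sampler("DPM++ 2M SDE Karras"))  # -> ('DPM++ 2M SDE', 'Karras')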
883
+ def run_clear_prompt_gui():
884
+ return gr.update(value=""), gr.update(value="")
885
+ clear_prompt_gui.click(
886
+ run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
887
+ )
888
+
889
+ def run_set_random_seed():
890
+ return -1
891
+ set_random_seed.click(
892
+ run_set_random_seed, [], seed_gui
893
+ )
894
+
895
+ num_images_gui = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Images")
896
+ prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
897
+ vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
898
 
899
  with gr.Accordion("Hires fix", open=False, visible=True):
900
 
901
  upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
902
  upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
903
+ esrgan_tile_gui = gr.Slider(minimum=0, value=0, maximum=500, step=1, label="ESRGAN Tile")
904
+ esrgan_tile_overlap_gui = gr.Slider(minimum=1, maximum=200, step=1, value=8, label="ESRGAN Tile Overlap")
905
  hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
906
  hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
907
  hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
 
917
  return gr.Dropdown(label=label, choices=lora_model_list, value="None", allow_custom_value=True, visible=visible)
918
 
919
  def lora_scale_slider(label, visible=True):
920
+ return gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label=label, visible=visible)
 
921
 
922
  lora1_gui = lora_dropdown("Lora1")
923
  lora_scale_1_gui = lora_scale_slider("Lora Scale 1")
 
929
  lora_scale_4_gui = lora_scale_slider("Lora Scale 4")
930
  lora5_gui = lora_dropdown("Lora5")
931
  lora_scale_5_gui = lora_scale_slider("Lora Scale 5")
932
+ lora6_gui = lora_dropdown("Lora6", visible=False)
933
+ lora_scale_6_gui = lora_scale_slider("Lora Scale 6", visible=False)
934
+ lora7_gui = lora_dropdown("Lora7", visible=False)
935
+ lora_scale_7_gui = lora_scale_slider("Lora Scale 7", visible=False)
936
 
937
  with gr.Accordion("From URL", open=False, visible=True):
938
  text_lora = gr.Textbox(
 
941
  lines=1,
942
  info="It has to be .safetensors files, and you can also download them from Hugging Face.",
943
  )
944
+ romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=False)
945
  button_lora = gr.Button("Get and Refresh the LoRA Lists")
946
  new_lora_status = gr.HTML()
947
  button_lora.click(
 
950
  [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, lora6_gui, lora7_gui, new_lora_status]
951
  )
952
 
 
953
  with gr.Accordion("IP-Adapter", open=False, visible=True):
954
 
955
+ IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
956
+ MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
957
+
958
  with gr.Accordion("IP-Adapter 1", open=False, visible=True):
959
  image_ip1 = gr.Image(label="IP Image", type="filepath")
960
  mask_ip1 = gr.Image(label="IP Mask", type="filepath")
 
973
  image_mask_gui = gr.Image(label="Image Mask", type="filepath")
974
  strength_gui = gr.Slider(
975
  minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
976
+ info="This option adjusts the level of changes for img2img and inpainting."
977
  )
978
  image_resolution_gui = gr.Slider(
979
  minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
980
  info="The maximum proportional size of the generated image based on the uploaded image."
981
  )
982
+ controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
983
  control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
984
  control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
985
  control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
 
1001
  preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
1002
  low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
1003
  high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
1004
+ value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
1005
+ distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
1006
  recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
1007
  tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
1008
 
 
1037
  gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
1038
  return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
1039
 
1040
+ style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
1041
 
1042
  with gr.Accordion("Textual inversion", open=False, visible=False):
1043
  active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
 
1083
  schedule_prediction_type_gui = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
1084
  guidance_rescale_gui = gr.Number(label="CFG rescale:", value=0., step=0.01, minimum=0., maximum=1.5)
1085
  save_generated_images_gui = gr.Checkbox(value=True, label="Create a download link for the images")
 
 
1086
  filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
1087
  hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
1088
  hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
1089
  generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
 
 
 
 
1090
 
1091
  with gr.Accordion("More settings", open=False, visible=False):
1092
  loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
1093
  retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
1094
+ leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
1095
+ disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
1096
+ display_images_gui = gr.Checkbox(value=False, label="Display Images")
1097
+ image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
1098
+ image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
1099
  retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1100
  retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
1101
  retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
1102
+ xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
 
 
1103
 
1104
  with gr.Accordion("Examples and help", open=False, visible=True):
1105
  gr.Markdown(HELP_GUI)
 
1156
  # "hsl(360, 120, 120)" # in fact any valid colorstring
1157
  ]
1158
  ),
1159
+ eraser=gr.Eraser(default_size="16")
 
 
 
1160
  )
 
 
 
 
 
 
 
1161
  invert_mask = gr.Checkbox(value=False, label="Invert mask")
1162
  btn = gr.Button("Create mask")
 
1163
  with gr.Column(scale=1):
1164
  img_source = gr.Image(interactive=False)
1165
  img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
 
1190
 
1191
  with gr.Row():
1192
  with gr.Column():
 
 
 
1193
  image_up_tab = gr.Image(label="Image", type="pil", sources=["upload"])
1194
+ upscaler_tab = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS[9:], value=UPSCALER_KEYS[11])
1195
  upscaler_size_tab = gr.Slider(minimum=1., maximum=4., step=0.1, value=1.1, label="Upscale by")
1196
  generate_button_up_tab = gr.Button(value="START UPSCALE", variant="primary")
1197
 
 
1199
  result_up_tab = gr.Image(label="Result", type="pil", interactive=False, format="png")
1200
 
1201
  generate_button_up_tab.click(
1202
+ fn=esrgan_upscale,
1203
  inputs=[image_up_tab, upscaler_tab, upscaler_size_tab],
1204
  outputs=[result_up_tab],
1205
  )
 
1218
  outputs=[load_model_gui],
1219
  queue=True,
1220
  show_progress="minimal",
 
1221
  ).success(
1222
  fn=sd_gen_generate_pipeline, # fn=sd_gen.generate_pipeline,
1223
  inputs=[
 
1271
  prompt_syntax_gui,
1272
  upscaler_model_path_gui,
1273
  upscaler_increases_size_gui,
1274
+ esrgan_tile_gui,
1275
+ esrgan_tile_overlap_gui,
1276
  hires_steps_gui,
1277
  hires_denoising_strength_gui,
1278
  hires_sampler_gui,
 
1297
  t2i_adapter_preprocessor_gui,
1298
  adapter_conditioning_scale_gui,
1299
  adapter_conditioning_factor_gui,
1300
+ xformers_memory_efficient_attention_gui,
1301
  free_u_gui,
1302
  generator_in_cpu_gui,
1303
  adetailer_inpaint_only_gui,
 
1336
  mode_ip2,
1337
  scale_ip2,
1338
  pag_scale_gui,
 
 
 
1339
  load_lora_cpu_gui,
1340
  verbose_info_gui,
1341
  gpu_duration_gui,
 
1343
  outputs=[load_model_gui, result_images, actual_task_info],
1344
  queue=True,
1345
  show_progress="minimal",
 
1346
  )
1347
 
1348
+ app.queue()
1349
+
1350
+ app.launch(
1351
+ show_error=True,
1352
+ debug=True,
1353
+ allowed_paths=["./images/"],
1354
+ )
 
 
 
constants.py CHANGED
@@ -4,72 +4,51 @@ from stablepy import (
4
  scheduler_names,
5
  SD15_TASKS,
6
  SDXL_TASKS,
7
- ALL_BUILTIN_UPSCALERS,
8
- IP_ADAPTERS_SD,
9
- IP_ADAPTERS_SDXL,
10
- PROMPT_WEIGHT_OPTIONS_PRIORITY,
11
  )
12
 
13
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
14
-
15
  # - **Download Models**
16
- DOWNLOAD_MODEL = "https://huggingface.co/zuv0/test/resolve/main/milkyWonderland_v40.safetensors"
17
 
18
  # - **Download VAEs**
19
- DOWNLOAD_VAE = "https://huggingface.co/Anzhc/Anzhcs-VAEs/resolve/main/SDXL%20Anime%20VAE%20Dec-only%20B3.safetensors, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
20
 
21
  # - **Download LoRAs**
22
  DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
23
 
24
  LOAD_DIFFUSERS_FORMAT_MODEL = [
25
- 'TestOrganizationPleaseIgnore/potato_quality_anime_plzwork_sdxl',
26
- 'TestOrganizationPleaseIgnore/rinAnim8drawIllustriousXL_v20_sdxl',
27
- 'TestOrganizationPleaseIgnore/perfectrsbmixIllustrious_definitiveiota_sdxl',
28
  'stabilityai/stable-diffusion-xl-base-1.0',
29
  'Laxhar/noobai-XL-1.1',
30
- 'Laxhar/noobai-XL-Vpred-1.0',
31
  'black-forest-labs/FLUX.1-dev',
32
- 'black-forest-labs/FLUX.1-Krea-dev',
33
  'John6666/blue-pencil-flux1-v021-fp8-flux',
34
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
35
  'John6666/xe-anime-flux-v04-fp8-flux',
36
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
37
  'John6666/carnival-unchained-v10-fp8-flux',
 
38
  'Freepik/flux.1-lite-8B-alpha',
39
  'shauray/FluxDev-HyperSD-merged',
40
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
41
  'terminusresearch/FluxBooru-v0.3',
42
- 'black-forest-labs/FLUX.1-schnell',
43
- # 'ostris/OpenFLUX.1',
44
  'shuttleai/shuttle-3-diffusion',
45
  'Laxhar/noobai-XL-1.0',
 
46
  'Laxhar/noobai-XL-0.77',
47
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
48
  'Laxhar/noobai-XL-0.6',
49
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
50
  'John6666/noobai-cyberfix-v10-sdxl',
51
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
52
- 'John6666/ripplemix-noob-vpred10-illustrious01-v14-sdxl',
53
- 'John6666/sigmaih-15-sdxl',
54
- 'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
55
- 'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
56
- 'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
57
- 'martineux/nova-unreal10',
58
- 'John6666/mistoon-anime-v10illustrious-sdxl',
59
- 'John6666/hassaku-xl-illustrious-v22-sdxl',
60
- 'John6666/hassaku-xl-illustrious-v31-sdxl',
61
  'John6666/haruki-mix-illustrious-v10-sdxl',
62
  'John6666/noobreal-v10-sdxl',
63
  'John6666/complicated-noobai-merge-vprediction-sdxl',
64
- 'Laxhar/noobai-XL-Vpred-0.9r',
65
- 'Laxhar/noobai-XL-Vpred-0.75s',
66
- 'Laxhar/noobai-XL-Vpred-0.75',
67
  'Laxhar/noobai-XL-Vpred-0.65s',
68
  'Laxhar/noobai-XL-Vpred-0.65',
69
  'Laxhar/noobai-XL-Vpred-0.6',
70
- 'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
71
- 'John6666/cat-tower-noobai-xl-checkpoint-v15vpred-sdxl',
72
- 'John6666/cat-tower-noobai-xl-checkpoint-v20-vpred-sdxl',
73
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
74
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
75
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
@@ -79,47 +58,18 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
79
  'John6666/illustrious-pencil-xl-v200-sdxl',
80
  'John6666/obsession-illustriousxl-v21-sdxl',
81
  'John6666/obsession-illustriousxl-v30-sdxl',
82
- 'John6666/obsession-illustriousxl-v31-sdxl',
83
- 'John6666/one-obsession-13-sdxl',
84
- 'John6666/one-obsession-14-24d-sdxl',
85
- 'John6666/one-obsession-15-noobai-sdxl',
86
- 'John6666/one-obsession-v16-noobai-sdxl',
87
- 'John6666/one-obsession-17-red-sdxl',
88
- 'martineux/oneobs18',
89
- 'martineux/oneobsession19',
90
- 'John6666/cat-tower-noobai-xl-checkpoint-v14-epsilon-pred-sdxl',
91
- 'martineux/cattower-chenkin-xl',
92
- 'John6666/prefect-illustrious-xl-v3-sdxl',
93
- 'martineux/perfect4',
94
- 'martineux/prefectIllustriousXL_v5',
95
  'John6666/wai-nsfw-illustrious-v70-sdxl',
96
- 'John6666/wai-nsfw-illustrious-sdxl-v140-sdxl',
97
- 'martineux/waiIllustriousSDXL_v160',
98
  'John6666/illustrious-pony-mix-v3-sdxl',
99
- 'John6666/nova-anime-xl-il-v90-sdxl',
100
- 'John6666/nova-anime-xl-il-v110-sdxl',
101
- 'frankjoshua/novaAnimeXL_ilV140',
102
- 'John6666/nova-orange-xl-re-v10-sdxl',
103
- 'John6666/nova-orange-xl-v110-sdxl',
104
- 'John6666/nova-orange-xl-re-v20-sdxl',
105
- 'John6666/nova-unreal-xl-v60-sdxl',
106
- 'John6666/nova-unreal-xl-v70-sdxl',
107
- 'John6666/nova-unreal-xl-v80-sdxl',
108
- 'martineux/nova-unreal10',
109
- 'John6666/nova-cartoon-xl-v40-sdxl',
110
- 'martineux/novacartoon6',
111
- 'martineux/novareal8',
112
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
113
  'eienmojiki/Anything-XL',
114
  'eienmojiki/Starry-XL-v5.2',
115
- 'votepurchase/plantMilkModelSuite_walnut',
116
  'John6666/meinaxl-v2-sdxl',
117
  'Eugeoter/artiwaifu-diffusion-2.0',
118
  'comin/IterComp',
119
- 'John6666/epicrealism-xl-v8kiss-sdxl',
120
  'John6666/epicrealism-xl-v10kiss2-sdxl',
121
- 'John6666/epicrealism-xl-vxiabeast-sdxl',
122
- 'John6666/epicrealism-xl-vxvii-crystal-clear-realism-sdxl',
123
  'misri/zavychromaxl_v80',
124
  'SG161222/RealVisXL_V4.0',
125
  'SG161222/RealVisXL_V5.0',
@@ -131,14 +81,11 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
131
  'John6666/ras-real-anime-screencap-v1-sdxl',
132
  'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
133
  'John6666/mistoon-anime-ponyalpha-sdxl',
134
- 'John6666/mistoon-xl-copper-v20fast-sdxl',
135
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
136
  'John6666/t-ponynai3-v51-sdxl',
137
  'John6666/t-ponynai3-v65-sdxl',
138
- 'John6666/t-ponynai3-v7-sdxl',
139
  'John6666/prefect-pony-xl-v3-sdxl',
140
  'John6666/prefect-pony-xl-v4-sdxl',
141
- 'John6666/prefect-pony-xl-v50-sdxl',
142
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
143
  'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
144
  'John6666/wai-real-mix-v11-sdxl',
@@ -146,48 +93,24 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
146
  'John6666/wai-c-v6-sdxl',
147
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
148
  'John6666/sifw-annihilation-xl-v2-sdxl',
149
- 'John6666/sifw-annihilation-xl-v305illustrious-beta-sdxl',
150
  'John6666/photo-realistic-pony-v5-sdxl',
151
  'John6666/pony-realism-v21main-sdxl',
152
  'John6666/pony-realism-v22main-sdxl',
153
- 'John6666/pony-realism-v23-ultra-sdxl',
 
154
  'John6666/cyberrealistic-pony-v65-sdxl',
155
- 'John6666/cyberrealistic-pony-v7-sdxl',
156
- 'John6666/cyberrealistic-pony-v127-alternative-sdxl',
157
  'GraydientPlatformAPI/realcartoon-pony-diffusion',
158
  'John6666/nova-anime-xl-pony-v5-sdxl',
159
  'John6666/autismmix-sdxl-autismmix-pony-sdxl',
160
  'John6666/aimz-dream-real-pony-mix-v3-sdxl',
161
- 'John6666/prefectious-xl-nsfw-v10-sdxl',
162
- 'GraydientPlatformAPI/iniverseponyRealGuofeng49',
163
  'John6666/duchaiten-pony-real-v11fix-sdxl',
164
  'John6666/duchaiten-pony-real-v20-sdxl',
165
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
166
  'KBlueLeaf/Kohaku-XL-Zeta',
167
  'cagliostrolab/animagine-xl-3.1',
168
- 'cagliostrolab/animagine-xl-4.0',
169
  'yodayo-ai/kivotos-xl-2.0',
170
  'yodayo-ai/holodayo-xl-2.1',
171
  'yodayo-ai/clandestine-xl-1.0',
172
- 'Raelina/Raehoshi-illust-XL-8',
173
- 'johnkillington/chenkinxmilfynoobai_v20-MLX',
174
- 'martineux/unholydesire5-xl',
175
- 'abacaxthebrave/Unholy_Desire_Mix_ILXL',
176
- 'martineux/diving5',
177
- 'martineux/diving7',
178
- 'martineux/mergestein-animuplus-xl',
179
- 'martineux/mergestein-uncannyr2-xl',
180
- 'martineux/steincustom_V12',
181
- 'martineux/miaomiao-realskin1p25-xl',
182
- 'martineux/miaov18',
183
- 'John6666/garage-mix-noob-vpred-eps-v10-vpred-sdxl',
184
- 'TestOrganizationPleaseIgnore/perfectrsbmixIllustrious_definitivelambda_sdxl',
185
- 'TestOrganizationPleaseIgnore/rinFlanimeIllustrious_v27_sdxl',
186
- 'TestOrganizationPleaseIgnore/rinAnimepopcute_v30_sdxl',
187
- 'TestOrganizationPleaseIgnore/potato_quality_anime_zzz_sdxl',
188
- 'https://huggingface.co/chemwolf/Karmix-XL-v0/resolve/main/Karmix-XL-v0.safetensors?download=true',
189
- 'https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16',
190
- 'https://civitai.com/models/30240?modelVersionId=125771',
191
  'digiplay/majicMIX_sombre_v2',
192
  'digiplay/majicMIX_realistic_v6',
193
  'digiplay/majicMIX_realistic_v7',
@@ -197,9 +120,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
197
  'digiplay/darkphoenix3D_v1.1',
198
  'digiplay/BeenYouLiteL11_diffusers',
199
  'GraydientPlatformAPI/rev-animated2',
200
- 'myxlmynx/cyberrealistic_classic40',
201
- 'GraydientPlatformAPI/cyberreal6',
202
- 'GraydientPlatformAPI/cyberreal5',
203
  'youknownothing/deliberate-v6',
204
  'GraydientPlatformAPI/deliberate-cyber3',
205
  'GraydientPlatformAPI/picx-real',
@@ -213,9 +134,9 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
213
  'GraydientPlatformAPI/realcartoon3d-17',
214
  'GraydientPlatformAPI/realcartoon-pixar11',
215
  'GraydientPlatformAPI/realcartoon-real17',
 
216
  ]
217
 
218
-
219
  DIFFUSERS_FORMAT_LORAS = [
220
  "nerijs/animation2k-flux",
221
  "XLabs-AI/flux-RealismLora",
@@ -235,13 +156,9 @@ DIRECTORY_MODELS = 'models'
235
  DIRECTORY_LORAS = 'loras'
236
  DIRECTORY_VAES = 'vaes'
237
  DIRECTORY_EMBEDS = 'embedings'
238
- DIRECTORY_UPSCALERS = 'upscalers'
239
 
 
240
  STORAGE_ROOT = "/home/user/"
241
- CACHE_HF_ROOT = os.path.expanduser("~/.cache/huggingface")
242
- CACHE_HF = os.path.join(CACHE_HF_ROOT, "hub")
243
- if IS_ZERO_GPU:
244
- os.environ["HF_HOME"] = CACHE_HF
245
 
246
  TASK_STABLEPY = {
247
  'txt2img': 'txt2img',
@@ -267,23 +184,28 @@ TASK_STABLEPY = {
267
  'optical pattern ControlNet': 'pattern',
268
  'recolor ControlNet': 'recolor',
269
  'tile ControlNet': 'tile',
270
- 'repaint ControlNet': 'repaint',
271
  }
272
 
273
  TASK_MODEL_LIST = list(TASK_STABLEPY.keys())
274
 
275
  UPSCALER_DICT_GUI = {
276
  None: None,
277
- **{bu: bu for bu in ALL_BUILTIN_UPSCALERS if bu not in ["HAT x4", "DAT x4", "DAT x3", "DAT x2", "SwinIR 4x"]},
278
- # "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
 
 
 
 
 
 
 
279
  "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
280
- # "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
281
- # "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
282
- # "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
283
- # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
284
- # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
285
  "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
286
- "Real-ESRGAN-Anime-finetuning": "https://huggingface.co/danhtran2mind/Real-ESRGAN-Anime-finetuning/resolve/main/Real-ESRGAN-Anime-finetuning.pth",
287
  "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
288
  "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
289
  "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
@@ -297,7 +219,6 @@ UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())
297
  DIFFUSERS_CONTROLNET_MODEL = [
298
  "Automatic",
299
 
300
- "brad-twinkl/controlnet-union-sdxl-1.0-promax",
301
  "xinsir/controlnet-union-sdxl-1.0",
302
  "xinsir/anime-painter",
303
  "Eugeoter/noob-sdxl-controlnet-canny",
@@ -320,6 +241,7 @@ DIFFUSERS_CONTROLNET_MODEL = [
320
  "r3gm/controlnet-recolor-sdxl-fp16",
321
  "r3gm/controlnet-openpose-twins-sdxl-1.0-fp16",
322
  "r3gm/controlnet-qr-pattern-sdxl-fp16",
 
323
  "Yakonrus/SDXL_Controlnet_Tile_Realistic_v2",
324
  "TheMistoAI/MistoLine",
325
  "briaai/BRIA-2.3-ControlNet-Recoloring",
@@ -356,9 +278,15 @@ DIFFUSERS_CONTROLNET_MODEL = [
356
  # "InstantX/FLUX.1-dev-Controlnet-Canny",
357
  ]
358
 
359
- PROMPT_W_OPTIONS = [(pwf, pwf) for pwf in PROMPT_WEIGHT_OPTIONS_PRIORITY]
360
- PROMPT_W_OPTIONS[0] = ("Classic format: (word:weight)", "Classic")
361
- PROMPT_W_OPTIONS[1] = ("Compel format: (word)weight", "Compel")
 
 
 
 
 
 
362
 
363
  WARNING_MSG_VAE = (
364
  "Use the right VAE for your model to maintain image quality. The wrong"
@@ -392,30 +320,14 @@ POST_PROCESSING_SAMPLER = ["Use same sampler"] + [
392
  name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
393
  ]
394
 
395
- IP_MODELS = []
396
- ALL_IPA = sorted(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL))
397
-
398
- for origin_name in ALL_IPA:
399
- suffixes = []
400
- if origin_name in IP_ADAPTERS_SD:
401
- suffixes.append("sd1.5")
402
- if origin_name in IP_ADAPTERS_SDXL:
403
- suffixes.append("sdxl")
404
- ref_name = f"{origin_name} ({'/'.join(suffixes)})"
405
- IP_MODELS.append((ref_name, origin_name))
406
-
407
- MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
408
-
409
  SUBTITLE_GUI = (
410
  "### This demo uses [diffusers](https://github.com/huggingface/diffusers)"
411
  " to perform different tasks in image generation."
412
  )
413
 
414
- msg_zero = "" if not IS_ZERO_GPU else "- The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'."
415
-
416
  HELP_GUI = (
417
- f"""### Help:
418
- {msg_zero}
419
  - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
420
  - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
421
  """
@@ -428,9 +340,7 @@ EXAMPLES_GUI_HELP = (
428
  3. ControlNet Canny SDXL
429
  4. Optical pattern (Optical illusion) SDXL
430
  5. Convert an image to a coloring drawing
431
- 6. V prediction model inference
432
- 7. V prediction model sd_embed variant inference
433
- 8. ControlNet OpenPose SD 1.5 and Latent upscale
434
 
435
  - Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
436
  """
@@ -539,7 +449,7 @@ EXAMPLES_GUI = [
539
  20,
540
  4.0,
541
  -1,
542
- ("loras/Coloring_book_-_LineArt.safetensors" if os.path.exists("loras/Coloring_book_-_LineArt.safetensors") else "None"),
543
  1.0,
544
  "DPM++ 2M SDE",
545
  1024,
@@ -557,54 +467,6 @@ EXAMPLES_GUI = [
557
  35,
558
  False,
559
  ],
560
- [
561
- "[mochizuki_shiina], [syuri22], newest, reimu, solo, outdoors, water, flower, lantern",
562
- "worst quality, normal quality, old, sketch,",
563
- 28,
564
- 7.0,
565
- -1,
566
- "None",
567
- 0.33,
568
- "DPM 3M Ef",
569
- 1600,
570
- 1024,
571
- "Laxhar/noobai-XL-Vpred-1.0",
572
- "txt2img",
573
- "color_image.png", # img conttol
574
- 1024, # img resolution
575
- 0.35, # strength
576
- 1.0, # cn scale
577
- 0.0, # cn start
578
- 1.0, # cn end
579
- "Classic",
580
- None,
581
- 30,
582
- False,
583
- ],
584
- [
585
- "[mochizuki_shiina], [syuri22], newest, multiple girls, 2girls, earrings, jewelry, gloves, purple eyes, black hair, looking at viewer, nail polish, hat, smile, open mouth, fingerless gloves, sleeveless, :d, upper body, blue eyes, closed mouth, black gloves, hands up, long hair, shirt, bare shoulders, white headwear, blush, black headwear, blue nails, upper teeth only, short hair, white gloves, white shirt, teeth, rabbit hat, star earrings, purple nails, pink hair, detached sleeves, fingernails, fake animal ears, animal hat, sleeves past wrists, black shirt, medium hair, fur trim, sleeveless shirt, turtleneck, long sleeves, rabbit ears, star \\(symbol\\)",
586
- "worst quality, normal quality, old, sketch,",
587
- 28,
588
- 7.0,
589
- -1,
590
- "None",
591
- 0.33,
592
- "DPM 3M Ef",
593
- 1600,
594
- 1024,
595
- "Laxhar/noobai-XL-Vpred-1.0",
596
- "txt2img",
597
- "color_image.png", # img conttol
598
- 1024, # img resolution
599
- 0.35, # strength
600
- 1.0, # cn scale
601
- 0.0, # cn start
602
- 1.0, # cn end
603
- "Classic-sd_embed",
604
- None,
605
- 30,
606
- False,
607
- ],
608
  [
609
  "1girl,face,curly hair,red hair,white background,",
610
  "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,",
@@ -634,7 +496,6 @@ EXAMPLES_GUI = [
634
  RESOURCES = (
635
  """### Resources
636
  - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
637
- - Try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
638
- - `DiffuseCraft` in Colab:[link](https://github.com/R3gm/DiffuseCraft?tab=readme-ov-file#diffusecraft).
639
  """
640
  )
 
4
  scheduler_names,
5
  SD15_TASKS,
6
  SDXL_TASKS,
 
 
 
 
7
  )
8
 
 
 
9
  # - **Download Models**
10
+ DOWNLOAD_MODEL = "https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"
11
 
12
  # - **Download VAEs**
13
+ DOWNLOAD_VAE = "https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
14
 
15
  # - **Download LoRAs**
16
  DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
17
 
18
  LOAD_DIFFUSERS_FORMAT_MODEL = [
 
 
 
19
  'stabilityai/stable-diffusion-xl-base-1.0',
20
  'Laxhar/noobai-XL-1.1',
 
21
  'black-forest-labs/FLUX.1-dev',
 
22
  'John6666/blue-pencil-flux1-v021-fp8-flux',
23
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
24
  'John6666/xe-anime-flux-v04-fp8-flux',
25
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
26
  'John6666/carnival-unchained-v10-fp8-flux',
27
+ 'John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux',
28
  'Freepik/flux.1-lite-8B-alpha',
29
  'shauray/FluxDev-HyperSD-merged',
30
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
31
  'terminusresearch/FluxBooru-v0.3',
32
+ 'ostris/OpenFLUX.1',
 
33
  'shuttleai/shuttle-3-diffusion',
34
  'Laxhar/noobai-XL-1.0',
35
+ 'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
36
  'Laxhar/noobai-XL-0.77',
37
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
38
  'Laxhar/noobai-XL-0.6',
39
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
40
  'John6666/noobai-cyberfix-v10-sdxl',
41
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
42
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
43
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
44
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
45
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
 
 
 
 
 
46
  'John6666/haruki-mix-illustrious-v10-sdxl',
47
  'John6666/noobreal-v10-sdxl',
48
  'John6666/complicated-noobai-merge-vprediction-sdxl',
 
 
 
49
  'Laxhar/noobai-XL-Vpred-0.65s',
50
  'Laxhar/noobai-XL-Vpred-0.65',
51
  'Laxhar/noobai-XL-Vpred-0.6',
 
 
 
52
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
53
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
54
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
 
58
  'John6666/illustrious-pencil-xl-v200-sdxl',
59
  'John6666/obsession-illustriousxl-v21-sdxl',
60
  'John6666/obsession-illustriousxl-v30-sdxl',
 
61
  'John6666/wai-nsfw-illustrious-v70-sdxl',
 
 
62
  'John6666/illustrious-pony-mix-v3-sdxl',
63
+ 'John6666/nova-anime-xl-illustriousv10-sdxl',
64
+ 'John6666/nova-orange-xl-v30-sdxl',
 
 
65
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
66
  'eienmojiki/Anything-XL',
67
  'eienmojiki/Starry-XL-v5.2',
 
68
  'John6666/meinaxl-v2-sdxl',
69
  'Eugeoter/artiwaifu-diffusion-2.0',
70
  'comin/IterComp',
 
71
  'John6666/epicrealism-xl-v10kiss2-sdxl',
72
+ 'John6666/epicrealism-xl-v8kiss-sdxl',
 
73
  'misri/zavychromaxl_v80',
74
  'SG161222/RealVisXL_V4.0',
75
  'SG161222/RealVisXL_V5.0',
 
81
  'John6666/ras-real-anime-screencap-v1-sdxl',
82
  'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
83
  'John6666/mistoon-anime-ponyalpha-sdxl',
 
84
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
85
  'John6666/t-ponynai3-v51-sdxl',
86
  'John6666/t-ponynai3-v65-sdxl',
 
87
  'John6666/prefect-pony-xl-v3-sdxl',
88
  'John6666/prefect-pony-xl-v4-sdxl',
 
89
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
90
  'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
91
  'John6666/wai-real-mix-v11-sdxl',
 
93
  'John6666/wai-c-v6-sdxl',
94
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
95
  'John6666/sifw-annihilation-xl-v2-sdxl',
 
96
  'John6666/photo-realistic-pony-v5-sdxl',
97
  'John6666/pony-realism-v21main-sdxl',
98
  'John6666/pony-realism-v22main-sdxl',
99
+ 'John6666/cyberrealistic-pony-v63-sdxl',
100
+ 'John6666/cyberrealistic-pony-v64-sdxl',
101
  'John6666/cyberrealistic-pony-v65-sdxl',
 
 
102
  'GraydientPlatformAPI/realcartoon-pony-diffusion',
103
  'John6666/nova-anime-xl-pony-v5-sdxl',
104
  'John6666/autismmix-sdxl-autismmix-pony-sdxl',
105
  'John6666/aimz-dream-real-pony-mix-v3-sdxl',
 
 
106
  'John6666/duchaiten-pony-real-v11fix-sdxl',
107
  'John6666/duchaiten-pony-real-v20-sdxl',
108
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
109
  'KBlueLeaf/Kohaku-XL-Zeta',
110
  'cagliostrolab/animagine-xl-3.1',
 
111
  'yodayo-ai/kivotos-xl-2.0',
112
  'yodayo-ai/holodayo-xl-2.1',
113
  'yodayo-ai/clandestine-xl-1.0',
 
 
114
  'digiplay/majicMIX_sombre_v2',
115
  'digiplay/majicMIX_realistic_v6',
116
  'digiplay/majicMIX_realistic_v7',
 
120
  'digiplay/darkphoenix3D_v1.1',
121
  'digiplay/BeenYouLiteL11_diffusers',
122
  'GraydientPlatformAPI/rev-animated2',
123
+ 'youknownothing/cyberrealistic_v50',
 
 
124
  'youknownothing/deliberate-v6',
125
  'GraydientPlatformAPI/deliberate-cyber3',
126
  'GraydientPlatformAPI/picx-real',
 
134
  'GraydientPlatformAPI/realcartoon3d-17',
135
  'GraydientPlatformAPI/realcartoon-pixar11',
136
  'GraydientPlatformAPI/realcartoon-real17',
137
+ 'nitrosocke/Ghibli-Diffusion',
138
  ]
139
 
 
140
  DIFFUSERS_FORMAT_LORAS = [
141
  "nerijs/animation2k-flux",
142
  "XLabs-AI/flux-RealismLora",
 
156
  DIRECTORY_LORAS = 'loras'
157
  DIRECTORY_VAES = 'vaes'
158
  DIRECTORY_EMBEDS = 'embedings'
 
159
 
160
+ CACHE_HF = "/home/user/.cache/huggingface/hub/"
161
  STORAGE_ROOT = "/home/user/"
 
 
 
 
162
 
163
  TASK_STABLEPY = {
164
  'txt2img': 'txt2img',
 
184
  'optical pattern ControlNet': 'pattern',
185
  'recolor ControlNet': 'recolor',
186
  'tile ControlNet': 'tile',
 
187
  }
188
 
189
  TASK_MODEL_LIST = list(TASK_STABLEPY.keys())
190
 
191
  UPSCALER_DICT_GUI = {
192
  None: None,
193
+ "Lanczos": "Lanczos",
194
+ "Nearest": "Nearest",
195
+ 'Latent': 'Latent',
196
+ 'Latent (antialiased)': 'Latent (antialiased)',
197
+ 'Latent (bicubic)': 'Latent (bicubic)',
198
+ 'Latent (bicubic antialiased)': 'Latent (bicubic antialiased)',
199
+ 'Latent (nearest)': 'Latent (nearest)',
200
+ 'Latent (nearest-exact)': 'Latent (nearest-exact)',
201
+ "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
202
  "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
203
+ "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
204
+ "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
205
+ "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
206
+ "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
207
+ "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
208
  "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
 
209
  "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
210
  "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
211
  "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
 
219
  DIFFUSERS_CONTROLNET_MODEL = [
220
  "Automatic",
221
 
 
222
  "xinsir/controlnet-union-sdxl-1.0",
223
  "xinsir/anime-painter",
224
  "Eugeoter/noob-sdxl-controlnet-canny",
 
241
  "r3gm/controlnet-recolor-sdxl-fp16",
242
  "r3gm/controlnet-openpose-twins-sdxl-1.0-fp16",
243
  "r3gm/controlnet-qr-pattern-sdxl-fp16",
244
+ "brad-twinkl/controlnet-union-sdxl-1.0-promax",
245
  "Yakonrus/SDXL_Controlnet_Tile_Realistic_v2",
246
  "TheMistoAI/MistoLine",
247
  "briaai/BRIA-2.3-ControlNet-Recoloring",
 
278
  # "InstantX/FLUX.1-dev-Controlnet-Canny",
279
  ]
280
 
281
+ PROMPT_W_OPTIONS = [
282
+ ("Compel format: (word)weight", "Compel"),
283
+ ("Classic format: (word:weight)", "Classic"),
284
+ ("Classic-original format: (word:weight)", "Classic-original"),
285
+ ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
286
+ ("Classic-sd_embed format: (word:weight)", "Classic-sd_embed"),
287
+ ("Classic-ignore", "Classic-ignore"),
288
+ ("None", "None"),
289
+ ]
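One practical note on this ordering (an observation, not part of the diff): the Generation tab picks its default via PROMPT_W_OPTIONS[1][1], which resolves to "Classic" with this list.

assert PROMPT_W_OPTIONS[1][1] == "Classic"  # default used by prompt_syntax_gui in app.py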
290
 
291
  WARNING_MSG_VAE = (
292
  "Use the right VAE for your model to maintain image quality. The wrong"
 
320
  name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
321
  ]
322
 
 
 
323
  SUBTITLE_GUI = (
324
  "### This demo uses [diffusers](https://github.com/huggingface/diffusers)"
325
  " to perform different tasks in image generation."
326
  )
327
 
 
 
328
  HELP_GUI = (
329
+ """### Help:
330
+ - The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; therefore, if you submit expensive tasks, the operation may be canceled with 'GPU TASK ABORTED' once the maximum allowed time is reached.
331
  - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
332
  - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
333
  """
 
340
  3. ControlNet Canny SDXL
341
  4. Optical pattern (Optical illusion) SDXL
342
  5. Convert an image to a coloring drawing
343
+ 6. ControlNet OpenPose SD 1.5 and Latent upscale
 
 
344
 
345
  - Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
346
  """
 
449
  20,
450
  4.0,
451
  -1,
452
+ "loras/Coloring_book_-_LineArt.safetensors",
453
  1.0,
454
  "DPM++ 2M SDE",
455
  1024,
 
467
  35,
468
  False,
469
  ],
 
 
470
  [
471
  "1girl,face,curly hair,red hair,white background,",
472
  "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,",
 
496
  RESOURCES = (
497
  """### Resources
498
  - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
499
+ - You can also try the image generator in Colab’s free tier, which provides a free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
 
500
  """
501
  )
image_processor.py CHANGED
@@ -92,8 +92,8 @@ def preprocessor_tab():
92
  pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
93
  pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
94
  pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
95
- pre_value_threshold = gr.Slider(minimum=0., maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
96
- pre_distance_threshold = gr.Slider(minimum=0., maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
97
  pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
98
  pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
99
  pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
 
92
  pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
93
  pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
94
  pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
95
+ pre_value_threshold = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
96
+ pre_distance_threshold = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
97
  pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
98
  pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
99
  pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
packages.txt CHANGED
@@ -1,3 +1,3 @@
1
  git-lfs
2
- aria2
3
  ffmpeg
 
1
  git-lfs
2
+ aria2 -y
3
  ffmpeg
pre-requirements.txt DELETED
@@ -1 +0,0 @@
1
- pip>=23.0.0
 
 
requirements.txt CHANGED
@@ -1,13 +1,5 @@
1
- stablepy==0.6.5
2
- torch==2.5.1
3
- diffusers
4
  gdown
5
  opencv-python
6
- unidecode
7
- pydantic==2.10.6
8
- huggingface_hub
9
- hf_transfer
10
- hf_xet
11
- spaces
12
- gradio==5.44.1
13
- matplotlib-inline
 
1
+ git+https://github.com/R3gm/stablepy.git@a9fe2dc # -b refactor_sampler_fix
2
+ torch==2.2.0
 
3
  gdown
4
  opencv-python
5
+ unidecode
 
 
 
 
 
 
 
utils.py CHANGED
@@ -1,714 +1,485 @@
1
- import os
2
- import re
3
- import gradio as gr
4
- from constants import (
5
- DIFFUSERS_FORMAT_LORAS,
6
- CIVITAI_API_KEY,
7
- HF_TOKEN,
8
- MODEL_TYPE_CLASS,
9
- DIRECTORY_LORAS,
10
- DIRECTORY_MODELS,
11
- DIFFUSECRAFT_CHECKPOINT_NAME,
12
- CACHE_HF_ROOT,
13
- CACHE_HF,
14
- STORAGE_ROOT,
15
- )
16
- from huggingface_hub import HfApi, get_hf_file_metadata, snapshot_download
17
- from diffusers import DiffusionPipeline
18
- from huggingface_hub import model_info as model_info_data
19
- from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
- from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
- from pathlib import PosixPath
22
- from unidecode import unidecode
23
- import urllib.parse
24
- import copy
25
- import requests
26
- from requests.adapters import HTTPAdapter
27
- from urllib3.util import Retry
28
- import shutil
29
- import subprocess
30
- import json
31
- import html as _html
32
-
33
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
34
- USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
35
- MODEL_ARCH = {
36
- 'stable-diffusion-xl-v1-base/lora': "Stable Diffusion XL (Illustrious, Pony, NoobAI)",
37
- 'stable-diffusion-v1/lora': "Stable Diffusion 1.5",
38
- 'flux-1-dev/lora': "Flux",
39
- }
40
-
41
-
42
- def read_safetensors_header_from_url(url: str):
43
- """Read safetensors header from a remote Hugging Face file."""
44
- meta = get_hf_file_metadata(url)
45
-
46
- # Step 1: first 8 bytes → header length
47
- resp = requests.get(meta.location, headers={"Range": "bytes=0-7"})
48
- resp.raise_for_status()
49
- header_len = int.from_bytes(resp.content, "little")
50
-
51
- # Step 2: fetch full header JSON
52
- end = 8 + header_len - 1
53
- resp = requests.get(meta.location, headers={"Range": f"bytes=8-{end}"})
54
- resp.raise_for_status()
55
- header_json = resp.content.decode("utf-8")
56
-
57
- return json.loads(header_json)
58
-
59
-
60
- def read_safetensors_header_from_file(path: str):
61
- """Read safetensors header from a local file."""
62
- with open(path, "rb") as f:
63
- # Step 1: first 8 bytes → header length
64
- header_len = int.from_bytes(f.read(8), "little")
65
-
66
- # Step 2: read header JSON
67
- header_json = f.read(header_len).decode("utf-8")
68
-
69
- return json.loads(header_json)
70
-
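For reference, a hedged usage sketch for the two readers above: the format they rely on is an 8-byte little-endian header length followed by the JSON header, so a tiny synthetic file round-trips through read_safetensors_header_from_file (the metadata is made up for the example):

import json
import struct
import tempfile

header = json.dumps({"__metadata__": {"modelspec.title": "demo"}}).encode("utf-8")
with tempfile.NamedTemporaryFile(suffix=".safetensors", delete=False) as f:
    f.write(struct.pack("<Q", len(header)))  # first 8 bytes: little-endian header length
    f.write(header)                          # then the JSON header itself
    tmp_path = f.name

print(read_safetensors_header_from_file(tmp_path))  # -> {'__metadata__': {'modelspec.title': 'demo'}}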
71
-
72
- class LoraHeaderInformation:
73
- """
74
- Encapsulates parsed info from a LoRA JSON header and provides
75
- a compact HTML summary via .to_html().
76
- """
77
-
78
- def __init__(self, json_data):
79
- self.original_json = copy.deepcopy(json_data or {})
80
-
81
- # Check if text encoder was trained
82
- # guard for json_data being a mapping
83
- try:
84
- self.text_encoder_trained = any("text_model" in ln for ln in json_data)
85
- except Exception:
86
- self.text_encoder_trained = False
87
-
88
- # Metadata (may be None)
89
- metadata = (json_data or {}).get("__metadata__", None)
90
- self.metadata = metadata
91
-
92
- # Default values
93
- self.architecture = "undefined"
94
- self.prediction_type = "undefined"
95
- self.base_model = "undefined"
96
- self.author = "undefined"
97
- self.title = "undefined"
98
- self.common_tags_list = []
99
-
100
- if metadata:
101
- self.architecture = MODEL_ARCH.get(
102
- metadata.get('modelspec.architecture', None),
103
- "undefined"
104
- )
105
-
106
- self.prediction_type = metadata.get('modelspec.prediction_type', "undefined")
107
- self.base_model = metadata.get('ss_sd_model_name', "undefined")
108
- self.author = metadata.get('modelspec.author', "undefined")
109
- self.title = metadata.get('modelspec.title', "undefined")
110
-
111
- base_model_hash = metadata.get('ss_new_sd_model_hash', None) # SHA256
112
- # AUTOV1 ss_sd_model_hash
113
- # https://civitai.com/api/v1/model-versions/by-hash/{base_model_hash} # Info
114
- if base_model_hash:
115
- self.base_model += f" hash={base_model_hash}"
116
-
117
- # Extract tags
118
- try:
119
- tags = metadata.get('ss_tag_frequency') if "ss_tag_frequency" in metadata else metadata.get('ss_datasets', "")
120
- tags = json.loads(tags) if tags else ""
121
-
122
- if isinstance(tags, list):
123
- tags = tags[0].get("tag_frequency", {})
124
-
125
- if tags:
126
- self.common_tags_list = list(tags[list(tags.keys())[0]].keys())
127
- except Exception:
128
- self.common_tags_list = []
129
-
130
- def to_dict(self):
131
- """Return a plain dict summary of parsed fields."""
132
- return {
133
- "architecture": self.architecture,
134
- "prediction_type": self.prediction_type,
135
- "base_model": self.base_model,
136
- "author": self.author,
137
- "title": self.title,
138
- "text_encoder_trained": bool(self.text_encoder_trained),
139
- "common_tags": self.common_tags_list,
140
- }
141
-
142
- def to_html(self, limit_tags=20):
143
- """
144
- Return a compact HTML snippet (string) showing the parsed info
145
- in a small font. Values are HTML-escaped.
146
- """
147
- # helper to escape
148
- esc = _html.escape
149
-
150
- rows = [
151
- ("Title", esc(str(self.title))),
152
- ("Author", esc(str(self.author))),
153
- ("Architecture", esc(str(self.architecture))),
154
- ("Base model", esc(str(self.base_model))),
155
- ("Prediction type", esc(str(self.prediction_type))),
156
- ("Text encoder trained", esc(str(self.text_encoder_trained))),
157
- ("Reference tags", esc(str(", ".join(self.common_tags_list[:limit_tags])))),
158
- ]
159
-
160
- # small, compact table with inline styling (small font)
161
- html_rows = "".join(
162
- f"<tr><th style='text-align:left;padding:2px 6px;white-space:nowrap'>{k}</th>"
163
- f"<td style='padding:2px 6px'>{v}</td></tr>"
164
- for k, v in rows
165
- )
166
-
167
- html_snippet = (
168
- "<div style='font-family:system-ui, -apple-system, \"Segoe UI\", Roboto, "
169
- "Helvetica, Arial, \"Noto Sans\", sans-serif; font-size:12px; line-height:1.2; "
170
- "'>"
171
- f"<table style='border-collapse:collapse; font-size:12px;'>"
172
- f"{html_rows}"
173
- "</table>"
174
- "</div>"
175
- )
176
-
177
- return html_snippet
178
-
179
-
180
- def request_json_data(url):
181
- model_version_id = url.split('/')[-1]
182
- if "?modelVersionId=" in model_version_id:
183
- match = re.search(r'modelVersionId=(\d+)', url)
184
- model_version_id = match.group(1)
185
-
186
- endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
187
-
188
- params = {}
189
- headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
190
- session = requests.Session()
191
- retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
192
- session.mount("https://", HTTPAdapter(max_retries=retries))
193
-
194
- try:
195
- result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
196
- result.raise_for_status()
197
- json_data = result.json()
198
- return json_data if json_data else None
199
- except Exception as e:
200
- print(f"Error: {e}")
201
- return None
202
-
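For reference, a small self-contained sketch of the version-id handling in request_json_data above, showing how both URL shapes collapse to the same Civitai endpoint (the ids are placeholders):

import re

def version_endpoint(url):
    model_version_id = url.split('/')[-1]
    if "?modelVersionId=" in model_version_id:
        model_version_id = re.search(r'modelVersionId=(\d+)', url).group(1)
    return f"https://civitai.com/api/v1/model-versions/{model_version_id}"

print(version_endpoint("https://civitai.com/models/12345?modelVersionId=67890"))
print(version_endpoint("https://civitai.com/api/download/models/135867"))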
203
-
204
- class ModelInformation:
205
- def __init__(self, json_data):
206
- self.model_version_id = json_data.get("id", "")
207
- self.model_id = json_data.get("modelId", "")
208
- self.download_url = json_data.get("downloadUrl", "")
209
- self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
210
- self.filename_url = next(
211
- (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "") and v.get("type", "Model") == "Model"), ""
212
- )
213
- self.filename_url = self.filename_url if self.filename_url else ""
214
- self.description = json_data.get("description", "")
215
- if self.description is None:
216
- self.description = ""
217
- self.model_name = json_data.get("model", {}).get("name", "")
218
- self.model_type = json_data.get("model", {}).get("type", "")
219
- self.nsfw = json_data.get("model", {}).get("nsfw", False)
220
- self.poi = json_data.get("model", {}).get("poi", False)
221
- self.images = [img.get("url", "") for img in json_data.get("images", [])]
222
- self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
223
- self.original_json = copy.deepcopy(json_data)
224
-
225
-
226
- def get_civit_params(url):
227
- try:
228
- json_data = request_json_data(url)
229
- mdc = ModelInformation(json_data)
230
- if mdc.download_url and mdc.filename_url:
231
- return mdc.download_url, mdc.filename_url, mdc.model_url
232
- else:
233
- ValueError("Invalid Civitai model URL")
234
- except Exception as e:
235
- print(f"Error retrieving Civitai metadata: {e} — fallback to direct download")
236
- return url, None, None
237
-
238
-
239
- def civ_redirect_down(url, dir_, civitai_api_key, romanize, alternative_name):
240
- filename_base = filename = None
241
-
242
- if alternative_name:
243
- output_path = os.path.join(dir_, alternative_name)
244
- if os.path.exists(output_path):
245
- return output_path, alternative_name
246
-
247
- # Follow the redirect to get the actual download URL
248
- curl_command = (
249
- f'curl -L -sI --connect-timeout 5 --max-time 5 '
250
- f'-H "Content-Type: application/json" '
251
- f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
252
- )
253
-
254
- headers = os.popen(curl_command).read()
255
-
256
- # Look for the redirected "Location" URL
257
- location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
258
-
259
- if location_match:
260
- redirect_url = location_match.group(1).strip()
261
-
262
- # Extract the filename from the redirect URL's "Content-Disposition"
263
- filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
264
- if filename_match:
265
- encoded_filename = filename_match.group(1)
266
- # Decode the URL-encoded filename
267
- decoded_filename = urllib.parse.unquote(encoded_filename)
268
-
269
- filename = unidecode(decoded_filename) if romanize else decoded_filename
270
- # print(f"Filename redirect: {filename}")
271
-
272
- filename_base = alternative_name if alternative_name else filename
273
- if not filename_base:
274
- return None, None
275
- elif os.path.exists(os.path.join(dir_, filename_base)):
276
- return os.path.join(dir_, filename_base), filename_base
277
-
278
- aria2_command = (
279
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
280
- f'-k 1M -s 16 -d "{dir_}" -o "{filename_base}" "{redirect_url}"'
281
- )
282
- r_code = os.system(aria2_command) # noqa
283
-
284
- # if r_code != 0:
285
- # raise RuntimeError(f"Failed to download file: {filename_base}. Error code: {r_code}")
286
-
287
- output_path = os.path.join(dir_, filename_base)
288
- if not os.path.exists(output_path):
289
- return None, filename_base
290
-
291
- return output_path, filename_base
292
-
293
-
294
- def civ_api_down(url, dir_, civitai_api_key, civ_filename):
295
- """
296
- This method is susceptible to being blocked because it generates a lot of temp redirect links with aria2c.
297
- If an API key limit is reached, generating a new API key and using it can fix the issue.
298
- """
299
- output_path = None
300
-
301
- url_dl = url + f"?token={civitai_api_key}"
302
- if not civ_filename:
303
- aria2_command = f'aria2c -c -x 1 -s 1 -d "{dir_}" "{url_dl}"'
304
- os.system(aria2_command)
305
- else:
306
- output_path = os.path.join(dir_, civ_filename)
307
- if not os.path.exists(output_path):
308
- aria2_command = (
309
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
310
- f'-k 1M -s 16 -d "{dir_}" -o "{civ_filename}" "{url_dl}"'
311
- )
312
- os.system(aria2_command)
313
-
314
- return output_path
315
-
316
-
317
- def drive_down(url, dir_):
318
- import gdown
319
-
320
- output_path = None
321
-
322
- drive_id, _ = gdown.parse_url.parse_url(url, warning=False)
323
- dir_files = os.listdir(dir_)
324
-
325
- for dfile in dir_files:
326
- if drive_id in dfile:
327
- output_path = os.path.join(dir_, dfile)
328
- break
329
-
330
- if not output_path:
331
- original_path = gdown.download(url, f"{dir_}/", fuzzy=True)
332
-
333
- dir_name, base_name = os.path.split(original_path)
334
- name, ext = base_name.rsplit(".", 1)
335
- new_name = f"{name}_{drive_id}.{ext}"
336
- output_path = os.path.join(dir_name, new_name)
337
-
338
- os.rename(original_path, output_path)
339
-
340
- return output_path
341
-
342
-
343
- def hf_down(url, dir_, hf_token, romanize):
344
- url = url.replace("?download=true", "")
345
- # url = urllib.parse.quote(url, safe=':/') # fix encoding
346
-
347
- filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
348
- output_path = os.path.join(dir_, filename)
349
-
350
- if os.path.exists(output_path):
351
- return output_path
352
-
353
- if "/blob/" in url:
354
- url = url.replace("/blob/", "/resolve/")
355
-
356
- if hf_token:
357
- user_header = f'"Authorization: Bearer {hf_token}"'
358
- os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
359
- else:
360
- os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
361
-
362
- return output_path
363
-
364
-
365
- def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
366
- url = url.strip()
367
- downloaded_file_path = None
368
-
369
- if "drive.google.com" in url:
370
- downloaded_file_path = drive_down(url, directory)
371
- elif "huggingface.co" in url:
372
- downloaded_file_path = hf_down(url, directory, hf_token, romanize)
373
- elif "civitai.com" in url:
374
- if not civitai_api_key:
375
- msg = "You need an API key to download Civitai models."
376
- print(f"\033[91m{msg}\033[0m")
377
- gr.Warning(msg)
378
- return None
379
-
380
- url, civ_filename, civ_page = get_civit_params(url)
381
- if civ_page and not IS_ZERO_GPU:
382
- print(f"\033[92mCivitai model: {civ_filename} [page: {civ_page}]\033[0m")
383
-
384
- downloaded_file_path, civ_filename = civ_redirect_down(url, directory, civitai_api_key, romanize, civ_filename)
385
-
386
- if not downloaded_file_path:
387
- msg = (
388
- "Download failed.\n"
389
- "If this is due to an API limit, generating a new API key may resolve the issue.\n"
390
- "Attempting to download using the old method..."
391
- )
392
- print(msg)
393
- gr.Warning(msg)
394
- downloaded_file_path = civ_api_down(url, directory, civitai_api_key, civ_filename)
395
- else:
396
- os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
397
-
398
- return downloaded_file_path
399
-
400
-
401
- def get_model_list(directory_path):
402
- model_list = []
403
- valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
404
-
405
- for filename in os.listdir(directory_path):
406
- if os.path.splitext(filename)[1] in valid_extensions:
407
- # name_without_extension = os.path.splitext(filename)[0]
408
- file_path = os.path.join(directory_path, filename)
409
- # model_list.append((name_without_extension, file_path))
410
- model_list.append(file_path)
411
- print('\033[34mFILE: ' + file_path + '\033[0m')
412
- return model_list
413
-
414
-
415
- def extract_parameters(input_string):
416
- parameters = {}
417
- input_string = input_string.replace("\n", "")
418
-
419
- if "Negative prompt:" not in input_string:
420
- if "Steps:" in input_string:
421
- input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
422
- else:
423
- msg = "Generation data is invalid."
424
- gr.Warning(msg)
425
- print(msg)
426
- parameters["prompt"] = input_string
427
- return parameters
428
-
429
- parm = input_string.split("Negative prompt:")
430
- parameters["prompt"] = parm[0].strip()
431
- if "Steps:" not in parm[1]:
432
- parameters["neg_prompt"] = parm[1].strip()
433
- return parameters
434
- parm = parm[1].split("Steps:")
435
- parameters["neg_prompt"] = parm[0].strip()
436
- input_string = "Steps:" + parm[1]
437
-
438
- # Extracting Steps
439
- steps_match = re.search(r'Steps: (\d+)', input_string)
440
- if steps_match:
441
- parameters['Steps'] = int(steps_match.group(1))
442
-
443
- # Extracting Size
444
- size_match = re.search(r'Size: (\d+x\d+)', input_string)
445
- if size_match:
446
- parameters['Size'] = size_match.group(1)
447
- width, height = map(int, parameters['Size'].split('x'))
448
- parameters['width'] = width
449
- parameters['height'] = height
450
-
451
- # Extracting other parameters
452
- other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
453
- for param in other_parameters:
454
- parameters[param[0].strip()] = param[1].strip('"')
455
-
456
- return parameters
457
-
458
-
459
- def get_my_lora(link_url, romanize):
460
- l_name = ""
461
- for url in [url.strip() for url in link_url.split(',')]:
462
- if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
463
- l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
464
- new_lora_model_list = get_model_list(DIRECTORY_LORAS)
465
- new_lora_model_list.insert(0, "None")
466
- new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
467
- msg_lora = "Downloaded"
468
- if l_name:
469
- msg_lora += f": <b>{l_name}</b>"
470
- print(msg_lora)
471
-
472
- try:
473
-             # Read metadata from the safetensors header; this also works for non-Civitai LoRAs.
474
- json_data = read_safetensors_header_from_file(l_name)
475
- metadata_lora = LoraHeaderInformation(json_data)
476
- msg_lora += "<br>" + metadata_lora.to_html()
477
- except Exception:
478
- pass
479
-
480
- return gr.update(
481
- choices=new_lora_model_list
482
- ), gr.update(
483
- choices=new_lora_model_list
484
- ), gr.update(
485
- choices=new_lora_model_list
486
- ), gr.update(
487
- choices=new_lora_model_list
488
- ), gr.update(
489
- choices=new_lora_model_list
490
- ), gr.update(
491
- choices=new_lora_model_list
492
- ), gr.update(
493
- choices=new_lora_model_list
494
- ), gr.update(
495
- value=msg_lora
496
- )
497
-
498
-
499
- def info_html(json_data, title, subtitle):
500
- return f"""
501
- <div style='padding: 0; border-radius: 10px;'>
502
- <p style='margin: 0; font-weight: bold;'>{title}</p>
503
- <details>
504
- <summary>Details</summary>
505
- <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
506
- </details>
507
- </div>
508
- """
509
-
510
-
511
- def get_model_type(repo_id: str):
512
-     api = HfApi(token=os.environ.get("HF_TOKEN")) # needed for private or gated models
513
- default = "SD 1.5"
514
- try:
515
- if os.path.exists(repo_id):
516
- tag, _, _, _ = checkpoint_model_type(repo_id)
517
- return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
518
- else:
519
- model = api.model_info(repo_id=repo_id, timeout=5.0)
520
- tags = model.tags
521
- for tag in tags:
522
- if tag in MODEL_TYPE_CLASS.keys():
523
- return MODEL_TYPE_CLASS.get(tag, default)
524
-
525
- except Exception:
526
- return default
527
- return default
528
-
529
-
530
- def restart_space(repo_id: str, factory_reboot: bool):
531
- api = HfApi(token=os.environ.get("HF_TOKEN"))
532
- try:
533
- runtime = api.get_space_runtime(repo_id=repo_id)
534
- if runtime.stage == "RUNNING":
535
- api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
536
- print(f"Restarting space: {repo_id}")
537
- else:
538
- print(f"Space {repo_id} is in stage: {runtime.stage}")
539
- except Exception as e:
540
- print(e)
541
-
542
-
543
- def extract_exif_data(image):
544
- if image is None:
545
- return ""
546
-
547
- try:
548
- metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
549
-
550
- for key in metadata_keys:
551
- if key in image.info:
552
- return image.info[key]
553
-
554
- return str(image.info)
555
-
556
- except Exception as e:
557
- return f"Error extracting metadata: {str(e)}"
558
-
559
-
560
- def create_mask_now(img, invert):
561
- import numpy as np
562
- import time
563
-
564
- time.sleep(0.5)
565
-
566
- transparent_image = img["layers"][0]
567
-
568
- # Extract the alpha channel
569
- alpha_channel = np.array(transparent_image)[:, :, 3]
570
-
571
- # Create a binary mask by thresholding the alpha channel
572
- binary_mask = alpha_channel > 1
573
-
574
- if invert:
575
- print("Invert")
576
- # Invert the binary mask so that the drawn shape is white and the rest is black
577
- binary_mask = np.invert(binary_mask)
578
-
579
- # Convert the binary mask to a 3-channel RGB mask
580
- rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
581
-
582
- # Convert the mask to uint8
583
- rgb_mask = rgb_mask.astype(np.uint8) * 255
584
-
585
- return img["background"], rgb_mask
586
-
587
-
588
- def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
589
-
590
- variant = None
591
- if token is True and not os.environ.get("HF_TOKEN"):
592
- token = None
593
-
594
- if model_type == "SDXL":
595
- info = model_info_data(
596
- repo_name,
597
- token=token,
598
- revision=revision,
599
- timeout=5.0,
600
- )
601
-
602
- filenames = {sibling.rfilename for sibling in info.siblings}
603
- model_filenames, variant_filenames = variant_compatible_siblings(
604
- filenames, variant="fp16"
605
- )
606
-
607
- if len(variant_filenames):
608
- variant = "fp16"
609
-
610
- if model_type == "FLUX":
611
- cached_folder = snapshot_download(
612
- repo_id=repo_name,
613
- allow_patterns="transformer/*"
614
- )
615
- else:
616
- cached_folder = DiffusionPipeline.download(
617
- pretrained_model_name=repo_name,
618
- force_download=False,
619
- token=token,
620
- revision=revision,
621
- # mirror="https://hf-mirror.com",
622
- variant=variant,
623
- use_safetensors=True,
624
- trust_remote_code=False,
625
- timeout=5.0,
626
- )
627
-
628
- if isinstance(cached_folder, PosixPath):
629
- cached_folder = cached_folder.as_posix()
630
-
631
- # Task model
632
- # from huggingface_hub import hf_hub_download
633
- # hf_hub_download(
634
- # task_model,
635
- # filename="diffusion_pytorch_model.safetensors", # fix fp16 variant
636
- # )
637
-
638
- return cached_folder
639
-
640
-
641
- def get_folder_size_gb(folder_path):
642
- result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
643
-
644
- total_size_kb = int(result.stdout.split()[0])
645
- total_size_gb = total_size_kb / (1024 ** 2)
646
-
647
- return total_size_gb
648
-
649
-
650
- def get_used_storage_gb(path_storage=STORAGE_ROOT):
651
- try:
652
- used_gb = get_folder_size_gb(path_storage)
653
- print(f"Used Storage: {used_gb:.2f} GB")
654
- except Exception as e:
655
- used_gb = 999
656
- print(f"Error while retrieving the used storage: {e}.")
657
-
658
- return used_gb
659
-
660
-
661
- def delete_model(removal_candidate):
662
- print(f"Removing: {removal_candidate}")
663
-
664
- if os.path.exists(removal_candidate):
665
- os.remove(removal_candidate)
666
- else:
667
- diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
668
- if os.path.isdir(diffusers_model):
669
- shutil.rmtree(diffusers_model)
670
-
671
-
672
- def clear_hf_cache():
673
- """
674
- Clears the entire Hugging Face cache at ~/.cache/huggingface.
675
- Hugging Face will re-download models as needed later.
676
- """
677
- try:
678
- if os.path.exists(CACHE_HF):
679
- shutil.rmtree(CACHE_HF, ignore_errors=True)
680
- print(f"Hugging Face cache cleared: {CACHE_HF}")
681
- else:
682
- print(f"No Hugging Face cache found at: {CACHE_HF}")
683
- except Exception as e:
684
- print(f"Error clearing Hugging Face cache: {e}")
685
-
686
-
687
- def progress_step_bar(step, total):
688
- # Calculate the percentage for the progress bar width
689
- percentage = min(100, ((step / total) * 100))
690
-
691
- return f"""
692
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
693
- <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
694
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
695
- {int(percentage)}%
696
- </div>
697
- </div>
698
- """
699
-
700
-
701
- def html_template_message(msg):
702
- return f"""
703
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
704
- <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
705
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
706
- {msg}
707
- </div>
708
- </div>
709
- """
710
-
711
-
712
- def escape_html(text):
713
- """Escapes HTML special characters in the input text."""
714
- return text.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")
 
1
+ import os
2
+ import re
3
+ import gradio as gr
4
+ from constants import (
5
+ DIFFUSERS_FORMAT_LORAS,
6
+ CIVITAI_API_KEY,
7
+ HF_TOKEN,
8
+ MODEL_TYPE_CLASS,
9
+ DIRECTORY_LORAS,
10
+ DIRECTORY_MODELS,
11
+ DIFFUSECRAFT_CHECKPOINT_NAME,
12
+ CACHE_HF,
13
+ STORAGE_ROOT,
14
+ )
15
+ from huggingface_hub import HfApi
16
+ from huggingface_hub import snapshot_download
17
+ from diffusers import DiffusionPipeline
18
+ from huggingface_hub import model_info as model_info_data
19
+ from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
+ from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
+ from pathlib import PosixPath
22
+ from unidecode import unidecode
23
+ import urllib.parse
24
+ import copy
25
+ import requests
26
+ from requests.adapters import HTTPAdapter
27
+ from urllib3.util import Retry
28
+ import shutil
29
+ import subprocess
30
+
31
+ USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
32
+
33
+
34
+ def request_json_data(url):
35
+ model_version_id = url.split('/')[-1]
36
+ if "?modelVersionId=" in model_version_id:
37
+ match = re.search(r'modelVersionId=(\d+)', url)
38
+ model_version_id = match.group(1)
39
+
40
+ endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
41
+
42
+ params = {}
43
+ headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
44
+ session = requests.Session()
45
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
46
+ session.mount("https://", HTTPAdapter(max_retries=retries))
47
+
48
+ try:
49
+ result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
50
+ result.raise_for_status()
51
+ json_data = result.json()
52
+ return json_data if json_data else None
53
+ except Exception as e:
54
+ print(f"Error: {e}")
55
+ return None
56
+
57
+
58
+ class ModelInformation:
59
+ def __init__(self, json_data):
60
+ self.model_version_id = json_data.get("id", "")
61
+ self.model_id = json_data.get("modelId", "")
62
+ self.download_url = json_data.get("downloadUrl", "")
63
+ self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
64
+ self.filename_url = next(
65
+ (v.get("name", "") for v in reversed(json_data.get("files", [])) if str(self.model_version_id) in v.get("downloadUrl", "")), ""
66
+ )
67
+ self.filename_url = self.filename_url if self.filename_url else ""
68
+ self.description = json_data.get("description", "")
69
+ if self.description is None: self.description = ""
70
+ self.model_name = json_data.get("model", {}).get("name", "")
71
+ self.model_type = json_data.get("model", {}).get("type", "")
72
+ self.nsfw = json_data.get("model", {}).get("nsfw", False)
73
+ self.poi = json_data.get("model", {}).get("poi", False)
74
+ self.images = [img.get("url", "") for img in json_data.get("images", [])]
75
+ self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
76
+ self.original_json = copy.deepcopy(json_data)
77
+
78
+
79
+ def retrieve_model_info(url):
80
+ json_data = request_json_data(url)
81
+ if not json_data:
82
+ return None
83
+ model_descriptor = ModelInformation(json_data)
84
+ return model_descriptor
85
+
86
+
87
+ def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
88
+ url = url.strip()
89
+ downloaded_file_path = None
90
+
91
+ if "drive.google.com" in url:
92
+ original_dir = os.getcwd()
93
+ os.chdir(directory)
94
+ os.system(f"gdown --fuzzy {url}")
95
+ os.chdir(original_dir)
96
+ elif "huggingface.co" in url:
97
+ url = url.replace("?download=true", "")
98
+ # url = urllib.parse.quote(url, safe=':/') # fix encoding
99
+ if "/blob/" in url:
100
+ url = url.replace("/blob/", "/resolve/")
101
+ user_header = f'"Authorization: Bearer {hf_token}"'
102
+
103
+ filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
104
+
105
+ if hf_token:
106
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {filename}")
107
+ else:
108
+ os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {filename}")
109
+
110
+ downloaded_file_path = os.path.join(directory, filename)
111
+
112
+ elif "civitai.com" in url:
113
+
114
+ if not civitai_api_key:
115
+ print("\033[91mYou need an API key to download Civitai models.\033[0m")
116
+
117
+ model_profile = retrieve_model_info(url)
118
+ if (
119
+ model_profile is not None
120
+ and model_profile.download_url
121
+ and model_profile.filename_url
122
+ ):
123
+ url = model_profile.download_url
124
+ filename = unidecode(model_profile.filename_url) if romanize else model_profile.filename_url
125
+ else:
126
+ if "?" in url:
127
+ url = url.split("?")[0]
128
+ filename = ""
129
+
130
+ url_dl = url + f"?token={civitai_api_key}"
131
+ print(f"Filename: {filename}")
132
+
133
+ param_filename = ""
134
+ if filename:
135
+ param_filename = f"-o '{filename}'"
136
+
137
+ aria2_command = (
138
+ f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
139
+ f'-k 1M -s 16 -d "{directory}" {param_filename} "{url_dl}"'
140
+ )
141
+ os.system(aria2_command)
142
+
143
+ if param_filename and os.path.exists(os.path.join(directory, filename)):
144
+ downloaded_file_path = os.path.join(directory, filename)
145
+
146
+ # # PLAN B
147
+ # # Follow the redirect to get the actual download URL
148
+ # curl_command = (
149
+ # f'curl -L -sI --connect-timeout 5 --max-time 5 '
150
+ # f'-H "Content-Type: application/json" '
151
+ # f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
152
+ # )
153
+
154
+ # headers = os.popen(curl_command).read()
155
+
156
+ # # Look for the redirected "Location" URL
157
+ # location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
158
+
159
+ # if location_match:
160
+ # redirect_url = location_match.group(1).strip()
161
+
162
+ # # Extract the filename from the redirect URL's "Content-Disposition"
163
+ # filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
164
+ # if filename_match:
165
+ # encoded_filename = filename_match.group(1)
166
+ # # Decode the URL-encoded filename
167
+ # decoded_filename = urllib.parse.unquote(encoded_filename)
168
+
169
+ # filename = unidecode(decoded_filename) if romanize else decoded_filename
170
+ # print(f"Filename: {filename}")
171
+
172
+ # aria2_command = (
173
+ # f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
174
+ # f'-k 1M -s 16 -d "{directory}" -o "{filename}" "{redirect_url}"'
175
+ # )
176
+ # return_code = os.system(aria2_command)
177
+
178
+ # # if return_code != 0:
179
+ # # raise RuntimeError(f"Failed to download file: {filename}. Error code: {return_code}")
180
+ # downloaded_file_path = os.path.join(directory, filename)
181
+ # if not os.path.exists(downloaded_file_path):
182
+ # downloaded_file_path = None
183
+
184
+ # if not downloaded_file_path:
185
+ # # Old method
186
+ # if "?" in url:
187
+ # url = url.split("?")[0]
188
+ # url = url + f"?token={civitai_api_key}"
189
+ # os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
190
+
191
+ else:
192
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
193
+
194
+ return downloaded_file_path
195
+
196
+
197
+ def get_model_list(directory_path):
198
+ model_list = []
199
+ valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
200
+
201
+ for filename in os.listdir(directory_path):
202
+ if os.path.splitext(filename)[1] in valid_extensions:
203
+ # name_without_extension = os.path.splitext(filename)[0]
204
+ file_path = os.path.join(directory_path, filename)
205
+ # model_list.append((name_without_extension, file_path))
206
+ model_list.append(file_path)
207
+ print('\033[34mFILE: ' + file_path + '\033[0m')
208
+ return model_list
209
+
210
+
211
+ def extract_parameters(input_string):
212
+ parameters = {}
213
+ input_string = input_string.replace("\n", "")
214
+
215
+ if "Negative prompt:" not in input_string:
216
+ if "Steps:" in input_string:
217
+ input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
218
+ else:
219
+ print("Invalid metadata")
220
+ parameters["prompt"] = input_string
221
+ return parameters
222
+
223
+ parm = input_string.split("Negative prompt:")
224
+ parameters["prompt"] = parm[0].strip()
225
+ if "Steps:" not in parm[1]:
226
+ print("Steps not detected")
227
+ parameters["neg_prompt"] = parm[1].strip()
228
+ return parameters
229
+ parm = parm[1].split("Steps:")
230
+ parameters["neg_prompt"] = parm[0].strip()
231
+ input_string = "Steps:" + parm[1]
232
+
233
+ # Extracting Steps
234
+ steps_match = re.search(r'Steps: (\d+)', input_string)
235
+ if steps_match:
236
+ parameters['Steps'] = int(steps_match.group(1))
237
+
238
+ # Extracting Size
239
+ size_match = re.search(r'Size: (\d+x\d+)', input_string)
240
+ if size_match:
241
+ parameters['Size'] = size_match.group(1)
242
+ width, height = map(int, parameters['Size'].split('x'))
243
+ parameters['width'] = width
244
+ parameters['height'] = height
245
+
246
+ # Extracting other parameters
247
+ other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
248
+ for param in other_parameters:
249
+ parameters[param[0].strip()] = param[1].strip('"')
250
+
251
+ return parameters
252
+
253
+
254
+ def get_my_lora(link_url, romanize):
255
+ l_name = ""
256
+ for url in [url.strip() for url in link_url.split(',')]:
257
+ if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
258
+ l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
259
+ new_lora_model_list = get_model_list(DIRECTORY_LORAS)
260
+ new_lora_model_list.insert(0, "None")
261
+ new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
262
+ msg_lora = "Downloaded"
263
+ if l_name:
264
+ msg_lora += f": <b>{l_name}</b>"
265
+ print(msg_lora)
266
+
267
+ return gr.update(
268
+ choices=new_lora_model_list
269
+ ), gr.update(
270
+ choices=new_lora_model_list
271
+ ), gr.update(
272
+ choices=new_lora_model_list
273
+ ), gr.update(
274
+ choices=new_lora_model_list
275
+ ), gr.update(
276
+ choices=new_lora_model_list
277
+ ), gr.update(
278
+ choices=new_lora_model_list
279
+ ), gr.update(
280
+ choices=new_lora_model_list
281
+ ), gr.update(
282
+ value=msg_lora
283
+ )
284
+
285
+
286
+ def info_html(json_data, title, subtitle):
287
+ return f"""
288
+ <div style='padding: 0; border-radius: 10px;'>
289
+ <p style='margin: 0; font-weight: bold;'>{title}</p>
290
+ <details>
291
+ <summary>Details</summary>
292
+ <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
293
+ </details>
294
+ </div>
295
+ """
296
+
297
+
298
+ def get_model_type(repo_id: str):
299
+     api = HfApi(token=os.environ.get("HF_TOKEN")) # needed for private or gated models
300
+ default = "SD 1.5"
301
+ try:
302
+ if os.path.exists(repo_id):
303
+ tag = checkpoint_model_type(repo_id)
304
+ return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
305
+ else:
306
+ model = api.model_info(repo_id=repo_id, timeout=5.0)
307
+ tags = model.tags
308
+ for tag in tags:
309
+ if tag in MODEL_TYPE_CLASS.keys(): return MODEL_TYPE_CLASS.get(tag, default)
310
+
311
+ except Exception:
312
+ return default
313
+ return default
314
+
315
+
316
+ def restart_space(repo_id: str, factory_reboot: bool):
317
+ api = HfApi(token=os.environ.get("HF_TOKEN"))
318
+ try:
319
+ runtime = api.get_space_runtime(repo_id=repo_id)
320
+ if runtime.stage == "RUNNING":
321
+ api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
322
+ print(f"Restarting space: {repo_id}")
323
+ else:
324
+ print(f"Space {repo_id} is in stage: {runtime.stage}")
325
+ except Exception as e:
326
+ print(e)
327
+
328
+
329
+ def extract_exif_data(image):
330
+ if image is None:
331
+ return ""
332
+
333
+ try:
334
+ metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
335
+
336
+ for key in metadata_keys:
337
+ if key in image.info:
338
+ return image.info[key]
339
+
340
+ return str(image.info)
341
+
342
+ except Exception as e:
343
+ return f"Error extracting metadata: {str(e)}"
344
+
345
+
346
+ def create_mask_now(img, invert):
347
+ import numpy as np
348
+ import time
349
+
350
+ time.sleep(0.5)
351
+
352
+ transparent_image = img["layers"][0]
353
+
354
+ # Extract the alpha channel
355
+ alpha_channel = np.array(transparent_image)[:, :, 3]
356
+
357
+ # Create a binary mask by thresholding the alpha channel
358
+ binary_mask = alpha_channel > 1
359
+
360
+ if invert:
361
+ print("Invert")
362
+ # Invert the binary mask so that the drawn shape is white and the rest is black
363
+ binary_mask = np.invert(binary_mask)
364
+
365
+ # Convert the binary mask to a 3-channel RGB mask
366
+ rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
367
+
368
+ # Convert the mask to uint8
369
+ rgb_mask = rgb_mask.astype(np.uint8) * 255
370
+
371
+ return img["background"], rgb_mask
372
+
373
+
374
+ def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
375
+
376
+ variant = None
377
+ if token is True and not os.environ.get("HF_TOKEN"):
378
+ token = None
379
+
380
+ if model_type == "SDXL":
381
+ info = model_info_data(
382
+ repo_name,
383
+ token=token,
384
+ revision=revision,
385
+ timeout=5.0,
386
+ )
387
+
388
+ filenames = {sibling.rfilename for sibling in info.siblings}
389
+ model_filenames, variant_filenames = variant_compatible_siblings(
390
+ filenames, variant="fp16"
391
+ )
392
+
393
+ if len(variant_filenames):
394
+ variant = "fp16"
395
+
396
+ if model_type == "FLUX":
397
+ cached_folder = snapshot_download(
398
+ repo_id=repo_name,
399
+ allow_patterns="transformer/*"
400
+ )
401
+ else:
402
+ cached_folder = DiffusionPipeline.download(
403
+ pretrained_model_name=repo_name,
404
+ force_download=False,
405
+ token=token,
406
+ revision=revision,
407
+ # mirror="https://hf-mirror.com",
408
+ variant=variant,
409
+ use_safetensors=True,
410
+ trust_remote_code=False,
411
+ timeout=5.0,
412
+ )
413
+
414
+ if isinstance(cached_folder, PosixPath):
415
+ cached_folder = cached_folder.as_posix()
416
+
417
+ # Task model
418
+ # from huggingface_hub import hf_hub_download
419
+ # hf_hub_download(
420
+ # task_model,
421
+ # filename="diffusion_pytorch_model.safetensors", # fix fp16 variant
422
+ # )
423
+
424
+ return cached_folder
425
+
426
+
427
+ def get_folder_size_gb(folder_path):
428
+ result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
429
+
430
+ total_size_kb = int(result.stdout.split()[0])
431
+ total_size_gb = total_size_kb / (1024 ** 2)
432
+
433
+ return total_size_gb
434
+
435
+
436
+ def get_used_storage_gb():
437
+ try:
438
+ used_gb = get_folder_size_gb(STORAGE_ROOT)
439
+ print(f"Used Storage: {used_gb:.2f} GB")
440
+ except Exception as e:
441
+ used_gb = 999
442
+ print(f"Error while retrieving the used storage: {e}.")
443
+
444
+ return used_gb
445
+
446
+
447
+ def delete_model(removal_candidate):
448
+ print(f"Removing: {removal_candidate}")
449
+
450
+ if os.path.exists(removal_candidate):
451
+ os.remove(removal_candidate)
452
+ else:
453
+ diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
454
+ if os.path.isdir(diffusers_model):
455
+ shutil.rmtree(diffusers_model)
456
+
457
+
458
+ def progress_step_bar(step, total):
459
+ # Calculate the percentage for the progress bar width
460
+ percentage = min(100, ((step / total) * 100))
461
+
462
+ return f"""
463
+ <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
464
+ <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
465
+ <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
466
+ {int(percentage)}%
467
+ </div>
468
+ </div>
469
+ """
470
+
471
+
472
+ def html_template_message(msg):
473
+ return f"""
474
+ <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
475
+ <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
476
+ <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
477
+ {msg}
478
+ </div>
479
+ </div>
480
+ """
481
+
482
+
483
+ def escape_html(text):
484
+ """Escapes HTML special characters in the input text."""
485
+ return text.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")