Upload 869 files
This view is limited to 50 files because the commit contains too many changes.
- README.md +40 -9
- extensions-builtin/Lora/network.py +3 -99
- extensions-builtin/Lora/ui_edit_user_metadata.py +9 -10
- extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +229 -215
- extensions-builtin/forge_legacy_preprocessors/annotator/densepose/densepose.py +1 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/hed/__init__.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/multi_depth_model_woauxi.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/util/visualizer.py +0 -2
- extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/blocks.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/dpt_depth.py +0 -2
- extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/midas_net_custom.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/__init__.py +1 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_large.py +0 -2
- extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_tiny.py +0 -2
- extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/utils.py +1 -2
- extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmcv/ops/fused_bias_leakyrelu.py +2 -2
- extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmseg/apis/inference.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/openpose/body.py +1 -4
- extensions-builtin/forge_legacy_preprocessors/annotator/openpose/face.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/openpose/hand.py +0 -5
- extensions-builtin/forge_legacy_preprocessors/annotator/openpose/model.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/openpose/types.py +1 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/pidinet/model.py +0 -2
- extensions-builtin/forge_legacy_preprocessors/annotator/teed/Fsmish.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/teed/Xsmish.py +0 -2
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py +0 -6
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py +0 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py +0 -3
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py +0 -3
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py +0 -3
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py +0 -7
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py +1 -3
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py +1 -1
- extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/utils/misc.py +0 -4
- extensions-builtin/forge_legacy_preprocessors/install.py +1 -1
- extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor.py +2 -3
- extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py +2 -0
- extensions-builtin/sd_forge_controlnet/lib_controlnet/controlnet_ui/controlnet_ui_group.py +3 -2
- extensions-builtin/sd_forge_controlnet/lib_controlnet/external_code.py +1 -1
- extensions-builtin/sd_forge_controlnet/scripts/controlnet.py +68 -192
- extensions-builtin/sd_forge_multidiffusion/lib_multidiffusion/tiled_diffusion.py +539 -0
- extensions-builtin/sd_forge_multidiffusion/scripts/forge_multidiffusion.py +46 -0
- extensions-builtin/xyz/lib_xyz/builtins.py +41 -29
- html/extra-networks-no-cards.html +1 -2
- html/extra-networks-pane.html +3 -0
- javascript/extraNetworks.js +22 -10
- javascript/localization.js +0 -3
- javascript/ui.js +10 -25
- ldm_patched/ldm/modules/attention.py +2 -2
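
A per-file summary like the one above can be reproduced locally from the commit itself; a minimal sketch (the `HEAD` ref is a placeholder for the actual commit hash):

```python
import subprocess

# `git show --numstat` prints "added<TAB>removed<TAB>path" per changed file
stat = subprocess.run(
    ["git", "show", "--numstat", "--format=", "HEAD"],
    capture_output=True, text=True, check=True,
).stdout

for line in stat.strip().splitlines():
    added, removed, path = line.split("\t")  # binary files report "-" counts
    print(f"- {path} +{added} -{removed}")
```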
README.md

```diff
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@
 
 <br>
 
-## Features [
+## Features [Jul. 23]
 > Most base features of the original [Automatic1111 Webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) should still function
 
 #### New Features
@@ -48,6 +48,7 @@
     - enable in **Settings/Optimizations**
 - [X] Support fast `fp8` operation *(`torch._scaled_mm`)*
     - requires RTX **40**+
+    - requires **UNet Weights in fp8** option
     - ~10% speed up; reduce quality
     - enable in **Settings/Optimizations**
 
@@ -55,12 +56,14 @@
 > - Both `fp16_accumulation` and `cublas_ops` achieve the same speed up; if you already install/update to PyTorch **2.7.0**, you do not need to go for `cublas_ops`
 > - The `fp16_accumulation` and `cublas_ops` require `fp16` precision, thus is not compatible with the `fp8` operation
 
+<br>
+
 - [X] Persistent LoRA Patching
     - speed up LoRA loading in subsequent generations
     - see [Commandline](#by-classic)
 - [X] Implement new Samplers
     - *(ported from reForge Webui)*
-- [X] Implement Scheduler
+- [X] Implement Scheduler dropdown
     - *(backported from Automatic1111 Webui upstream)*
     - enable in **Settings/UI Alternatives**
 - [X] Add `CFG` slider to the `Hires. fix` section
@@ -72,18 +75,34 @@
     - enable in **Settings/UI Alternatives**
 - [X] Implement full precision calculation for `Mask blur` blending
     - enable in **Settings/img2img**
+- [X] Support loading upscalers in `half` precision
+    - speed up; reduce quality
+    - enable in **Settings/Upscaling**
+- [X] Support running tile composition on GPU
+    - enable in **Settings/Upscaling**
+- [X] Allow `newline` in LoRA metadata
+    - *(backported from Automatic1111 Webui upstream)*
+- [X] Implement sending parameters from generation result rather than from UI
+    - **e.g.** send the prompts instead of `Wildcard` syntax
+    - enable in **Settings/Infotext**
+- [X] Implement tiling optimization for VAE
+    - reduce memory usage; reduce speed
+    - enable in **Settings/VAE**
 - [X] Implement `diskcache` for hashes
     - *(backported from Automatic1111 Webui upstream)*
 - [X] Implement `skip_early_cond`
     - *(backported from Automatic1111 Webui upstream)*
     - enable in **Settings/Optimizations**
-- [X]
+- [X] Allow inserting the upscaled image to the Gallery instead of overriding the input image
+    - *(backported from upstream [PR](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16405))*
+- [X] Support `v-pred` **SDXL** checkpoints *(**e.g.** [NoobAI](https://civitai.com/models/833294?modelVersionId=1190596))*
 - [X] Support new LoRA architectures
 - [X] Update `spandrel`
     - support new Upscaler architectures
 - [X] Add `pillow-heif` package
     - support `.avif` and `.heif` images
 - [X] Automatically determine the optimal row count for `X/Y/Z Plot`
+- [X] Support new LoRA architectures
 - [X] `DepthAnything v2` Preprocessor
 - [X] Support [NoobAI Inpaint](https://civitai.com/models/1376234/noobai-inpainting-controlnet) ControlNet
 - [X] Support [Union](https://huggingface.co/xinsir/controlnet-union-sdxl-1.0) / [ProMax](https://huggingface.co/brad-twinkl/controlnet-union-sdxl-1.0-promax) ControlNet
@@ -110,15 +129,17 @@
 - [X] Some Preprocessors *(ControlNet)*
 - [X] `Photopea` and `openpose_editor` *(ControlNet)*
 - [X] Unix `.sh` launch scripts
-    - You can still use this WebUI by copying a launch script from
+    - You can still use this WebUI by simply copying a launch script from other working WebUI
 
 #### Optimizations
 
 - [X] **[Freedom]** Natively integrate the `SD1` and `SDXL` logics
     - no longer `git` `clone` any repository on fresh install
     - no more random hacks and monkey patches
+- [X] Fix `canvas-zoom-and-pan` built-in extension
+    - no more infinite-resizing bug when using `Send to` buttons
 - [X] Fix memory leak when switching checkpoints
-- [X] Clean up the `ldm_patched` *(**
+- [X] Clean up the `ldm_patched` *(**i.e.** `comfy`)* folder
 - [X] Remove unused `cmd_args`
 - [X] Remove unused `args_parser`
 - [X] Remove unused `shared_options`
@@ -127,6 +148,9 @@
 - [X] Remove redundant upscaler codes
     - put every upscaler inside the `ESRGAN` folder
 - [X] Optimize upscaler logics
+- [X] Optimize certain operations in `Spandrel`
+- [X] Optimize the creation of Extra Networks pages
+    - *(backported from Automatic1111 Webui upstream)*
 - [X] Improve color correction
 - [X] Improve hash caching
 - [X] Improve error logs
@@ -135,16 +159,21 @@
     - improve formatting
     - update descriptions
 - [X] Check for Extension updates in parallel
-- [X]
+- [X] Move `embeddings` folder into `models` folder
 - [X] ControlNet Rewrite
     - change Units to `gr.Tab`
     - remove multi-inputs, as they are "[misleading](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/932)"
     - change `visible` toggle to `interactive` toggle; now the UI will no longer jump around
-    -
+    - improve `Presets` application
+    - fix `Inpaint not masked` mode
 - [X] Disable Refiner by default
     - enable again in **Settings/UI Alternatives**
 - [X] Disable Tree View by default
     - enable again in **Settings/Extra Networks**
+- [X] Hide Sampler Parameters by default
+    - enable again by adding **--adv-samplers** flag
+- [X] Hide some X/Y/Z Plot options by default
+    - enable again by adding **--adv-xyz** flag
 - [X] Run `text encoder` on CPU by default
 - [X] Fix `pydantic` Errors
 - [X] Fix `Soft Inpainting`
@@ -154,7 +183,7 @@
 - [X] Update `protobuf`
     - faster `insightface` loading
 - [X] Update to latest PyTorch
-    - `torch==2.7.
+    - `torch==2.7.1+cu128`
     - `xformers==0.0.30`
 
 > [!Note]
@@ -175,7 +204,6 @@
 - `--no-download-sd-model`: Do not download a default checkpoint
     - can be removed after you download some checkpoints of your choice
 - `--xformers`: Install the `xformers` package to speed up generation
-    - Currently, `torch==2.7.0` does **not** support `xformers` yet
 - `--port`: Specify a server port to use
     - defaults to `7860`
 - `--api`: Enable [API](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API) access
@@ -449,6 +477,9 @@
 > [!Note]
 > `SageAttention` is based on quantization, so its quality might be slightly worse than others
 
+> [!Important]
+> When using `SageAttention 2`, both positive prompts and negative prompts are required; omitting negative prompts can cause `NaN` issues
+
 <br>
 
 ## Issues & Requests
```
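
The "Implement `diskcache` for hashes" and "Improve hash caching" items above amount to memoizing expensive checkpoint hashes on disk. A minimal sketch of the idea, illustrative only — the repo's real implementation lives in `modules/cache.py` / `modules/hashes.py`, and the cache directory and key layout here are assumptions:

```python
import hashlib
import os

import diskcache  # pip install diskcache

cache = diskcache.Cache("cache/hashes")  # hypothetical cache directory

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so multi-GB checkpoints never load fully into RAM."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

def cached_sha256(path: str) -> str:
    """Return the cached hash, recomputing only when the file's mtime changes."""
    key = f"{path}:{os.path.getmtime(path)}"
    if key not in cache:
        cache[key] = sha256_of_file(path)
    return cache[key]
```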
extensions-builtin/Lora/network.py

```diff
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -3,8 +3,6 @@ from __future__ import annotations
 import enum
 from collections import namedtuple
 
-import torch.nn as nn
-import torch.nn.functional as F
 from modules import cache, errors, hashes, sd_models, shared
 
 NetworkWeights = namedtuple("NetworkWeights", ["network_key", "sd_key", "w", "sd_module"])
@@ -33,12 +31,11 @@ class NetworkOnDisk:
 
         def read_metadata():
             metadata = sd_models.read_metadata_from_safetensors(filename)
-            metadata.pop("ssmd_cover_images", None)  # cover images are too big to display in UI
             return metadata
 
         if self.is_safetensors:
             try:
-                self.metadata = cache.cached_data_for_file("safetensors-metadata", "/
+                self.metadata = cache.cached_data_for_file("safetensors-metadata", f"lora/{self.name}", filename, read_metadata)
             except Exception as e:
                 errors.display(e, f"reading lora {filename}")
 
@@ -53,7 +50,7 @@ class NetworkOnDisk:
 
         self.hash: str = None
         self.shorthash: str = None
-        self.set_hash(self.metadata.get("sshs_model_hash") or hashes.sha256_from_cache(self.filename, "/
+        self.set_hash(self.metadata.get("sshs_model_hash") or hashes.sha256_from_cache(self.filename, f"lora/{self.name}", use_addnet_hash=self.is_safetensors) or "")
 
         self.sd_version: "SDVersion" = self.detect_version()
 
@@ -76,14 +73,7 @@ class NetworkOnDisk:
 
     def read_hash(self):
         if not self.hash:
-            self.set_hash(
-                hashes.sha256(
-                    self.filename,
-                    "/".join(["lora", self.name]),
-                    use_addnet_hash=self.is_safetensors,
-                )
-                or ""
-            )
+            self.set_hash(hashes.sha256(self.filename, f"lora/{self.name}", use_addnet_hash=self.is_safetensors) or "")
 
     def get_alias(self):
         import networks
@@ -107,89 +97,3 @@ class Network:  # LoraModule
 
         self.mentioned_name = None
         """the text that was used to add the network to prompt - can be either name or an alias"""
-
-
-class ModuleType:
-    def create_module(self, net: Network, weights: NetworkWeights) -> Network | None:
-        return None
-
-
-class NetworkModule:
-    def __init__(self, net: Network, weights: NetworkWeights):
-        self.network = net
-        self.network_key = weights.network_key
-        self.sd_key = weights.sd_key
-        self.sd_module = weights.sd_module
-
-        if hasattr(self.sd_module, "weight"):
-            self.shape = self.sd_module.weight.shape
-
-        self.ops = None
-        self.extra_kwargs = {}
-        if isinstance(self.sd_module, nn.Conv2d):
-            self.ops = F.conv2d
-            self.extra_kwargs = {
-                "stride": self.sd_module.stride,
-                "padding": self.sd_module.padding,
-            }
-        elif isinstance(self.sd_module, nn.Linear):
-            self.ops = F.linear
-        elif isinstance(self.sd_module, nn.LayerNorm):
-            self.ops = F.layer_norm
-            self.extra_kwargs = {
-                "normalized_shape": self.sd_module.normalized_shape,
-                "eps": self.sd_module.eps,
-            }
-        elif isinstance(self.sd_module, nn.GroupNorm):
-            self.ops = F.group_norm
-            self.extra_kwargs = {
-                "num_groups": self.sd_module.num_groups,
-                "eps": self.sd_module.eps,
-            }
-
-        self.dim = None
-        self.bias = weights.w.get("bias")
-        self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
-        self.scale = weights.w["scale"].item() if "scale" in weights.w else None
-
-    def multiplier(self):
-        if "transformer" in self.sd_key[:20]:
-            return self.network.te_multiplier
-        else:
-            return self.network.unet_multiplier
-
-    def calc_scale(self):
-        if self.scale is not None:
-            return self.scale
-        if self.dim is not None and self.alpha is not None:
-            return self.alpha / self.dim
-
-        return 1.0
-
-    def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
-        if self.bias is not None:
-            updown = updown.reshape(self.bias.shape)
-            updown += self.bias.to(orig_weight.device, dtype=updown.dtype)
-            updown = updown.reshape(output_shape)
-
-        if len(output_shape) == 4:
-            updown = updown.reshape(output_shape)
-
-        if orig_weight.size().numel() == updown.size().numel():
-            updown = updown.reshape(orig_weight.shape)
-
-        if ex_bias is not None:
-            ex_bias = ex_bias * self.multiplier()
-
-        return updown * self.calc_scale() * self.multiplier(), ex_bias
-
-    def calc_updown(self, target):
-        raise NotImplementedError
-
-    def forward(self, x, y):
-        """A general forward implementation for all modules"""
-        if self.ops is None:
-            raise NotImplementedError
-
-        updown, ex_bias = self.calc_updown(self.sd_module.weight)
-        return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs)
```
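
For context on what the removed `NetworkModule` base class was computing: a LoRA delta is a low-rank product scaled by `alpha / dim` (see the deleted `calc_scale` / `finalize_updown` above). A minimal sketch of that update rule for a plain linear weight, with toy shapes; the function name and shapes are illustrative, not the repo's API:

```python
import torch

def apply_lora(weight: torch.Tensor, up: torch.Tensor, down: torch.Tensor,
               alpha: float, multiplier: float = 1.0) -> torch.Tensor:
    """W' = W + multiplier * (alpha / rank) * (up @ down), mirroring calc_scale()."""
    rank = down.shape[0]        # LoRA rank == rows of the down-projection
    scale = alpha / rank        # the alpha/dim factor from calc_scale()
    return weight + multiplier * scale * (up @ down)

# toy example: a 16x16 layer patched with a rank-4 LoRA
w = torch.randn(16, 16)
up, down = torch.randn(16, 4), torch.randn(4, 16)
patched = apply_lora(w, up, down, alpha=4.0)
```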
extensions-builtin/Lora/ui_edit_user_metadata.py

```diff
--- a/extensions-builtin/Lora/ui_edit_user_metadata.py
+++ b/extensions-builtin/Lora/ui_edit_user_metadata.py
@@ -51,13 +51,13 @@ class LoraUserMetadataEditor(UserMetadataEditor):
 
     def save_lora_user_metadata(
         self,
-        name,
-        desc,
-        sd_version,
-        activation_text,
-        preferred_weight,
-        negative_text,
-        notes,
+        name: str,
+        desc: str,
+        sd_version: str,
+        activation_text: str,
+        preferred_weight: float,
+        negative_text: str,
+        notes: str,
     ):
         user_metadata = self.get_user_metadata(name)
         user_metadata["description"] = desc
@@ -68,7 +68,6 @@ class LoraUserMetadataEditor(UserMetadataEditor):
         user_metadata["notes"] = notes
 
         self.write_user_metadata(name, user_metadata)
-        self.page.refresh()
 
     def get_metadata_table(self, name):
         table = super().get_metadata_table(name)
@@ -157,8 +156,8 @@ class LoraUserMetadataEditor(UserMetadataEditor):
         self.create_default_editor_elems()
 
         self.taginfo = gr.HighlightedText(label="Training dataset tags")
-        self.edit_activation_text = gr.
-        self.edit_negative_text = gr.
+        self.edit_activation_text = gr.Textbox(label="Positive Prompt", info="Will be added to the prompt after the LoRA syntax", lines=2)
+        self.edit_negative_text = gr.Textbox(label="Negative Prompt", info="Will be added to the negative prompt", lines=2)
         self.slider_preferred_weight = gr.Slider(
             label="Preferred weight",
             info="Set to 0 to use the default set in Settings",
```
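
The `gr.Textbox(label=..., info=..., lines=...)` calls above are standard Gradio components; a self-contained sketch of the same pattern, with hypothetical component names, showing why the typed callback signature added to `save_lora_user_metadata` is useful — each component maps to a predictable Python type (`Textbox -> str`, `Slider -> float`):

```python
import gradio as gr

with gr.Blocks() as demo:
    activation = gr.Textbox(
        label="Positive Prompt",
        info="Will be added to the prompt after the LoRA syntax",
        lines=2,
    )
    preview = gr.Textbox(label="Preview")

    # the callback receives a str, matching the Textbox value type
    activation.change(lambda text: text, inputs=activation, outputs=preview)

if __name__ == "__main__":
    demo.launch()
```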
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js

```diff
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -1,53 +1,53 @@
+const elementIDs = {
+    img2imgTabs: "#mode_img2img .tab-nav",
+    inpaint: "#img2maskimg",
+    inpaintSketch: "#inpaint_sketch",
+    rangeGroup: "#img2img_column_size",
+    sketch: "#img2img_sketch",
+};
+
+const tabNameToElementId = {
+    "Inpaint sketch": elementIDs.inpaintSketch,
+    "Inpaint": elementIDs.inpaint,
+    "Sketch": elementIDs.sketch,
+};
 
+(function () {
 onUiLoaded(async () => {
-    const elementIDs = {
-        img2imgTabs: "#mode_img2img .tab-nav",
-        inpaint: "#img2maskimg",
-        inpaintSketch: "#inpaint_sketch",
-        rangeGroup: "#img2img_column_size",
-        sketch: "#img2img_sketch"
-    };
-
-    const tabNameToElementId = {
-        "Inpaint sketch": elementIDs.inpaintSketch,
-        "Inpaint": elementIDs.inpaint,
-        "Sketch": elementIDs.sketch
-    };
-
     /** Waits for an element to be present in the DOM */
-    const waitForElement = (id) =>
-        const
+    const waitForElement = (id) =>
+        new Promise((resolve) => {
+            const checkForElement = () => {
+                const element = document.querySelector(id);
+                if (element) return resolve(element);
+                setTimeout(checkForElement, 100);
+            };
+            checkForElement();
+        });
 
     function getActiveTab(elements, all = false) {
+        if (!elements.img2imgTabs) return null;
+
         const tabs = elements.img2imgTabs.querySelectorAll("button");
         if (all) return tabs;
 
         for (let tab of tabs) {
-            if (tab.classList.contains("selected"))
-                return tab;
+            if (tab.classList.contains("selected")) return tab;
         }
     }
 
     // Get tab ID
     function getTabId(elements) {
        const activeTab = getActiveTab(elements);
+        if (!activeTab) return null;
         return tabNameToElementId[activeTab.innerText];
     }
 
     // Wait until opts loaded
     async function waitForOpts() {
         for (; ;) {
-            if (window.opts && Object.keys(window.opts).length)
-            }
-            await new Promise(resolve => setTimeout(resolve, 100));
+            if (window.opts && Object.keys(window.opts).length) return window.opts;
+            await new Promise((resolve) => setTimeout(resolve, 100));
         }
     }
@@ -108,8 +108,7 @@
         typeof userValue === "object" ||
         userValue === "disable"
     ) {
-        result[key] =
-            userValue === undefined ? defaultValue : userValue;
+        result[key] = userValue === undefined ? defaultValue : userValue;
     } else if (isValidHotkey(userValue)) {
         const normalizedUserValue = normalizeHotkey(userValue);
@@ -120,20 +119,20 @@
     } else {
         console.error(
             `Hotkey: ${formatHotkeyForDisplay(
-                userValue
+                userValue,
             )} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay(
-                defaultValue
-            )}
+                defaultValue,
+            )}`,
         );
         result[key] = defaultValue;
     }
 } else {
     console.error(
         `Hotkey: ${formatHotkeyForDisplay(
-            userValue
+            userValue,
         )} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay(
-            defaultValue
-        )}
+            defaultValue,
+        )}`,
     );
     result[key] = defaultValue;
 }
@@ -145,11 +144,10 @@
     // Disables functions in the config object based on the provided list of function names
     function disableFunctions(config, disabledFunctions) {
         // Bind the hasOwnProperty method to the functionMap object to avoid errors
-        const hasOwnProperty =
-            Object.prototype.hasOwnProperty.bind(functionMap);
+        const hasOwnProperty = Object.prototype.hasOwnProperty.bind(functionMap);
 
         // Loop through the disabledFunctions array and disable the corresponding functions in the config object
-        disabledFunctions.forEach(funcName => {
+        disabledFunctions.forEach((funcName) => {
             if (hasOwnProperty(funcName)) {
                 const key = functionMap[funcName];
                 config[key] = "disable";
@@ -179,16 +177,14 @@
         if (!img || !imageARPreview) return;
 
         imageARPreview.style.transform = "";
-        if (parseFloat(mainTab.style.width) >
+        if (parseFloat(mainTab.style.width) > 800) {
             const transformString = mainTab.style.transform;
             const scaleMatch = transformString.match(
-                /scale\(([-+]?[0-9]*\.?[0-9]+)\)
+                /scale\(([-+]?[0-9]*\.?[0-9]+)\)/,
             );
             let zoom = 1; // default zoom
 
-            if (scaleMatch && scaleMatch[1])
-                zoom = Number(scaleMatch[1]);
-            }
+            if (scaleMatch && scaleMatch[1]) zoom = Number(scaleMatch[1]);
 
             imageARPreview.style.transformOrigin = "0 0";
             imageARPreview.style.transform = `scale(${zoom})`;
@@ -200,7 +196,7 @@
 
         setTimeout(() => {
             img.style.display = "none";
-        },
+        }, 500);
     }
 
     const hotkeysConfigOpts = await waitForOpts();
@@ -229,39 +225,39 @@
         "Moving canvas": "canvas_hotkey_move",
         "Fullscreen": "canvas_hotkey_fullscreen",
         "Reset Zoom": "canvas_hotkey_reset",
-        "Overlap": "canvas_hotkey_overlap"
+        "Overlap": "canvas_hotkey_overlap",
     };
 
     // Loading the configuration from opts
     const preHotkeysConfig = createHotkeyConfig(
         defaultHotkeysConfig,
-        hotkeysConfigOpts
+        hotkeysConfigOpts,
     );
 
     // Disable functions that are not needed by the user
     const hotkeysConfig = disableFunctions(
         preHotkeysConfig,
-        preHotkeysConfig.canvas_disabled_functions
+        preHotkeysConfig.canvas_disabled_functions,
     );
 
     let isMoving = false;
-    let mouseX, mouseY;
     let activeElement;
+    let interactedWithAltKey = false;
 
     const elements = Object.fromEntries(
-        Object.keys(elementIDs).map(id => [
+        Object.keys(elementIDs).map((id) => [
             id,
-            gradioApp().querySelector(elementIDs[id])
-        ])
+            gradioApp().querySelector(elementIDs[id]),
+        ]),
     );
     const elemData = {};
 
     // Apply functionality to the range inputs. Restore redmask and correct for long images.
-    const rangeInputs = elements.rangeGroup
-        Array.from(elements.rangeGroup.querySelectorAll("input"))
-        [
+    const rangeInputs = elements.rangeGroup
+        ? Array.from(elements.rangeGroup.querySelectorAll("input"))
+        : [
             gradioApp().querySelector("#img2img_width input[type='range']"),
-            gradioApp().querySelector("#img2img_height input[type='range']")
+            gradioApp().querySelector("#img2img_height input[type='range']"),
         ];
 
     for (const input of rangeInputs) {
@@ -272,7 +268,7 @@
         const targetElement = gradioApp().querySelector(elemId);
 
         if (!targetElement) {
-            console.log(
+            console.log(`Element ${elemId} not found...`);
             return;
         }
@@ -281,14 +277,13 @@
         elemData[elemId] = {
             zoom: 1,
             panX: 0,
-            panY: 0
+            panY: 0,
         };
         let fullScreenMode = false;
 
         // Create tooltip
         function createTooltip() {
-            const toolTipElement =
-                targetElement.querySelector(".image-container");
+            const toolTipElement = targetElement.querySelector(".image-container");
             const tooltip = document.createElement("div");
             tooltip.className = "canvas-tooltip";
@@ -306,39 +301,37 @@
             {
                 configKey: "canvas_hotkey_zoom",
                 action: "Zoom canvas",
-                keySuffix: " + wheel"
+                keySuffix: " + wheel",
             },
             {
                 configKey: "canvas_hotkey_adjust",
                 action: "Adjust brush size",
-                keySuffix: " + wheel"
+                keySuffix: " + wheel",
             },
             { configKey: "canvas_hotkey_reset", action: "Reset zoom" },
             {
                 configKey: "canvas_hotkey_fullscreen",
-                action: "Fullscreen mode"
+                action: "Fullscreen mode",
             },
             { configKey: "canvas_hotkey_move", action: "Move canvas" },
-            { configKey: "canvas_hotkey_overlap", action: "Overlap" }
+            { configKey: "canvas_hotkey_overlap", action: "Overlap" },
         ];
 
         // Create hotkeys array with disabled property based on the config values
-        const hotkeys = hotkeysInfo.map(info => {
+        const hotkeys = hotkeysInfo.map((info) => {
             const configValue = hotkeysConfig[info.configKey];
-            const key = info.keySuffix
-                `${configValue}${info.keySuffix}`
-                configValue.charAt(configValue.length - 1);
+            const key = info.keySuffix
+                ? `${configValue}${info.keySuffix}`
+                : configValue.charAt(configValue.length - 1);
             return {
                 key,
                 action: info.action,
-                disabled: configValue === "disable"
+                disabled: configValue === "disable",
             };
         });
 
         for (const hotkey of hotkeys) {
-            if (hotkey.disabled)
-                continue;
-            }
+            if (hotkey.disabled) continue;
 
             const p = document.createElement("p");
             p.innerHTML = `<b>${hotkey.key}</b> - ${hotkey.action}`;
@@ -353,16 +346,14 @@
             toolTipElement.appendChild(tooltip);
         }
 
-        //Show tool tip if setting enable
-        if (hotkeysConfig.canvas_show_tooltip)
-            createTooltip();
-        }
+        // Show tool tip if setting enable
+        if (hotkeysConfig.canvas_show_tooltip) createTooltip();
 
         // In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui.
         function fixCanvas() {
-            const activeTab = getActiveTab(elements)
+            const activeTab = getActiveTab(elements)?.textContent.trim();
 
-            if (activeTab !== "img2img") {
+            if (activeTab && activeTab !== "img2img") {
                 const img = targetElement.querySelector(`${elemId} img`);
 
                 if (img && img.style.display !== "none") {
@@ -377,12 +368,10 @@
             elemData[elemId] = {
                 zoomLevel: 1,
                 panX: 0,
-                panY: 0
+                panY: 0,
             };
 
-            if (isExtension)
-                targetElement.style.overflow = "hidden";
-            }
+            if (isExtension) targetElement.style.overflow = "hidden";
 
             targetElement.isZoomed = false;
@@ -390,16 +379,16 @@
             targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
 
             const canvas = gradioApp().querySelector(
-                `${elemId} canvas[key="interface"]
+                `${elemId} canvas[key="interface"]`,
             );
 
             toggleOverlap("off");
             fullScreenMode = false;
 
-            const closeBtn = targetElement.querySelector(
+            const closeBtn = targetElement.querySelector(
+                "button[aria-label='Remove Image']",
+            );
+            if (closeBtn) closeBtn.addEventListener("click", resetZoom);
 
             if (canvas && isExtension) {
                 const parentElement = targetElement.closest('[id^="component-"]');
@@ -411,14 +400,13 @@
                 fitToElement();
                 return;
             }
-
         }
 
         if (
             canvas &&
             !isExtension &&
-            parseFloat(canvas.style.width) >
-            parseFloat(targetElement.style.width) >
+            parseFloat(canvas.style.width) > 800 &&
+            parseFloat(targetElement.style.width) > 800
         ) {
             fitToElement();
             return;
@@ -435,11 +423,8 @@
             targetElement.style.zIndex =
                 targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
 
-            if (forced === "off")
-            } else if (forced === "on") {
-                targetElement.style.zIndex = zIndex2;
-            }
+            if (forced === "off") targetElement.style.zIndex = zIndex1;
+            else if (forced === "on") targetElement.style.zIndex = zIndex2;
         }
 
         // Adjust the brush size based on the deltaY value from a mouse wheel event
@@ -447,21 +432,18 @@
             elemId,
             deltaY,
             withoutValue = false,
-            percentage = 5
+            percentage = 5,
         ) {
             const input =
                 gradioApp().querySelector(
-                    `${elemId} input[aria-label='Brush radius']
+                    `${elemId} input[aria-label='Brush radius']`,
                 ) ||
-                gradioApp().querySelector(
-                    `${elemId} button[aria-label="Use brush"]`
-                );
+                gradioApp().querySelector(`${elemId} button[aria-label="Use brush"]`);
 
             if (input) {
                 input.click();
                 if (!withoutValue) {
-                    const maxValue =
-                        parseFloat(input.getAttribute("max")) || 100;
+                    const maxValue = parseFloat(input.getAttribute("max")) || 100;
                     const changeAmount = maxValue * (percentage / 100);
                     const newValue =
                         parseFloat(input.value) +
@@ -474,7 +456,7 @@
 
         // Reset zoom when uploading a new image
         const fileInput = gradioApp().querySelector(
-            `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv
+            `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`,
        );
         fileInput.addEventListener("click", resetZoom);
@@ -482,18 +464,23 @@
         function updateZoom(newZoomLevel, mouseX, mouseY) {
             newZoomLevel = Math.max(0.1, Math.min(newZoomLevel, 15));
 
+            // Check if we're close to the original zoom level (1.0)
+            if (Math.abs(newZoomLevel - 1.0) < 0.01) {
+                newZoomLevel = 1;
+                elemData[elemId].panX = 0;
+                elemData[elemId].panY = 0;
+            } else {
+                elemData[elemId].panX +=
+                    mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
+                elemData[elemId].panY +=
+                    mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel;
+            }
 
             targetElement.style.transformOrigin = "0 0";
             targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
 
             toggleOverlap("on");
-            if (isExtension)
-                targetElement.style.overflow = "visible";
-            }
+            if (isExtension) targetElement.style.overflow = "visible";
 
             return newZoomLevel;
         }
@@ -502,27 +489,26 @@
         function changeZoomLevel(operation, e) {
             if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) {
                 e.preventDefault();
+                if (hotkeysConfig.canvas_hotkey_zoom === "Alt")
+                    interactedWithAltKey = true;
 
                 let zoomPosX, zoomPosY;
                 let delta = 0.2;
-                if (elemData[elemId].zoomLevel > 7)
-                } else if (elemData[elemId].zoomLevel > 2) {
-                    delta = 0.6;
-                }
+                if (elemData[elemId].zoomLevel > 7) delta = 0.9;
 
                 zoomPosX = e.clientX;
                 zoomPosY = e.clientY;
 
                 fullScreenMode = false;
                 elemData[elemId].zoomLevel = updateZoom(
-                    elemData[elemId].zoomLevel +
-                        (operation === "+" ? delta : -delta),
                     zoomPosX - targetElement.getBoundingClientRect().left,
-                    zoomPosY - targetElement.getBoundingClientRect().top
                 );
 
-                targetElement.isZoomed =
             }
         }
@@ -533,17 +519,14 @@
          */
 
         function fitToElement() {
-            //Reset Zoom
+            // Reset Zoom
             targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
 
             let parentElement;
 
-            if (isExtension)
                 parentElement = targetElement.closest('[id^="component-"]');
-                parentElement = targetElement.parentElement;
-            }
-
 
             // Get element and screen dimensions
             const elementWidth = targetElement.offsetWidth;
@@ -569,8 +552,7 @@
             const originYValue = parseFloat(originY);
 
             const offsetX =
-                (screenWidth - elementWidth * scale) / 2 -
-                originXValue * (1 - scale);
+                (screenWidth - elementWidth * scale) / 2 - originXValue * (1 - scale);
             const offsetY =
                 (screenHeight - elementHeight * scale) / 2.5 -
                 originYValue * (1 - scale);
@@ -596,18 +578,15 @@
         // Fullscreen mode
         function fitToScreen() {
             const canvas = gradioApp().querySelector(
-                `${elemId} canvas[key="interface"]
+                `${elemId} canvas[key="interface"]`,
             );
 
             if (!canvas) return;
 
-            if (canvas.offsetWidth >
-                targetElement.style.width =
-            }
 
-            if (isExtension)
-                targetElement.style.overflow = "visible";
-            }
+            if (isExtension) targetElement.style.overflow = "visible";
 
             if (fullScreenMode) {
                 resetZoom();
@@ -615,8 +594,8 @@
                 return;
             }
 
-            //Reset Zoom
-            targetElement.style.transform =
+            // Reset Zoom
+            targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
 
             // Get scrollbar width to right-align the image
             const scrollbarWidth =
@@ -670,24 +649,31 @@
         // Handle keydown events
         function handleKeyDown(event) {
             // Disable key locks to make pasting from the buffer work correctly
-            if (
                 return;
             }
 
             // before activating shortcut, ensure user is not actively typing in an input field
             if (!hotkeysConfig.canvas_blur_prompt) {
-                if (
                     return;
-                }
             }
 
-
             const hotkeyActions = {
                 [hotkeysConfig.canvas_hotkey_reset]: resetZoom,
                 [hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
                 [hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen,
-                [hotkeysConfig.canvas_hotkey_shrink_brush]: () =>
             };
 
             const action = hotkeyActions[event.code];
@@ -699,15 +685,8 @@
             if (
                 isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) ||
                 isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust)
-            )
+            ) {
                 event.preventDefault();
-            }
-        }
-
-        // Get Mouse position
-        function getMousePosition(e) {
-            mouseX = e.offsetX;
-            mouseY = e.offsetY;
             }
 
         // Simulation of the function to put a long image into the screen.
@@ -716,31 +695,40 @@
 
             targetElement.isExpanded = false;
             function autoExpand() {
-                const canvas = document.querySelector(
                 if (canvas) {
-                    if (
-                        targetElement
                         setTimeout(() => {
                             fitToScreen();
                             resetZoom();
-                            targetElement.style.visibility = "visible";
                             targetElement.isExpanded = true;
-                        },
                     }
                 }
             }
 
-            //observers
             // Creating an observer with a callback function to handle DOM changes
-            const observer = new MutationObserver((mutationsList
-                for (
                 // If the style attribute of the canvas has changed, by observation it happens only when the picture changes
-                if (
-                    mutation.
                     targetElement.isExpanded = false;
-                    setTimeout(resetZoom,
                 }
             }
         });
@@ -749,7 +737,11 @@
         if (hotkeysConfig.canvas_auto_expand) {
             targetElement.addEventListener("mousemove", autoExpand);
             // Set up an observer to track attribute changes
-            observer.observe(targetElement, {
         }
 
         // Handle events only inside the targetElement
@@ -778,44 +770,53 @@
         targetElement.addEventListener("mouseleave", handleMouseLeave);
 
         // Reset zoom when click on another tab
-        elements.img2imgTabs
 
-        targetElement.addEventListener(
 
         // Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
         function handleMoveKeyDown(e) {
-
             // Disable key locks to make pasting from the buffer work correctly
-            if (
                 return;
             }
 
             // before activating shortcut, ensure user is not actively typing in an input field
             if (!hotkeysConfig.canvas_blur_prompt) {
-                if (e.target.nodeName ===
                     return;
-                }
             }
 
-
             if (e.code === hotkeysConfig.canvas_hotkey_move) {
                 if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) {
                     e.preventDefault();
@@ -826,21 +827,26 @@
         }
 
         function handleMoveKeyUp(e) {
-            if (e.code === hotkeysConfig.canvas_hotkey_move)
-                isMoving = false;
-            }
+            if (e.code === hotkeysConfig.canvas_hotkey_move) isMoving = false;
         }
 
         document.addEventListener("keydown", handleMoveKeyDown);
         document.addEventListener("keyup", handleMoveKeyUp);
 
         // Detect zoom level and update the pan speed.
         function updatePanPosition(movementX, movementY) {
             let panSpeed = 2;
 
-            if (elemData[elemId].zoomLevel > 8)
-                panSpeed = 3.5;
-            }
+            if (elemData[elemId].zoomLevel > 8) panSpeed = 3.5;
 
             elemData[elemId].panX += movementX * panSpeed;
             elemData[elemId].panY += movementY * panSpeed;
@@ -857,10 +863,7 @@
                 updatePanPosition(e.movementX, e.movementY);
                 targetElement.style.pointerEvents = "none";
 
-                if (isExtension)
-                    targetElement.style.overflow = "visible";
-                }
-
+                if (isExtension) targetElement.style.overflow = "visible";
             } else {
                 targetElement.style.pointerEvents = "auto";
             }
@@ -874,26 +877,36 @@
         // Checks for extension
         function checkForOutBox() {
             const parentElement = targetElement.closest('[id^="component-"]');
-            if (
                 resetZoom();
                 targetElement.isExpanded = true;
             }
 
-            if (
                 resetZoom();
             }
 
-            if (
                 resetZoom();
             }
         }
 
-        if (isExtension)
             targetElement.addEventListener("mousemove", checkForOutBox);
-        }
-
 
-        window.addEventListener(
             resetZoom();
 
         if (isExtension) {
@@ -903,8 +916,6 @@
         });
 
         gradioApp().addEventListener("mousemove", handleMoveByKey);
-
-
     }
 
     applyZoomAndPan(elementIDs.sketch, false);
@@ -924,17 +935,20 @@
     }
 
     if (!mainEl) return;
-    mainEl.addEventListener(
     };
 
     window.applyZoomAndPan = applyZoomAndPan; // Only 1 elements, argument elementID, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image")
     window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension
 });
-
 })();
```
|
| 498 |
+
else if (elemData[elemId].zoomLevel > 2) delta = 0.6;
|
|
|
|
|
|
|
|
|
|
| 499 |
|
| 500 |
zoomPosX = e.clientX;
|
| 501 |
zoomPosY = e.clientY;
|
| 502 |
|
| 503 |
fullScreenMode = false;
|
| 504 |
elemData[elemId].zoomLevel = updateZoom(
|
| 505 |
+
elemData[elemId].zoomLevel + (operation === "+" ? delta : -delta),
|
|
|
|
| 506 |
zoomPosX - targetElement.getBoundingClientRect().left,
|
| 507 |
+
zoomPosY - targetElement.getBoundingClientRect().top,
|
| 508 |
);
|
| 509 |
|
| 510 |
+
targetElement.isZoomed =
|
| 511 |
+
Math.abs(elemData[elemId].zoomLevel - 1.0) > 0.01;
|
| 512 |
}
|
| 513 |
}
|
| 514 |
|
|
|
|
| 519 |
*/
|
| 520 |
|
| 521 |
function fitToElement() {
|
| 522 |
+
// Reset Zoom
|
| 523 |
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
|
| 524 |
|
| 525 |
let parentElement;
|
| 526 |
|
| 527 |
+
if (isExtension)
|
| 528 |
parentElement = targetElement.closest('[id^="component-"]');
|
| 529 |
+
else parentElement = targetElement.parentElement;
|
|
|
|
|
|
|
|
|
|
| 530 |
|
| 531 |
// Get element and screen dimensions
|
| 532 |
const elementWidth = targetElement.offsetWidth;
|
|
|
|
| 552 |
const originYValue = parseFloat(originY);
|
| 553 |
|
| 554 |
const offsetX =
|
| 555 |
+
(screenWidth - elementWidth * scale) / 2 - originXValue * (1 - scale);
|
|
|
|
| 556 |
const offsetY =
|
| 557 |
(screenHeight - elementHeight * scale) / 2.5 -
|
| 558 |
originYValue * (1 - scale);
|
|
|
|
| 578 |
// Fullscreen mode
|
| 579 |
function fitToScreen() {
|
| 580 |
const canvas = gradioApp().querySelector(
|
| 581 |
+
`${elemId} canvas[key="interface"]`,
|
| 582 |
);
|
| 583 |
|
| 584 |
if (!canvas) return;
|
| 585 |
|
| 586 |
+
if (canvas.offsetWidth > 800 || isExtension)
|
| 587 |
+
targetElement.style.width = canvas.offsetWidth + 16 + "px";
|
|
|
|
| 588 |
|
| 589 |
+
if (isExtension) targetElement.style.overflow = "visible";
|
|
|
|
|
|
|
| 590 |
|
| 591 |
if (fullScreenMode) {
|
| 592 |
resetZoom();
|
|
|
|
| 594 |
return;
|
| 595 |
}
|
| 596 |
|
| 597 |
+
// Reset Zoom
|
| 598 |
+
targetElement.style.transform = 'translate(0px, 0px) scale(1.0)';
|
| 599 |
|
| 600 |
// Get scrollbar width to right-align the image
|
| 601 |
const scrollbarWidth =
|
|
|
|
| 649 |
// Handle keydown events
|
| 650 |
function handleKeyDown(event) {
|
| 651 |
// Disable key locks to make pasting from the buffer work correctly
|
| 652 |
+
if (
|
| 653 |
+
(event.ctrlKey && event.code === "KeyV") ||
|
| 654 |
+
(event.ctrlKey && event.code === "KeyC") ||
|
| 655 |
+
event.code === "F5"
|
| 656 |
+
) {
|
| 657 |
return;
|
| 658 |
}
|
| 659 |
|
| 660 |
// before activating shortcut, ensure user is not actively typing in an input field
|
| 661 |
if (!hotkeysConfig.canvas_blur_prompt) {
|
| 662 |
+
if (
|
| 663 |
+
event.target.nodeName === "TEXTAREA" ||
|
| 664 |
+
event.target.nodeName === "INPUT"
|
| 665 |
+
)
|
| 666 |
return;
|
|
|
|
| 667 |
}
|
| 668 |
|
|
|
|
| 669 |
const hotkeyActions = {
|
| 670 |
[hotkeysConfig.canvas_hotkey_reset]: resetZoom,
|
| 671 |
[hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
|
| 672 |
[hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen,
|
| 673 |
+
[hotkeysConfig.canvas_hotkey_shrink_brush]: () =>
|
| 674 |
+
adjustBrushSize(elemId, 10),
|
| 675 |
+
[hotkeysConfig.canvas_hotkey_grow_brush]: () =>
|
| 676 |
+
adjustBrushSize(elemId, -10),
|
| 677 |
};
|
| 678 |
|
| 679 |
const action = hotkeyActions[event.code];
|
|
|
|
| 685 |
if (
|
| 686 |
isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) ||
|
| 687 |
isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust)
|
| 688 |
+
)
|
| 689 |
event.preventDefault();
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 690 |
}
|
| 691 |
|
| 692 |
// Simulation of the function to put a long image into the screen.
|
|
|
|
| 695 |
|
| 696 |
targetElement.isExpanded = false;
|
| 697 |
function autoExpand() {
|
| 698 |
+
const canvas = document.querySelector(
|
| 699 |
+
`${elemId} canvas[key="interface"]`,
|
| 700 |
+
);
|
| 701 |
if (canvas) {
|
| 702 |
+
if (
|
| 703 |
+
hasHorizontalScrollbar(targetElement) &&
|
| 704 |
+
targetElement.isExpanded === false
|
| 705 |
+
) {
|
| 706 |
setTimeout(() => {
|
| 707 |
fitToScreen();
|
| 708 |
resetZoom();
|
|
|
|
| 709 |
targetElement.isExpanded = true;
|
| 710 |
+
}, 25);
|
| 711 |
}
|
| 712 |
}
|
| 713 |
}
|
| 714 |
|
| 715 |
+
// Observers
|
|
|
|
|
|
|
| 716 |
// Creating an observer with a callback function to handle DOM changes
|
| 717 |
+
const observer = new MutationObserver((mutationsList) => {
|
| 718 |
+
for (const mutation of mutationsList) {
|
| 719 |
// If the style attribute of the canvas has changed, by observation it happens only when the picture changes
|
| 720 |
+
if (
|
| 721 |
+
mutation.type === "attributes" &&
|
| 722 |
+
mutation.attributeName === "style" &&
|
| 723 |
+
mutation.target.tagName.toLowerCase() === "canvas"
|
| 724 |
+
) {
|
| 725 |
targetElement.isExpanded = false;
|
| 726 |
+
setTimeout(resetZoom, 25);
|
| 727 |
+
setTimeout(autoExpand, 25);
|
| 728 |
+
setTimeout(() => {
|
| 729 |
+
const btn = targetElement.querySelector("button[aria-label='Undo']");
|
| 730 |
+
btn.click();
|
| 731 |
+
}, 25);
|
| 732 |
}
|
| 733 |
}
|
| 734 |
});
|
|
|
|
| 737 |
if (hotkeysConfig.canvas_auto_expand) {
|
| 738 |
targetElement.addEventListener("mousemove", autoExpand);
|
| 739 |
// Set up an observer to track attribute changes
|
| 740 |
+
observer.observe(targetElement, {
|
| 741 |
+
attributes: true,
|
| 742 |
+
childList: true,
|
| 743 |
+
subtree: true,
|
| 744 |
+
});
|
| 745 |
}
|
| 746 |
|
| 747 |
// Handle events only inside the targetElement
|
|
|
|
| 770 |
targetElement.addEventListener("mouseleave", handleMouseLeave);
|
| 771 |
|
| 772 |
// Reset zoom when click on another tab
|
| 773 |
+
if (elements.img2imgTabs) {
|
| 774 |
+
elements.img2imgTabs.addEventListener("click", resetZoom);
|
| 775 |
+
elements.img2imgTabs.addEventListener("click", () => {
|
| 776 |
+
// targetElement.style.width = "";
|
| 777 |
+
if (parseInt(targetElement.style.width) > 800)
|
| 778 |
+
setTimeout(fitToElement, 0);
|
| 779 |
+
});
|
| 780 |
+
}
|
| 781 |
|
| 782 |
+
targetElement.addEventListener(
|
| 783 |
+
"wheel",
|
| 784 |
+
(e) => {
|
| 785 |
+
// change zoom level
|
| 786 |
+
const operation = (e.deltaY || -e.wheelDelta) > 0 ? "-" : "+";
|
| 787 |
+
changeZoomLevel(operation, e);
|
| 788 |
|
| 789 |
+
// Handle brush size adjustment with ctrl key pressed
|
| 790 |
+
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) {
|
| 791 |
+
e.preventDefault();
|
| 792 |
|
| 793 |
+
if (hotkeysConfig.canvas_hotkey_adjust === "Alt")
|
| 794 |
+
interactedWithAltKey = true;
|
| 795 |
+
|
| 796 |
+
// Increase or decrease brush size based on scroll direction
|
| 797 |
+
adjustBrushSize(elemId, e.deltaY);
|
| 798 |
+
}
|
| 799 |
+
},
|
| 800 |
+
{ passive: false },
|
| 801 |
+
);
|
| 802 |
|
| 803 |
// Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
|
| 804 |
function handleMoveKeyDown(e) {
|
|
|
|
| 805 |
// Disable key locks to make pasting from the buffer work correctly
|
| 806 |
+
if (
|
| 807 |
+
(e.ctrlKey && e.code === "KeyV") ||
|
| 808 |
+
(e.ctrlKey && event.code === "KeyC") ||
|
| 809 |
+
e.code === "F5"
|
| 810 |
+
) {
|
| 811 |
return;
|
| 812 |
}
|
| 813 |
|
| 814 |
// before activating shortcut, ensure user is not actively typing in an input field
|
| 815 |
if (!hotkeysConfig.canvas_blur_prompt) {
|
| 816 |
+
if (e.target.nodeName === "TEXTAREA" || e.target.nodeName === "INPUT")
|
| 817 |
return;
|
|
|
|
| 818 |
}
|
| 819 |
|
|
|
|
| 820 |
if (e.code === hotkeysConfig.canvas_hotkey_move) {
|
| 821 |
if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) {
|
| 822 |
e.preventDefault();
|
|
|
|
| 827 |
}
|
| 828 |
|
| 829 |
function handleMoveKeyUp(e) {
|
| 830 |
+
if (e.code === hotkeysConfig.canvas_hotkey_move) isMoving = false;
|
|
|
|
|
|
|
| 831 |
}
|
| 832 |
|
| 833 |
document.addEventListener("keydown", handleMoveKeyDown);
|
| 834 |
document.addEventListener("keyup", handleMoveKeyUp);
|
| 835 |
|
| 836 |
+
/** Prevent firefox from opening main menu when alt is used as a hotkey for zoom or brush size */
|
| 837 |
+
function handleAltKeyUp(e) {
|
| 838 |
+
if (e.key !== "Alt" || !interactedWithAltKey) return;
|
| 839 |
+
e.preventDefault();
|
| 840 |
+
interactedWithAltKey = false;
|
| 841 |
+
}
|
| 842 |
+
|
| 843 |
+
document.addEventListener("keyup", handleAltKeyUp);
|
| 844 |
+
|
| 845 |
// Detect zoom level and update the pan speed.
|
| 846 |
function updatePanPosition(movementX, movementY) {
|
| 847 |
let panSpeed = 2;
|
| 848 |
|
| 849 |
+
if (elemData[elemId].zoomLevel > 8) panSpeed = 3.5;
|
|
|
|
|
|
|
| 850 |
|
| 851 |
elemData[elemId].panX += movementX * panSpeed;
|
| 852 |
elemData[elemId].panY += movementY * panSpeed;
|
|
|
|
| 863 |
updatePanPosition(e.movementX, e.movementY);
|
| 864 |
targetElement.style.pointerEvents = "none";
|
| 865 |
|
| 866 |
+
if (isExtension) targetElement.style.overflow = "visible";
|
|
|
|
|
|
|
|
|
|
| 867 |
} else {
|
| 868 |
targetElement.style.pointerEvents = "auto";
|
| 869 |
}
|
|
|
|
| 877 |
// Checks for extension
|
| 878 |
function checkForOutBox() {
|
| 879 |
const parentElement = targetElement.closest('[id^="component-"]');
|
| 880 |
+
if (
|
| 881 |
+
parentElement.offsetWidth < targetElement.offsetWidth &&
|
| 882 |
+
!targetElement.isExpanded
|
| 883 |
+
) {
|
| 884 |
resetZoom();
|
| 885 |
targetElement.isExpanded = true;
|
| 886 |
}
|
| 887 |
|
| 888 |
+
if (
|
| 889 |
+
parentElement.offsetWidth < targetElement.offsetWidth &&
|
| 890 |
+
elemData[elemId].zoomLevel == 1
|
| 891 |
+
) {
|
| 892 |
resetZoom();
|
| 893 |
}
|
| 894 |
|
| 895 |
+
if (
|
| 896 |
+
parentElement.offsetWidth < targetElement.offsetWidth &&
|
| 897 |
+
targetElement.offsetWidth * elemData[elemId].zoomLevel >
|
| 898 |
+
parentElement.offsetWidth &&
|
| 899 |
+
elemData[elemId].zoomLevel < 1 &&
|
| 900 |
+
!targetElement.isZoomed
|
| 901 |
+
) {
|
| 902 |
resetZoom();
|
| 903 |
}
|
| 904 |
}
|
| 905 |
|
| 906 |
+
if (isExtension)
|
| 907 |
targetElement.addEventListener("mousemove", checkForOutBox);
|
|
|
|
|
|
|
| 908 |
|
| 909 |
+
window.addEventListener("resize", (e) => {
|
| 910 |
resetZoom();
|
| 911 |
|
| 912 |
if (isExtension) {
|
|
|
|
| 916 |
});
|
| 917 |
|
| 918 |
gradioApp().addEventListener("mousemove", handleMoveByKey);
|
|
|
|
|
|
|
| 919 |
}
|
| 920 |
|
| 921 |
applyZoomAndPan(elementIDs.sketch, false);
|
|
|
|
| 935 |
}
|
| 936 |
|
| 937 |
if (!mainEl) return;
|
| 938 |
+
mainEl.addEventListener(
|
| 939 |
+
"click",
|
| 940 |
+
async () => {
|
| 941 |
+
for (const elementID of elementIDs) {
|
| 942 |
+
const el = await waitForElement(elementID);
|
| 943 |
+
if (!el) break;
|
| 944 |
+
applyZoomAndPan(elementID);
|
| 945 |
+
}
|
| 946 |
+
},
|
| 947 |
+
{ once: true },
|
| 948 |
+
);
|
| 949 |
};
|
| 950 |
|
| 951 |
window.applyZoomAndPan = applyZoomAndPan; // Only 1 elements, argument elementID, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image")
|
| 952 |
window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension
|
| 953 |
});
|
|
|
|
| 954 |
})();
|
extensions-builtin/forge_legacy_preprocessors/annotator/densepose/densepose.py
CHANGED
@@ -2,7 +2,7 @@ from typing import Tuple
 import math
 import numpy as np
 from enum import IntEnum
-from typing import List,
+from typing import List, Union
 import torch
 from torch.nn import functional as F
 import logging
extensions-builtin/forge_legacy_preprocessors/annotator/hed/__init__.py
CHANGED
@@ -11,7 +11,6 @@ import torch
 import numpy as np

 from einops import rearrange
-import os
 from modules import devices
 from annotator.annotator_path import models_path
 from annotator.util import safe_step, nms
extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/multi_depth_model_woauxi.py
CHANGED
@@ -2,7 +2,6 @@ from . import network_auxi as network
 from .net_tools import get_func
 import torch
 import torch.nn as nn
-from modules import devices


 class RelDepthModel(nn.Module):
extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/util/visualizer.py
CHANGED
@@ -1,11 +1,9 @@
-import numpy as np
 import os
 import sys
 import ntpath
 import time
 from . import util, html
 from subprocess import Popen, PIPE
-import torch


 if sys.version_info[0] == 2:
extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/blocks.py
CHANGED
@@ -5,7 +5,6 @@ from .vit import (
     _make_pretrained_vitb_rn50_384,
     _make_pretrained_vitl16_384,
     _make_pretrained_vitb16_384,
-    forward_vit,
 )


extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/dpt_depth.py
CHANGED
@@ -1,10 +1,8 @@
 import torch
 import torch.nn as nn
-import torch.nn.functional as F

 from .base_model import BaseModel
 from .blocks import (
-    FeatureFusionBlock,
     FeatureFusionBlock_custom,
     Interpolate,
     _make_encoder,
extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/midas_net_custom.py
CHANGED
@@ -8,7 +8,6 @@ import torch.nn as nn

 from .base_model import BaseModel
 from .blocks import (
-    FeatureFusionBlock,
     FeatureFusionBlock_custom,
     Interpolate,
     _make_encoder,
extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/__init__.py
CHANGED
@@ -49,6 +49,6 @@ def apply_mlsd(input_image, thr_v, thr_d):
             cv2.line(
                 img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1
             )
-    except Exception
+    except Exception:
         pass
     return img_output[:, :, 0]
extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_large.py
CHANGED
@@ -1,5 +1,3 @@
-import os
-import sys
 import torch
 import torch.nn as nn
 import torch.utils.model_zoo as model_zoo
extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_tiny.py
CHANGED
@@ -1,5 +1,3 @@
-import os
-import sys
 import torch
 import torch.nn as nn
 import torch.utils.model_zoo as model_zoo
extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/utils.py
CHANGED
@@ -9,7 +9,6 @@ Copyright 2021-present NAVER Corp.
 Apache License v2.0
 """

-import os
 import numpy as np
 import cv2
 import torch
@@ -648,7 +647,7 @@ def pred_squares(
         score_array = score_array[sorted_idx]
         squares = squares[sorted_idx]

-    except Exception
+    except Exception:
         pass

     """return list
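Both `except` hunks above restore the colon that Python requires after the exception class; each site deliberately swallows the error and continues. A minimal sketch of the same "best effort" behavior using only the standard library (`risky_step` is a hypothetical stand-in for the guarded drawing/sorting code):

import contextlib

def risky_step():
    raise RuntimeError("drawing failed")

# Equivalent to the restored `except Exception: pass` blocks above.
with contextlib.suppress(Exception):
    risky_step()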
extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmcv/ops/fused_bias_leakyrelu.py
CHANGED
@@ -179,7 +179,7 @@ class FusedBiasLeakyReLUFunction(Function):


 class FusedBiasLeakyReLU(nn.Module):
-    """Fused bias leaky ReLU.
+    r"""Fused bias leaky ReLU.

     This function is introduced in the StyleGAN2:
     http://arxiv.org/abs/1912.04958
@@ -213,7 +213,7 @@ class FusedBiasLeakyReLU(nn.Module):


 def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2**0.5):
-    """Fused bias leaky ReLU function.
+    r"""Fused bias leaky ReLU function.

     This function is introduced in the StyleGAN2:
     http://arxiv.org/abs/1912.04958
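The change above only prefixes the docstrings with `r`. A raw string keeps backslash sequences literal, which matters when a docstring carries TeX-style notation; without it, newer Python versions warn about unrecognized escape sequences. A small illustration (the function name is hypothetical):

def gain_note():
    r"""Raw docstring example.

    Sequences such as \sqrt{2} or \times stay literal here; in a plain
    string literal they would trigger a SyntaxWarning for unknown escapes.
    """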
extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmseg/apis/inference.py
CHANGED
@@ -1,4 +1,3 @@
-import matplotlib.pyplot as plt
 import annotator.mmpkg.mmcv as mmcv
 import torch
 from annotator.mmpkg.mmcv.parallel import collate, scatter
extensions-builtin/forge_legacy_preprocessors/annotator/openpose/body.py
CHANGED
@@ -1,13 +1,10 @@
 import cv2
 import numpy as np
 import math
-import time
 from scipy.ndimage import gaussian_filter
 import matplotlib.pyplot as plt
-import matplotlib
 import torch
-from
-from typing import NamedTuple, List, Union
+from typing import List

 from . import util
 from .model import bodypose_model
extensions-builtin/forge_legacy_preprocessors/annotator/openpose/face.py
CHANGED
@@ -3,7 +3,6 @@ import numpy as np
 from torchvision.transforms import ToTensor, ToPILImage
 import torch
 import torch.nn.functional as F
-import cv2

 from . import util
 from torch.nn import Conv2d, Module, ReLU, MaxPool2d, init
extensions-builtin/forge_legacy_preprocessors/annotator/openpose/hand.py
CHANGED
@@ -1,11 +1,6 @@
 import cv2
-import json
 import numpy as np
-import math
-import time
 from scipy.ndimage import gaussian_filter
-import matplotlib.pyplot as plt
-import matplotlib
 import torch
 from skimage.measure import label

extensions-builtin/forge_legacy_preprocessors/annotator/openpose/model.py
CHANGED
@@ -1,7 +1,6 @@
 import torch
 from collections import OrderedDict

-import torch
 import torch.nn as nn


extensions-builtin/forge_legacy_preprocessors/annotator/openpose/types.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import NamedTuple, List, Optional
+from typing import NamedTuple, List, Optional


 class Keypoint(NamedTuple):
extensions-builtin/forge_legacy_preprocessors/annotator/pidinet/model.py
CHANGED
@@ -5,8 +5,6 @@ Date: Feb 18, 2021

 import math

-import cv2
-import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
extensions-builtin/forge_legacy_preprocessors/annotator/teed/Fsmish.py
CHANGED
@@ -7,7 +7,6 @@ Wang, Xueliang, Honge Ren, and Achuan Wang.

 # import pytorch
 import torch
-import torch.nn.functional as F


 @torch.jit.script
extensions-builtin/forge_legacy_preprocessors/annotator/teed/Xsmish.py
CHANGED
@@ -7,8 +7,6 @@ smish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + sigmoid(x)))
 """

 # import pytorch
-import torch
-import torch.nn.functional as F
 from torch import nn

 # import activation functions
extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py
CHANGED
@@ -2,7 +2,6 @@ import timm

 import torch.nn as nn

-from pathlib import Path
 from .utils import activations, forward_default, get_activation

 from ..external.next_vit.classification.nextvit import *
extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py
CHANGED
@@ -5,10 +5,6 @@ from .backbones.beit import (
     _make_pretrained_beitl16_512,
     _make_pretrained_beitl16_384,
     _make_pretrained_beitb16_384,
-    forward_beit,
-)
-from .backbones.swin_common import (
-    forward_swin,
 )
 from .backbones.swin2 import (
     _make_pretrained_swin2l24_384,
@@ -20,13 +16,11 @@ from .backbones.swin import (
 )
 from .backbones.levit import (
     _make_pretrained_levit_384,
-    forward_levit,
 )
 from .backbones.vit import (
     _make_pretrained_vitb_rn50_384,
     _make_pretrained_vitl16_384,
     _make_pretrained_vitb16_384,
-    forward_vit,
 )


extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py
CHANGED
@@ -8,7 +8,6 @@ import torch.nn as nn

 from .base_model import BaseModel
 from .blocks import (
-    FeatureFusionBlock,
     FeatureFusionBlock_custom,
     Interpolate,
     _make_encoder,
extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py
CHANGED
@@ -1,14 +1,11 @@
 #!/usr/bin/env python3
 from __future__ import print_function

-import roslib

 # roslib.load_manifest('my_package')
 import sys
 import rospy
 import cv2
-import numpy as np
-from std_msgs.msg import String
 from sensor_msgs.msg import Image
 from cv_bridge import CvBridge, CvBridgeError

extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py
CHANGED
@@ -1,14 +1,11 @@
 #!/usr/bin/env python3
 from __future__ import print_function

-import roslib

 # roslib.load_manifest('my_package')
 import sys
 import rospy
 import cv2
-import numpy as np
-from std_msgs.msg import String
 from sensor_msgs.msg import Image
 from cv_bridge import CvBridge, CvBridgeError

extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py
CHANGED
@@ -1,13 +1,10 @@
 #!/usr/bin/env python3


-import roslib

 # roslib.load_manifest('my_package')
-import sys
 import rospy
 import cv2
-from std_msgs.msg import String
 from sensor_msgs.msg import Image
 from cv_bridge import CvBridge, CvBridgeError

extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py
CHANGED
@@ -2,16 +2,10 @@

 import os
 import ntpath
-import glob
 import torch
-import utils
-import cv2
 import numpy as np
-from torchvision.transforms import Compose, Normalize
-from torchvision import transforms

 from shutil import copyfile
-import fileinput
 import sys

 sys.path.append(os.getcwd() + "/..")
@@ -46,7 +40,6 @@ def restore_file():
 modify_file()

 from midas.midas_net import MidasNet
-from midas.transforms import Resize, NormalizeImage, PrepareForNet

 restore_file()

extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py
CHANGED
@@ -4,14 +4,12 @@ import os
 import glob
 import utils
 import cv2
-import sys
 import numpy as np
 import argparse

-import onnx
 import onnxruntime as rt

-from transforms import Resize,
+from transforms import Resize, PrepareForNet


 def run(input_path, output_path, model_path, model_type="large"):
extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py
CHANGED
@@ -8,7 +8,7 @@ import argparse

 import tensorflow as tf

-from transforms import Resize,
+from transforms import Resize, PrepareForNet


 def run(input_path, output_path, model_path, model_type="large"):
extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/utils/misc.py
CHANGED
@@ -24,11 +24,7 @@

 """Miscellaneous utility functions."""

-from scipy import ndimage

-import base64
-import math
-import re
 from io import BytesIO

 import matplotlib
extensions-builtin/forge_legacy_preprocessors/install.py
CHANGED
@@ -32,7 +32,7 @@ def try_install_from_wheel(pkg_name: str, wheel_url: str):

     try:
         launch.run_pip(
-            f"install
+            f"install {wheel_url}",
             f"Legacy Preprocessor Requirement: {pkg_name}",
         )
     except Exception as e:
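For context, a minimal sketch of the helper this hunk repairs, assuming the A1111/Forge launch API (`launch.is_installed`, `launch.run_pip`); only the `run_pip` call and its two f-strings are confirmed by the diff itself:

import launch

def try_install_from_wheel(pkg_name: str, wheel_url: str):
    # Skip work when the package is already importable.
    if launch.is_installed(pkg_name):
        return
    try:
        launch.run_pip(
            f"install {wheel_url}",
            f"Legacy Preprocessor Requirement: {pkg_name}",
        )
    except Exception as e:
        # Installation failures are reported but not fatal.
        print(e)
        print(f"Warning: failed to install {pkg_name}.")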
extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor.py
CHANGED
@@ -3,7 +3,6 @@ import cv2
 import numpy as np
 import torch
 import math
-import functools

 from dataclasses import dataclass
 from transformers.models.clip.modeling_clip import CLIPVisionModelOutput
@@ -849,9 +848,9 @@ class InsightFaceModel:
         img, remove_pad = resize_image_with_pad(img, res)
         face_info = self.model.get(img)
         if not face_info:
-            raise Exception(
+            raise Exception("Insightface: No face found in image.")
         if len(face_info) > 1:
-            print("Insightface: More than one face is detected in the image. "
+            print("Insightface: More than one face is detected in the image. " "Only the biggest one will be used.")
             # only use the maximum face
             face_info = sorted(
                 face_info,
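The truncated `sorted(face_info, ...)` call above implements the "only the biggest one will be used" behavior. A hedged sketch of that selection; the `bbox` layout (x1, y1, x2, y2) is the usual InsightFace convention, not something this diff shows:

def biggest_face(face_info: list) -> dict:
    # Rank detected faces by bounding-box area and keep the largest.
    return sorted(
        face_info,
        key=lambda f: (f["bbox"][2] - f["bbox"][0]) * (f["bbox"][3] - f["bbox"][1]),
    )[-1]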
extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py
CHANGED
@@ -1,3 +1,5 @@
+import functools
+
 from legacy_preprocessors.preprocessor import *


extensions-builtin/sd_forge_controlnet/lib_controlnet/controlnet_ui/controlnet_ui_group.py
CHANGED
@@ -384,7 +384,7 @@ class ControlNetUiGroup:
         with gr.Row(elem_classes=["controlnet_control_type", "controlnet_row"]):
             self.type_filter = gr.Radio(
                 global_state.get_all_preprocessor_tags(),
-                label=
+                label="Control Type",
                 value="All",
                 elem_id=f"{elem_id_tabname}_{tabname}_controlnet_type_filter_radio",
                 elem_classes="controlnet_control_type_filter_group",
@@ -420,7 +420,7 @@ class ControlNetUiGroup:

         with gr.Row(elem_classes=["controlnet_weight_steps", "controlnet_row"]):
             self.weight = gr.Slider(
-                label=
+                label="Control Weight",
                 value=self.default_unit.weight,
                 minimum=0.0,
                 maximum=2.0,
@@ -960,6 +960,7 @@ class ControlNetUiGroup:
     @staticmethod
     def reset():
         ControlNetUiGroup.a1111_context = A1111Context()
+        ControlNetUiGroup.all_callbacks_registered = False
         ControlNetUiGroup.callbacks_registered = False
         ControlNetUiGroup.all_ui_groups = []

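The two label restorations above are plain gradio keyword arguments. A minimal self-contained sketch (the choices list is illustrative, not the real preprocessor tag set):

import gradio as gr

type_filter = gr.Radio(["All", "Canny", "Depth"], label="Control Type", value="All")
weight = gr.Slider(label="Control Weight", value=1.0, minimum=0.0, maximum=2.0, step=0.05)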
extensions-builtin/sd_forge_controlnet/lib_controlnet/external_code.py
CHANGED
@@ -128,7 +128,7 @@ def pixel_perfect_resolution(
     else:
         estimation = max(k0, k1) * float(min(raw_H, raw_W))

-    logger.debug(
+    logger.debug("Pixel Perfect Computation:")
     logger.debug(f"resize_mode = {resize_mode}")
     logger.debug(f"raw_H = {raw_H}")
     logger.debug(f"raw_W = {raw_W}")
extensions-builtin/sd_forge_controlnet/scripts/controlnet.py
CHANGED
@@ -1,37 +1,28 @@
-from modules import shared, scripts, script_callbacks, masking, images
-from modules_forge.supported_controlnet import ControlModelPatcher
-from modules_forge.shared import try_load_supported_control_model
-from modules_forge.forge_util import HWC3, numpy_to_pytorch
-from modules.processing import (
-    StableDiffusionProcessingImg2Img,
-    StableDiffusionProcessingTxt2Img,
-    StableDiffusionProcessing,
-)
-
-from typing import Optional
-from PIL import Image
-import gradio as gr
-import numpy as np
 import functools
-import
-import cv2
+from typing import Optional, TYPE_CHECKING

-
-from
-from lib_controlnet.utils import (
-    align_dim_latent,
-    crop_and_resize_image,
-    judge_image_type,
-    prepare_mask,
-    set_numpy_seed,
-)
+if TYPE_CHECKING:
+    from modules_forge.supported_preprocessor import Preprocessor
+
+import cv2
+import gradio as gr
+import numpy as np
+import torch
+from lib_controlnet import external_code, global_state
+from lib_controlnet.api import controlnet_api
 from lib_controlnet.controlnet_ui.controlnet_ui_group import ControlNetUiGroup
 from lib_controlnet.enums import HiResFixOption
-from lib_controlnet.
+from lib_controlnet.external_code import ControlNetUnit
 from lib_controlnet.infotext import Infotext
 from lib_controlnet.logging import logger
+from lib_controlnet.utils import align_dim_latent, crop_and_resize_image, judge_image_type, prepare_mask, set_numpy_seed
+from PIL import Image, ImageOps

+from modules import images, masking, script_callbacks, scripts, shared
+from modules.processing import StableDiffusionProcessing, StableDiffusionProcessingImg2Img, StableDiffusionProcessingTxt2Img
+from modules_forge.forge_util import HWC3, numpy_to_pytorch
+from modules_forge.shared import try_load_supported_control_model
+from modules_forge.supported_controlnet import ControlModelPatcher

 global_state.update_controlnet_filenames()

@@ -80,9 +71,7 @@ class ControlNetForForgeOfficial(scripts.Script):
                 with gr.Tab(label=f"ControlNet Unit {i + 1}", id=i):
                     group = ControlNetUiGroup(is_img2img, default_unit)
                     ui_groups.append(group)
-                    controls.append(
-                        group.render(f"ControlNet-{i}", elem_id_tabname)
-                    )
+                    controls.append(group.render(f"ControlNet-{i}", elem_id_tabname))

         for i, ui_group in enumerate(ui_groups):
             infotext.register_unit(i, ui_group)
@@ -93,64 +82,36 @@ class ControlNetForForgeOfficial(scripts.Script):

         return controls

-    def get_enabled_units(self, units):
-
-        units = [
-            ControlNetUnit.from_dict(unit) if isinstance(unit, dict) else unit
-            for unit in units
-        ]
+    def get_enabled_units(self, units: list[ControlNetUnit]):  # Parse dict from API calls
+        units = [ControlNetUnit.from_dict(unit) if isinstance(unit, dict) else unit for unit in units]
         assert all(isinstance(unit, ControlNetUnit) for unit in units)
         enabled_units = [x for x in units if x.enabled]
         return enabled_units

     @staticmethod
-    def try_crop_image_with_a1111_mask(
-        p: StableDiffusionProcessing,
-        input_image: np.ndarray,
-        resize_mode: external_code.ResizeMode,
-        preprocessor,
-    ) -> np.ndarray:
+    def try_crop_image_with_a1111_mask(p: StableDiffusionProcessing, input_image: np.ndarray, resize_mode: external_code.ResizeMode, preprocessor: "Preprocessor") -> np.ndarray:
         a1111_mask_image: Optional[Image.Image] = getattr(p, "image_mask", None)
-        is_only_masked_inpaint: bool = (
-            issubclass(type(p), StableDiffusionProcessingImg2Img)
-            and p.inpaint_full_res
-            and a1111_mask_image is not None
-        )
+        is_only_masked_inpaint: bool = issubclass(type(p), StableDiffusionProcessingImg2Img) and p.inpaint_full_res and a1111_mask_image is not None

-        if
-            preprocessor.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab
-            and is_only_masked_inpaint
-        ):
+        if preprocessor.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab and is_only_masked_inpaint:
             logger.info("Crop input image based on A1111 mask.")
             input_image = [input_image[:, :, i] for i in range(input_image.shape[2])]
             input_image = [Image.fromarray(x) for x in input_image]

             mask = prepare_mask(a1111_mask_image, p)

-            crop_region = masking.get_crop_region(
-
-            )
-            crop_region = masking.expand_crop_region(
-                crop_region, p.width, p.height, mask.width, mask.height
-            )
+            crop_region = masking.get_crop_region(np.array(mask), p.inpaint_full_res_padding)
+            crop_region = masking.expand_crop_region(crop_region, p.width, p.height, mask.width, mask.height)

-            input_image = [
-                images.resize_image(resize_mode.int_value(), i, mask.width, mask.height)
-                for i in input_image
-            ]
+            input_image = [images.resize_image(resize_mode.int_value(), i, mask.width, mask.height) for i in input_image]
             input_image = [x.crop(crop_region) for x in input_image]
-            input_image = [
-                images.resize_image(
-                    external_code.ResizeMode.OUTER_FIT.int_value(), x, p.width, p.height
-                )
-                for x in input_image
-            ]
+            input_image = [images.resize_image(external_code.ResizeMode.OUTER_FIT.int_value(), x, p.width, p.height) for x in input_image]
             input_image = [np.asarray(x)[:, :, 0] for x in input_image]
             input_image = np.stack(input_image, axis=2)

         return input_image

-    def get_input_data(self, p, unit, preprocessor, h, w):
+    def get_input_data(self, p: StableDiffusionProcessing, unit: ControlNetUnit, preprocessor: "Preprocessor", h: int, w: int):
         resize_mode = external_code.resize_mode_from_value(unit.resize_mode)
         image_list = []

@@ -159,6 +120,9 @@ class ControlNetForForgeOfficial(scripts.Script):
         a1111_i2i_image = getattr(p, "init_images", [None])[0]
         a1111_i2i_mask = getattr(p, "image_mask", None)

+        if a1111_i2i_mask is not None and getattr(p, "inpainting_mask_invert", False):
+            a1111_i2i_mask = ImageOps.invert(a1111_i2i_mask)
+
         using_a1111_data = False

         if unit.image is None:
@@ -198,16 +162,11 @@ class ControlNetForForgeOfficial(scripts.Script):
                 (image.shape[1], image.shape[0]),
                 interpolation=cv2.INTER_NEAREST,
             )
-            mask = self.try_crop_image_with_a1111_mask(
-                p, mask, resize_mode, preprocessor
-            )
+            mask = self.try_crop_image_with_a1111_mask(p, mask, resize_mode, preprocessor)

             image_list = [[image, mask]]

-        if
-            resize_mode == external_code.ResizeMode.OUTER_FIT
-            and preprocessor.expand_mask_when_resize_and_fill
-        ):
+        if resize_mode == external_code.ResizeMode.OUTER_FIT and preprocessor.expand_mask_when_resize_and_fill:
             new_image_list = []
             for input_image, input_mask in image_list:
                 if input_mask is None:
@@ -232,16 +191,12 @@ class ControlNetForForgeOfficial(scripts.Script):
         return image_list, resize_mode

     @staticmethod
-    def get_target_dimensions(
-        p: StableDiffusionProcessing,
-    ) -> tuple[int, int, int, int]:
+    def get_target_dimensions(p: StableDiffusionProcessing) -> tuple[int, int, int, int]:
         """Returns (h, w, hr_h, hr_w)."""
         h = align_dim_latent(p.height)
         w = align_dim_latent(p.width)

-        high_res_fix = getattr(p, "enable_hr", False) and isinstance(
-            p, StableDiffusionProcessingTxt2Img
-        )
+        high_res_fix = getattr(p, "enable_hr", False) and isinstance(p, StableDiffusionProcessingTxt2Img)

         if high_res_fix:
             if p.hr_resize_x == 0 and p.hr_resize_y == 0:
@@ -258,20 +213,11 @@ class ControlNetForForgeOfficial(scripts.Script):
         return h, w, hr_y, hr_x

     @torch.no_grad()
-    def process_unit_after_click_generate(
-        self,
-        p: StableDiffusionProcessing,
-        unit: ControlNetUnit,
-        params: ControlNetCachedParameters,
-        *args,
-        **kwargs,
-    ) -> bool:
+    def process_unit_after_click_generate(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs) -> bool:

         h, w, hr_y, hr_x = self.get_target_dimensions(p)

-        has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(
-            p, "enable_hr", False
-        )

         if unit.use_preview_as_input:
             unit.module = "None"
@@ -322,9 +268,7 @@ class ControlNetForForgeOfficial(scripts.Script):
                 control_masks.append(input_mask)

             if len(input_list) > 1 and not preprocessor_output_is_image:
-                logger.info(
-                    "Batch wise input only support controlnet, control-lora, and t2i adapters!"
-                )
                 break

         if has_high_res_fix:
@@ -335,14 +279,7 @@ class ControlNetForForgeOfficial(scripts.Script):
         alignment_indices = [i % len(preprocessor_outputs) for i in range(p.batch_size)]

         def attach_extra_result_image(img: np.ndarray, is_high_res: bool = False):
-            if (
-                not shared.opts.data.get("control_net_no_detectmap", False)
-                and (
-                    (is_high_res and hr_option.high_res_enabled)
-                    or (not is_high_res and hr_option.low_res_enabled)
-                )
-                and unit.save_detected_map
-            ):
                 p.extra_result_images.append(img)

         if preprocessor_output_is_image:
@@ -350,35 +287,21 @@ class ControlNetForForgeOfficial(scripts.Script):
             params.control_cond_for_hr_fix = []

             for preprocessor_output in preprocessor_outputs:
-                control_cond = crop_and_resize_image(
-
-                )
-                attach_extra_result_image(
-                    external_code.visualize_inpaint_mask(control_cond)
-                )
-                params.control_cond.append(
-                    numpy_to_pytorch(control_cond).movedim(-1, 1)
-                )

-            params.control_cond = torch.cat(params.control_cond, dim=0)[
-                alignment_indices
-            ].contiguous()

             if has_high_res_fix:
                 for preprocessor_output in preprocessor_outputs:
-                    control_cond_for_hr_fix = crop_and_resize_image(
-                        preprocessor_output, resize_mode, hr_y, hr_x
-                    )
                     attach_extra_result_image(
                         external_code.visualize_inpaint_mask(control_cond_for_hr_fix),
                         is_high_res=True,
                     )
-                    params.control_cond_for_hr_fix.append(
-
-                    )
-                params.control_cond_for_hr_fix = torch.cat(
-                    params.control_cond_for_hr_fix, dim=0
-                )[alignment_indices].contiguous()
             else:
                 params.control_cond_for_hr_fix = params.control_cond
         else:
@@ -392,30 +315,20 @@ class ControlNetForForgeOfficial(scripts.Script):

             for input_mask in control_masks:
                 fill_border = preprocessor.fill_mask_with_one_when_resize_and_fill
-                control_mask = crop_and_resize_image(
-                    input_mask, resize_mode, h, w, fill_border
-                )
                 attach_extra_result_image(control_mask)
                 control_mask = numpy_to_pytorch(control_mask).movedim(-1, 1)[:, :1]
                 params.control_mask.append(control_mask)

                 if has_high_res_fix:
-                    control_mask_for_hr_fix = crop_and_resize_image(
-                        input_mask, resize_mode, hr_y, hr_x, fill_border
-                    )
                     attach_extra_result_image(control_mask_for_hr_fix, is_high_res=True)
-                    control_mask_for_hr_fix = numpy_to_pytorch(
-                        control_mask_for_hr_fix
-                    ).movedim(-1, 1)[:, :1]
                     params.control_mask_for_hr_fix.append(control_mask_for_hr_fix)

-            params.control_mask = torch.cat(params.control_mask, dim=0)[
-                alignment_indices
-            ].contiguous()
             if has_high_res_fix:
-                params.control_mask_for_hr_fix = torch.cat(
-                    params.control_mask_for_hr_fix, dim=0
-                )[alignment_indices].contiguous()
             else:
                 params.control_mask_for_hr_fix = params.control_mask

@@ -434,31 +347,18 @@ class ControlNetForForgeOfficial(scripts.Script):

         params.preprocessor = preprocessor

-        params.preprocessor.process_after_running_preprocessors(
-
-        )
-        params.model.process_after_running_preprocessors(
-            process=p, params=params, **kwargs
-        )

         logger.info(f"{type(params.model).__name__}: {model_filename}")
         return True

     @torch.no_grad()
-    def process_unit_before_every_sampling(
-        self,
-        p: StableDiffusionProcessing,
-        unit: ControlNetUnit,
-        params: ControlNetCachedParameters,
-        *args,
-        **kwargs,
-    ):

         is_hr_pass = getattr(p, "is_hr_pass", False)

-        has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(
-            p, "enable_hr", False
-        )

         if has_high_res_fix:
             hr_option = HiResFixOption.from_value(unit.hr_option)
@@ -466,11 +366,11 @@ class ControlNetForForgeOfficial(scripts.Script):
             hr_option = HiResFixOption.BOTH

         if has_high_res_fix and is_hr_pass and (not hr_option.high_res_enabled):
-            logger.info(
             return

         if has_high_res_fix and (not is_hr_pass) and (not hr_option.low_res_enabled):
-            logger.info(
             return

         if is_hr_pass:
@@ -543,16 +443,13 @@ class ControlNetForForgeOfficial(scripts.Script):
             params.model.positive_advanced_weighting = soft_weighting.copy()
             params.model.negative_advanced_weighting = soft_weighting.copy()

-        cond, mask = params.preprocessor.process_before_every_sampling(
-            p, cond, mask, *args, **kwargs
-        )

         params.model.advanced_mask_weighting = mask

         params.model.process_before_every_sampling(p, cond, mask, *args, **kwargs)

         logger.info(f"ControlNet Method {params.preprocessor.name} patched.")
-        return

     @staticmethod
     def bound_check_params(unit: ControlNetUnit) -> None:
@@ -567,35 +464,16 @@ class ControlNetForForgeOfficial(scripts.Script):
         preprocessor = global_state.get_preprocessor(unit.module)

         if unit.processor_res < 0:
-            unit.processor_res = int(
-                preprocessor.slider_resolution.gradio_update_kwargs.get("value", 512)
-            )
-
         if unit.threshold_a < 0:
-            unit.threshold_a = int(
-                preprocessor.slider_1.gradio_update_kwargs.get("value", 1.0)
-            )
-
         if unit.threshold_b < 0:
-            unit.threshold_b = int(
-                preprocessor.slider_2.gradio_update_kwargs.get("value", 1.0)
-            )
-
-        return

     @torch.no_grad()
-    def process_unit_after_every_sampling(
-        self,
-        p: StableDiffusionProcessing,
-        unit: ControlNetUnit,
-        params: ControlNetCachedParameters,
-        *args,
-        **kwargs,
-    ):
-
         params.preprocessor.process_after_every_sampling(p, params, *args, **kwargs)
         params.model.process_after_every_sampling(p, params, *args, **kwargs)
-        return

     @torch.no_grad()
     def process(self, p, *args, **kwargs):
@@ -614,19 +492,15 @@ class ControlNetForForgeOfficial(scripts.Script):
             if i not in self.current_params:
                 logger.warning(f"ControlNet Unit {i + 1} is skipped...")
                 continue
-            self.process_unit_before_every_sampling(
-                p, unit, self.current_params[i], *args, **kwargs
-            )

     @torch.no_grad()
     def postprocess_batch_list(self, p, pp, *args, **kwargs):
         for i, unit in enumerate(self.get_enabled_units(args)):
             if i in self.current_params:
-                self.process_unit_after_every_sampling(
-                    p, unit, self.current_params[i], pp, *args, **kwargs
-                )

-    def postprocess(self,
         self.current_params = {}

@@ -689,4 +563,6 @@ script_callbacks.on_ui_settings(on_ui_settings)
 script_callbacks.on_infotext_pasted(Infotext.on_infotext_pasted)
 script_callbacks.on_after_component(ControlNetUiGroup.on_after_component)
 script_callbacks.on_before_reload(ControlNetUiGroup.reset)
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 217 |
|
| 218 |
h, w, hr_y, hr_x = self.get_target_dimensions(p)
|
| 219 |
|
| 220 |
+
has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(p, "enable_hr", False)
|
|
|
|
|
|
|
| 221 |
|
| 222 |
if unit.use_preview_as_input:
|
| 223 |
unit.module = "None"
|
|
|
|
| 268 |
control_masks.append(input_mask)
|
| 269 |
|
| 270 |
if len(input_list) > 1 and not preprocessor_output_is_image:
|
| 271 |
+
logger.info("Batch wise input only support controlnet, control-lora, and t2i adapters!")
|
|
|
|
|
|
|
| 272 |
break
|
| 273 |
|
| 274 |
if has_high_res_fix:
|
|
|
|
| 279 |
alignment_indices = [i % len(preprocessor_outputs) for i in range(p.batch_size)]
|
| 280 |
|
| 281 |
def attach_extra_result_image(img: np.ndarray, is_high_res: bool = False):
|
| 282 |
+
if not shared.opts.data.get("control_net_no_detectmap", False) and ((is_high_res and hr_option.high_res_enabled) or (not is_high_res and hr_option.low_res_enabled)) and unit.save_detected_map:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 283 |
p.extra_result_images.append(img)
|
| 284 |
|
| 285 |
if preprocessor_output_is_image:
|
|
|
|
| 287 |
params.control_cond_for_hr_fix = []
|
| 288 |
|
| 289 |
for preprocessor_output in preprocessor_outputs:
|
| 290 |
+
control_cond = crop_and_resize_image(preprocessor_output, resize_mode, h, w)
|
| 291 |
+
attach_extra_result_image(external_code.visualize_inpaint_mask(control_cond))
|
| 292 |
+
params.control_cond.append(numpy_to_pytorch(control_cond).movedim(-1, 1))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 293 |
|
| 294 |
+
params.control_cond = torch.cat(params.control_cond, dim=0)[alignment_indices].contiguous()
|
|
|
|
|
|
|
| 295 |
|
| 296 |
if has_high_res_fix:
|
| 297 |
for preprocessor_output in preprocessor_outputs:
|
| 298 |
+
control_cond_for_hr_fix = crop_and_resize_image(preprocessor_output, resize_mode, hr_y, hr_x)
|
|
|
|
|
|
|
| 299 |
attach_extra_result_image(
|
| 300 |
external_code.visualize_inpaint_mask(control_cond_for_hr_fix),
|
| 301 |
is_high_res=True,
|
| 302 |
)
|
| 303 |
+
params.control_cond_for_hr_fix.append(numpy_to_pytorch(control_cond_for_hr_fix).movedim(-1, 1))
|
| 304 |
+
params.control_cond_for_hr_fix = torch.cat(params.control_cond_for_hr_fix, dim=0)[alignment_indices].contiguous()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 305 |
else:
|
| 306 |
params.control_cond_for_hr_fix = params.control_cond
|
| 307 |
else:
|
|
|
|
| 315 |
|
| 316 |
for input_mask in control_masks:
|
| 317 |
fill_border = preprocessor.fill_mask_with_one_when_resize_and_fill
|
| 318 |
+
control_mask = crop_and_resize_image(input_mask, resize_mode, h, w, fill_border)
|
|
|
|
|
|
|
| 319 |
attach_extra_result_image(control_mask)
|
| 320 |
control_mask = numpy_to_pytorch(control_mask).movedim(-1, 1)[:, :1]
|
| 321 |
params.control_mask.append(control_mask)
|
| 322 |
|
| 323 |
if has_high_res_fix:
|
| 324 |
+
control_mask_for_hr_fix = crop_and_resize_image(input_mask, resize_mode, hr_y, hr_x, fill_border)
|
|
|
|
|
|
|
| 325 |
attach_extra_result_image(control_mask_for_hr_fix, is_high_res=True)
|
| 326 |
+
control_mask_for_hr_fix = numpy_to_pytorch(control_mask_for_hr_fix).movedim(-1, 1)[:, :1]
|
|
|
|
|
|
|
| 327 |
params.control_mask_for_hr_fix.append(control_mask_for_hr_fix)
|
| 328 |
|
| 329 |
+
params.control_mask = torch.cat(params.control_mask, dim=0)[alignment_indices].contiguous()
|
|
|
|
|
|
|
| 330 |
if has_high_res_fix:
|
| 331 |
+
params.control_mask_for_hr_fix = torch.cat(params.control_mask_for_hr_fix, dim=0)[alignment_indices].contiguous()
|
|
|
|
|
|
|
| 332 |
else:
|
| 333 |
params.control_mask_for_hr_fix = params.control_mask
|
| 334 |
|
|
|
|
| 347 |
|
| 348 |
params.preprocessor = preprocessor
|
| 349 |
|
| 350 |
+
params.preprocessor.process_after_running_preprocessors(process=p, params=params, **kwargs)
|
| 351 |
+
params.model.process_after_running_preprocessors(process=p, params=params, **kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 352 |
|
| 353 |
logger.info(f"{type(params.model).__name__}: {model_filename}")
|
| 354 |
return True
|
| 355 |
|
| 356 |
@torch.no_grad()
|
| 357 |
+
def process_unit_before_every_sampling(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 358 |
|
| 359 |
is_hr_pass = getattr(p, "is_hr_pass", False)
|
| 360 |
|
| 361 |
+
has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(p, "enable_hr", False)
|
|
|
|
|
|
|
| 362 |
|
| 363 |
if has_high_res_fix:
|
| 364 |
hr_option = HiResFixOption.from_value(unit.hr_option)
|
|
|
|
| 366 |
hr_option = HiResFixOption.BOTH
|
| 367 |
|
| 368 |
if has_high_res_fix and is_hr_pass and (not hr_option.high_res_enabled):
|
| 369 |
+
logger.info("ControlNet Skipped High-res pass.")
|
| 370 |
return
|
| 371 |
|
| 372 |
if has_high_res_fix and (not is_hr_pass) and (not hr_option.low_res_enabled):
|
| 373 |
+
logger.info("ControlNet Skipped Low-res pass.")
|
| 374 |
return
|
| 375 |
|
| 376 |
if is_hr_pass:
|
|
|
|
| 443 |
params.model.positive_advanced_weighting = soft_weighting.copy()
|
| 444 |
params.model.negative_advanced_weighting = soft_weighting.copy()
|
| 445 |
|
| 446 |
+
cond, mask = params.preprocessor.process_before_every_sampling(p, cond, mask, *args, **kwargs)
|
|
|
|
|
|
|
| 447 |
|
| 448 |
params.model.advanced_mask_weighting = mask
|
| 449 |
|
| 450 |
params.model.process_before_every_sampling(p, cond, mask, *args, **kwargs)
|
| 451 |
|
| 452 |
logger.info(f"ControlNet Method {params.preprocessor.name} patched.")
|
|
|
|
| 453 |
|
| 454 |
@staticmethod
|
| 455 |
def bound_check_params(unit: ControlNetUnit) -> None:
|
|
|
|
| 464 |
preprocessor = global_state.get_preprocessor(unit.module)
|
| 465 |
|
| 466 |
if unit.processor_res < 0:
|
| 467 |
+
unit.processor_res = int(preprocessor.slider_resolution.gradio_update_kwargs.get("value", 512))
|
|
|
|
|
|
|
|
|
|
| 468 |
if unit.threshold_a < 0:
|
| 469 |
+
unit.threshold_a = int(preprocessor.slider_1.gradio_update_kwargs.get("value", 1.0))
|
|
|
|
|
|
|
|
|
|
| 470 |
if unit.threshold_b < 0:
|
| 471 |
+
unit.threshold_b = int(preprocessor.slider_2.gradio_update_kwargs.get("value", 1.0))
|
|
|
|
|
|
|
|
|
|
|
|
|
| 472 |
|
| 473 |
@torch.no_grad()
|
| 474 |
+
def process_unit_after_every_sampling(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 475 |
params.preprocessor.process_after_every_sampling(p, params, *args, **kwargs)
|
| 476 |
params.model.process_after_every_sampling(p, params, *args, **kwargs)
|
|
|
|
| 477 |
|
| 478 |
@torch.no_grad()
|
| 479 |
def process(self, p, *args, **kwargs):
|
|
|
|
| 492 |
if i not in self.current_params:
|
| 493 |
logger.warning(f"ControlNet Unit {i + 1} is skipped...")
|
| 494 |
continue
|
| 495 |
+
self.process_unit_before_every_sampling(p, unit, self.current_params[i], *args, **kwargs)
|
|
|
|
|
|
|
| 496 |
|
| 497 |
@torch.no_grad()
|
| 498 |
def postprocess_batch_list(self, p, pp, *args, **kwargs):
|
| 499 |
for i, unit in enumerate(self.get_enabled_units(args)):
|
| 500 |
if i in self.current_params:
|
| 501 |
+
self.process_unit_after_every_sampling(p, unit, self.current_params[i], pp, *args, **kwargs)
|
|
|
|
|
|
|
| 502 |
|
| 503 |
+
def postprocess(self, *args):
|
| 504 |
self.current_params = {}
|
| 505 |
|
| 506 |
|
|
|
|
| 563 |
script_callbacks.on_infotext_pasted(Infotext.on_infotext_pasted)
|
| 564 |
script_callbacks.on_after_component(ControlNetUiGroup.on_after_component)
|
| 565 |
script_callbacks.on_before_reload(ControlNetUiGroup.reset)
|
| 566 |
+
|
| 567 |
+
if shared.cmd_opts.api:
|
| 568 |
+
script_callbacks.on_app_started(controlnet_api)
|
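Note on the `get_enabled_units` change above: it accepts both `ControlNetUnit` objects and raw dicts (as sent by API callers) and normalizes them before filtering. A minimal self-contained sketch of that normalize-then-filter pattern, using a simplified stand-in class (the real `ControlNetUnit` lives in `lib_controlnet.external_code` and has many more fields):

    from dataclasses import dataclass, fields

    @dataclass
    class Unit:  # simplified stand-in for ControlNetUnit
        enabled: bool = False
        module: str = "None"
        weight: float = 1.0

        @classmethod
        def from_dict(cls, d: dict) -> "Unit":
            # keep only known fields so unexpected API keys do not raise
            known = {f.name for f in fields(cls)}
            return cls(**{k: v for k, v in d.items() if k in known})

    def get_enabled_units(units: list) -> list:
        units = [Unit.from_dict(u) if isinstance(u, dict) else u for u in units]
        return [u for u in units if u.enabled]

    # usage: a mixed API payload and an in-process object
    print(get_enabled_units([{"enabled": True, "weight": 0.5}, Unit(enabled=False)]))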
extensions-builtin/sd_forge_multidiffusion/lib_multidiffusion/tiled_diffusion.py ADDED
@@ -0,0 +1,539 @@
+# 1st Edit by. https://github.com/shiimizu/ComfyUI-TiledDiffusion
+# 2nd Edit by. Forge Official
+# 3rd Edit by. Panchovix
+# 4th Edit by. Haoming02
+# - Based on: https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111
+
+from enum import Enum
+from typing import Callable, Final, Union
+
+import numpy as np
+import torch
+from numpy import exp, pi, sqrt
+from torch import Tensor
+
+from ldm_patched.modules.controlnet import ControlNet, T2IAdapter
+from ldm_patched.modules.model_base import BaseModel
+from ldm_patched.modules.model_management import current_loaded_models, get_torch_device, load_models_gpu
+from ldm_patched.modules.model_patcher import ModelPatcher
+from ldm_patched.modules.utils import common_upscale
+
+opt_C: Final[int] = 4
+opt_f: Final[int] = 8
+device: Final[torch.device] = get_torch_device()
+
+
+class BlendMode(Enum):
+    FOREGROUND = "Foreground"
+    BACKGROUND = "Background"
+
+
+class BBox:
+    def __init__(self, x: int, y: int, w: int, h: int):
+        self.x = x
+        self.y = y
+        self.w = w
+        self.h = h
+        self.box = [x, y, x + w, y + h]
+        self.slicer = slice(None), slice(None), slice(y, y + h), slice(x, x + w)
+
+    def __getitem__(self, idx: int) -> int:
+        return self.box[idx]
+
+
+def processing_interrupted():
+    from modules import shared
+
+    return shared.state.interrupted or shared.state.skipped
+
+
+def ceildiv(big: int, small: int) -> int:
+    return -(big // -small)
+
+
+def repeat_to_batch_size(tensor: torch.Tensor, batch_size: int, dim: int = 0):
+    if dim == 0 and tensor.shape[dim] == 1:
+        return tensor.expand([batch_size] + [-1] * (len(tensor.shape) - 1))
+    if tensor.shape[dim] > batch_size:
+        return tensor.narrow(dim, 0, batch_size)
+    elif tensor.shape[dim] < batch_size:
+        return tensor.repeat(dim * [1] + [ceildiv(batch_size, tensor.shape[dim])] + [1] * (len(tensor.shape) - 1 - dim)).narrow(dim, 0, batch_size)
+    return tensor
+
+
+def split_bboxes(w: int, h: int, tile_w: int, tile_h: int, overlap: int = 16, init_weight: Union[Tensor, float] = 1.0) -> tuple[list[BBox], Tensor]:
+    cols = ceildiv((w - overlap), (tile_w - overlap))
+    rows = ceildiv((h - overlap), (tile_h - overlap))
+    dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
+    dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
+
+    bbox_list: list[BBox] = []
+    weight = torch.zeros((1, 1, h, w), device=device, dtype=torch.float32)
+    for row in range(rows):
+        y = min(int(row * dy), h - tile_h)
+        for col in range(cols):
+            x = min(int(col * dx), w - tile_w)
+
+            bbox = BBox(x, y, tile_w, tile_h)
+            bbox_list.append(bbox)
+            weight[bbox.slicer] += init_weight
+
+    return bbox_list, weight
+
+
+class AbstractDiffusion:
+    def __init__(self):
+        self.method = self.__class__.__name__
+
+        self.w: int = 0
+        self.h: int = 0
+        self.tile_width: int = None
+        self.tile_height: int = None
+        self.tile_overlap: int = None
+        self.tile_batch_size: int = None
+
+        self.x_buffer: Tensor = None
+        self._weights: Tensor = None
+        self._init_grid_bbox = None
+        self._init_done = None
+
+        self.step_count = 0
+        self.inner_loop_count = 0
+        self.kdiff_step = -1
+
+        self.enable_grid_bbox: bool = False
+        self.tile_w: int = None
+        self.tile_h: int = None
+        self.tile_bs: int = None
+        self.num_tiles: int = None
+        self.num_batches: int = None
+        self.batched_bboxes: list[list[BBox]] = []
+
+        self.enable_controlnet: bool = False
+        self.control_tensor_batch_dict = {}
+        self.control_tensor_batch: list[list[Tensor]] = [[]]
+        self.control_params: dict[tuple, list[list[Tensor]]] = {}
+        self.control_tensor_cpu: bool = False
+        self.control_tensor_custom: list[list[Tensor]] = []
+
+        self.refresh = False
+        self.weights = None
+
+    def reset(self):
+        tile_width = self.tile_width
+        tile_height = self.tile_height
+        tile_overlap = self.tile_overlap
+        tile_batch_size = self.tile_batch_size
+        compression = self.compression
+        width = self.width
+        height = self.height
+        overlap = self.overlap
+        self.__init__()
+        self.compression = compression
+        self.width = width
+        self.height = height
+        self.overlap = overlap
+        self.tile_width = tile_width
+        self.tile_height = tile_height
+        self.tile_overlap = tile_overlap
+        self.tile_batch_size = tile_batch_size
+
+    def repeat_tensor(self, x: Tensor, n: int, concat=False, concat_to=0) -> Tensor:
+        """repeat the tensor on it's first dim"""
+        if n == 1:
+            return x
+        B = x.shape[0]
+        r_dims = len(x.shape) - 1
+        if B == 1:
+            shape = [n] + [-1] * r_dims
+            return x.expand(shape)
+        else:
+            if concat:
+                return torch.cat([x for _ in range(n)], dim=0)[:concat_to]
+            shape = [n] + [1] * r_dims
+            return x.repeat(shape)
+
+    def reset_buffer(self, x_in: Tensor):
+        if self.x_buffer is None or self.x_buffer.shape != x_in.shape:
+            self.x_buffer = torch.zeros_like(x_in, device=x_in.device, dtype=x_in.dtype)
+        else:
+            self.x_buffer.zero_()
+
+    def init_grid_bbox(self, tile_w: int, tile_h: int, overlap: int, tile_bs: int):
+        self.weights = torch.zeros((1, 1, self.h, self.w), device=device, dtype=torch.float32)
+        self.enable_grid_bbox = True
+
+        self.tile_w = min(tile_w, self.w)
+        self.tile_h = min(tile_h, self.h)
+        overlap = max(0, min(overlap, min(tile_w, tile_h) - 4))
+        bboxes, weights = split_bboxes(self.w, self.h, self.tile_w, self.tile_h, overlap, self.get_tile_weights())
+        self.weights += weights
+        self.num_tiles = len(bboxes)
+        self.num_batches = ceildiv(self.num_tiles, tile_bs)
+        self.tile_bs = ceildiv(len(bboxes), self.num_batches)
+        self.batched_bboxes = [bboxes[i * self.tile_bs : (i + 1) * self.tile_bs] for i in range(self.num_batches)]
+
+    def get_grid_bbox(self, tile_w: int, tile_h: int, overlap: int, tile_bs: int, w: int, h: int, device: torch.device, get_tile_weights: Callable = lambda: 1.0) -> list[list[BBox]]:
+        weights = torch.zeros((1, 1, h, w), device=device, dtype=torch.float32)
+
+        tile_w = min(tile_w, w)
+        tile_h = min(tile_h, h)
+        overlap = max(0, min(overlap, min(tile_w, tile_h) - 4))
+        bboxes, weights_ = split_bboxes(w, h, tile_w, tile_h, overlap, get_tile_weights())
+        weights += weights_
+        num_tiles = len(bboxes)
+        num_batches = ceildiv(num_tiles, tile_bs)
+        tile_bs = ceildiv(len(bboxes), num_batches)
+        batched_bboxes = [bboxes[i * tile_bs : (i + 1) * tile_bs] for i in range(num_batches)]
+        return batched_bboxes
+
+    def get_tile_weights(self) -> Union[Tensor, float]:
+        return 1.0
+
+    def init_noise_inverse(self, steps: int, retouch: float, get_cache_callback, set_cache_callback, renoise_strength: float, renoise_kernel: int):
+        self.noise_inverse_enabled = True
+        self.noise_inverse_steps = steps
+        self.noise_inverse_retouch = float(retouch)
+        self.noise_inverse_renoise_strength = float(renoise_strength)
+        self.noise_inverse_renoise_kernel = int(renoise_kernel)
+        self.noise_inverse_set_cache = set_cache_callback
+        self.noise_inverse_get_cache = get_cache_callback
+
+    def init_done(self):
+        """
+        Call this after all `init_*`, settings are done, now perform:
+        - settings sanity check
+        - pre-computations, cache init
+        - anything thing needed before denoising starts
+        """
+
+        self.total_bboxes = 0
+        if self.enable_grid_bbox:
+            self.total_bboxes += self.num_batches
+        assert self.total_bboxes > 0, "Nothing to paint! No background to draw and no custom bboxes were provided."
+
+    def prepare_controlnet_tensors(self, refresh: bool = False, tensor=None):
+        """Crop the control tensor into tiles and cache them"""
+        if not refresh:
+            if self.control_tensor_batch is not None or self.control_params is not None:
+                return
+        tensors = [tensor]
+        self.org_control_tensor_batch = tensors
+        self.control_tensor_batch = []
+        for i in range(len(tensors)):
+            control_tile_list = []
+            control_tensor = tensors[i]
+            for bboxes in self.batched_bboxes:
+                single_batch_tensors = []
+                for bbox in bboxes:
+                    if len(control_tensor.shape) == 3:
+                        control_tensor.unsqueeze_(0)
+                    control_tile = control_tensor[:, :, bbox[1] * opt_f : bbox[3] * opt_f, bbox[0] * opt_f : bbox[2] * opt_f]
+                    single_batch_tensors.append(control_tile)
+                control_tile = torch.cat(single_batch_tensors, dim=0)
+                if self.control_tensor_cpu:
+                    control_tile = control_tile.cpu()
+                control_tile_list.append(control_tile)
+            self.control_tensor_batch.append(control_tile_list)
+
+    def switch_controlnet_tensors(self, batch_id: int, x_batch_size: int, tile_batch_size: int, is_denoise=False):
+        if self.control_tensor_batch is None:
+            return
+
+        for param_id in range(len(self.control_tensor_batch)):
+            control_tile = self.control_tensor_batch[param_id][batch_id]
+            if x_batch_size > 1:
+                all_control_tile = []
+                for i in range(tile_batch_size):
+                    this_control_tile = [control_tile[i].unsqueeze(0)] * x_batch_size
+                    all_control_tile.append(torch.cat(this_control_tile, dim=0))
+                control_tile = torch.cat(all_control_tile, dim=0)
+                self.control_tensor_batch[param_id][batch_id] = control_tile
+
+    def process_controlnet(self, x_noisy, c_in: dict, cond_or_uncond: list, bboxes, batch_size: int, batch_id: int, shifts=None, shift_condition=None):
+        control: ControlNet = c_in["control"]
+        param_id = -1
+        tuple_key = tuple(cond_or_uncond) + tuple(x_noisy.shape)
+        while control is not None:
+            param_id += 1
+
+            if tuple_key not in self.control_params:
+                self.control_params[tuple_key] = [[None]]
+
+            while len(self.control_params[tuple_key]) <= param_id:
+                self.control_params[tuple_key].append([None])
+
+            while len(self.control_params[tuple_key][param_id]) <= batch_id:
+                self.control_params[tuple_key][param_id].append(None)
+
+            if self.refresh or control.cond_hint is None or not isinstance(self.control_params[tuple_key][param_id][batch_id], Tensor):
+                if control.cond_hint is not None:
+                    del control.cond_hint
+                control.cond_hint = None
+                compression_ratio = control.compression_ratio
+                if control.vae is not None:
+                    compression_ratio *= control.vae.downscale_ratio
+                else:
+                    if control.latent_format is not None:
+                        raise ValueError("This Controlnet needs a VAE but none was provided, please use a ControlNetApply node with a VAE input and connect it.")
+                PH, PW = self.h * compression_ratio, self.w * compression_ratio
+
+                device = getattr(control, "device", x_noisy.device)
+                dtype = getattr(control, "manual_cast_dtype", None)
+                if dtype is None:
+                    dtype = getattr(getattr(control, "control_model", None), "dtype", None)
+                if dtype is None:
+                    dtype = x_noisy.dtype
+
+                if isinstance(control, T2IAdapter):
+                    width, height = control.scale_image_to(PW, PH)
+                    cns = common_upscale(control.cond_hint_original, width, height, control.upscale_algorithm, "center").float().to(device=device)
+                    if control.channels_in == 1 and control.cond_hint.shape[1] > 1:
+                        cns = torch.mean(control.cond_hint, 1, keepdim=True)
+                elif control.__class__.__name__ == "ControlLLLiteAdvanced":
+                    if getattr(control, "sub_idxs", None) is not None and control.cond_hint_original.shape[0] >= control.full_latent_length:
+                        cns = common_upscale(control.cond_hint_original[control.sub_idxs], PW, PH, control.upscale_algorithm, "center").to(dtype=dtype, device=device)
+                    else:
+                        cns = common_upscale(control.cond_hint_original, PW, PH, control.upscale_algorithm, "center").to(dtype=dtype, device=device)
+                else:
+                    cns = common_upscale(control.cond_hint_original, PW, PH, control.upscale_algorithm, "center").to(dtype=dtype, device=device)
+                    if getattr(control, "vae", None) is not None:
+                        loaded_models_ = current_loaded_models(only_currently_used=True)
+                        cns = control.vae.encode(cns.movedim(1, -1))
+                        load_models_gpu(loaded_models_)
+                    if getattr(control, "latent_format", None) is not None:
+                        cns = control.latent_format.process_in(cns)
+                    if len(getattr(control, "extra_concat_orig", ())) > 0:
+                        to_concat = []
+                        for c in control.extra_concat_orig:
+                            c = c.to(device=device)
+                            c = common_upscale(c, cns.shape[3], cns.shape[2], control.upscale_algorithm, "center")
+                            to_concat.append(repeat_to_batch_size(c, cns.shape[0]))
+                        cns = torch.cat([cns] + to_concat, dim=1)
+
+                cns = cns.to(device=device, dtype=dtype)
+                cf = control.compression_ratio
+                if cns.shape[0] != batch_size:
+                    cns = repeat_to_batch_size(cns, batch_size)
+                if shifts is not None:
+                    control.cns = cns
+                    sh_h, sh_w = shifts
+                    sh_h *= cf
+                    sh_w *= cf
+                    if (sh_h, sh_w) != (0, 0):
+                        if sh_h == 0 or sh_w == 0:
+                            cns = control.cns.roll(shifts=(sh_h, sh_w), dims=(-2, -1))
+                        else:
+                            if shift_condition:
+                                cns = control.cns.roll(shifts=sh_h, dims=-2)
+                            else:
+                                cns = control.cns.roll(shifts=sh_w, dims=-1)
+                cns_slices = [cns[:, :, bbox[1] * cf : bbox[3] * cf, bbox[0] * cf : bbox[2] * cf] for bbox in bboxes]
+                control.cond_hint = torch.cat(cns_slices, dim=0).to(device=cns.device)
+                del cns_slices
+                del cns
+                self.control_params[tuple_key][param_id][batch_id] = control.cond_hint
+            else:
+                if hasattr(control, "cns") and shifts is not None:
+                    cf = control.compression_ratio
+                    cns = control.cns
+                    sh_h, sh_w = shifts
+                    sh_h *= cf
+                    sh_w *= cf
+                    if (sh_h, sh_w) != (0, 0):
+                        if sh_h == 0 or sh_w == 0:
+                            cns = control.cns.roll(shifts=(sh_h, sh_w), dims=(-2, -1))
+                        else:
+                            if shift_condition:
+                                cns = control.cns.roll(shifts=sh_h, dims=-2)
+                            else:
+                                cns = control.cns.roll(shifts=sh_w, dims=-1)
+                    cns_slices = [cns[:, :, bbox[1] * cf : bbox[3] * cf, bbox[0] * cf : bbox[2] * cf] for bbox in bboxes]
+                    control.cond_hint = torch.cat(cns_slices, dim=0).to(device=cns.device)
+                    del cns_slices
+                    del cns
+                else:
+                    control.cond_hint = self.control_params[tuple_key][param_id][batch_id]
+            control = control.previous_controlnet
+
+
+class MultiDiffusion(AbstractDiffusion):
+
+    @torch.inference_mode()
+    def __call__(self, model_function: BaseModel.apply_model, args: dict):
+        x_in: Tensor = args["input"]
+        t_in: Tensor = args["timestep"]
+        c_in: dict = args["c"]
+        cond_or_uncond: list = args["cond_or_uncond"]
+
+        N, C, H, W = x_in.shape
+
+        self.refresh = False
+        if self.weights is None or self.h != H or self.w != W:
+            self.h, self.w = H, W
+            self.refresh = True
+            self.init_grid_bbox(self.tile_width, self.tile_height, self.tile_overlap, self.tile_batch_size)
+            self.init_done()
+        self.h, self.w = H, W
+        self.reset_buffer(x_in)
+
+        for batch_id, bboxes in enumerate(self.batched_bboxes):
+            if processing_interrupted():
+                return x_in
+
+            x_tile = torch.cat([x_in[bbox.slicer] for bbox in bboxes], dim=0)
+            t_tile = repeat_to_batch_size(t_in, x_tile.shape[0])
+            c_tile = {}
+            for k, v in c_in.items():
+                if isinstance(v, torch.Tensor):
+                    if len(v.shape) == len(x_tile.shape):
+                        bboxes_ = bboxes
+                        if v.shape[-2:] != x_in.shape[-2:]:
+                            cf = x_in.shape[-1] * self.compression // v.shape[-1]
+                            bboxes_ = self.get_grid_bbox(
+                                self.width // cf,
+                                self.height // cf,
+                                self.overlap // cf,
+                                self.tile_batch_size,
+                                v.shape[-1],
+                                v.shape[-2],
+                                x_in.device,
+                                self.get_tile_weights,
+                            )
+                        v = torch.cat([v[bbox_.slicer] for bbox_ in bboxes_[batch_id]])
+                    if v.shape[0] != x_tile.shape[0]:
+                        v = repeat_to_batch_size(v, x_tile.shape[0])
+                c_tile[k] = v
+
+            if "control" in c_in:
+                self.process_controlnet(x_tile, c_in, cond_or_uncond, bboxes, N, batch_id)
+                c_tile["control"] = c_in["control"].get_control_orig(x_tile, t_tile, c_tile, len(cond_or_uncond))
+
+            x_tile_out = model_function(x_tile, t_tile, **c_tile)
+
+            for i, bbox in enumerate(bboxes):
+                self.x_buffer[bbox.slicer] += x_tile_out[i * N : (i + 1) * N, :, :, :]
+            del x_tile_out, x_tile, t_tile, c_tile
+
+        return torch.where(self.weights > 1, self.x_buffer / self.weights, self.x_buffer)
+
+
+class MixtureOfDiffusers(AbstractDiffusion):
+    """
+    Mixture-of-Diffusers Implementation
+    https://github.com/albarji/mixture-of-diffusers
+    """
+
+    def init_done(self):
+        super().init_done()
+        self.rescale_factor = 1 / self.weights
+
+    @staticmethod
+    def get_weight(tile_w: int, tile_h: int) -> Tensor:
+        """
+        Copy from the original implementation of Mixture of Diffusers
+        https://github.com/albarji/mixture-of-diffusers/blob/master/mixdiff/tiling.py
+        This generates gaussian weights to smooth the noise of each tile.
+        This is critical for this method to work.
+        """
+        f = lambda x, midpoint, var=0.01: exp(-(x - midpoint) * (x - midpoint) / (tile_w * tile_w) / (2 * var)) / sqrt(2 * pi * var)
+        x_probs = [f(x, (tile_w - 1) / 2) for x in range(tile_w)]
+        y_probs = [f(y, tile_h / 2) for y in range(tile_h)]
+
+        w = np.outer(y_probs, x_probs)
+        return torch.from_numpy(w).to(device, dtype=torch.float32)
+
+    def get_tile_weights(self) -> Tensor:
+        self.tile_weights = self.get_weight(self.tile_w, self.tile_h)
+        return self.tile_weights
+
+    @torch.inference_mode()
+    def __call__(self, model_function: BaseModel.apply_model, args: dict):
+        x_in: Tensor = args["input"]
+        t_in: Tensor = args["timestep"]
+        c_in: dict = args["c"]
+        cond_or_uncond: list = args["cond_or_uncond"]
+
+        N, C, H, W = x_in.shape
+
+        self.refresh = False
+        if self.weights is None or self.h != H or self.w != W:
+            self.h, self.w = H, W
+            self.refresh = True
+            self.init_grid_bbox(self.tile_width, self.tile_height, self.tile_overlap, self.tile_batch_size)
+            self.init_done()
+        self.h, self.w = H, W
+        self.reset_buffer(x_in)
+
+        for batch_id, bboxes in enumerate(self.batched_bboxes):
+            if processing_interrupted():
+                return x_in
+            x_tile_list = []
+            for bbox in bboxes:
+                x_tile_list.append(x_in[bbox.slicer])
+
+            x_tile = torch.cat(x_tile_list, dim=0)
+            t_tile = repeat_to_batch_size(t_in, x_tile.shape[0])
+            c_tile = {}
+            for k, v in c_in.items():
+                if isinstance(v, torch.Tensor):
+                    if len(v.shape) == len(x_tile.shape):
+                        bboxes_ = bboxes
+                        if v.shape[-2:] != x_in.shape[-2:]:
+                            cf = x_in.shape[-1] * self.compression // v.shape[-1]
+                            bboxes_ = self.get_grid_bbox(
+                                (tile_w := self.width // cf),
+                                (tile_h := self.height // cf),
+                                self.overlap // cf,
+                                self.tile_batch_size,
+                                v.shape[-1],
+                                v.shape[-2],
+                                x_in.device,
+                                lambda: self.get_weight(tile_w, tile_h),
+                            )
+                        v = torch.cat([v[bbox_.slicer] for bbox_ in bboxes_[batch_id]])
+                    if v.shape[0] != x_tile.shape[0]:
+                        v = repeat_to_batch_size(v, x_tile.shape[0])
+                c_tile[k] = v
+
+            if "control" in c_in:
+                self.process_controlnet(x_tile, c_in, cond_or_uncond, bboxes, N, batch_id)
+                c_tile["control"] = c_in["control"].get_control_orig(x_tile, t_tile, c_tile, len(cond_or_uncond))
+
+            x_tile_out = model_function(x_tile, t_tile, **c_tile)
+
+            for i, bbox in enumerate(bboxes):
+                w = self.tile_weights * self.rescale_factor[bbox.slicer]
+                self.x_buffer[bbox.slicer] += x_tile_out[i * N : (i + 1) * N, :, :, :] * w
+            del x_tile_out, x_tile, t_tile, c_tile

+        return self.x_buffer
+
+
+class TiledDiffusion:
+
+    @staticmethod
+    def apply(model: ModelPatcher, method: str, tile_width: int, tile_height: int, tile_overlap: int, tile_batch_size: int):
+        match method:
+            case "MultiDiffusion":
+                impl = MultiDiffusion()
+            case "Mixture of Diffusers":
+                impl = MixtureOfDiffusers()
+            case _:
+                raise SystemError
+
+        compression = 8
+        impl.tile_width = tile_width // compression
+        impl.tile_height = tile_height // compression
+        impl.tile_overlap = tile_overlap // compression
+        impl.tile_batch_size = tile_batch_size
+
+        impl.compression = compression
+        impl.width = tile_width
+        impl.height = tile_height
+        impl.overlap = tile_overlap
+
+        model = model.clone()
+        model.set_model_unet_function_wrapper(impl)
+
+        return model
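The tiling math in `split_bboxes` above is compact, so here is a standalone sketch (NumPy only, no torch) of the same grid layout: `ceildiv` picks the smallest grid whose tiles cover the canvas while sharing at least `overlap` pixels, and the per-pixel weight counts how many tiles touch each location, which the tiler later uses to average overlapping predictions:

    import numpy as np

    def ceildiv(big: int, small: int) -> int:
        return -(big // -small)

    def tile_grid(w, h, tile_w, tile_h, overlap):
        cols = ceildiv(w - overlap, tile_w - overlap)
        rows = ceildiv(h - overlap, tile_h - overlap)
        dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
        dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
        weight = np.zeros((h, w), dtype=np.float32)
        boxes = []
        for r in range(rows):
            y = min(int(r * dy), h - tile_h)
            for c in range(cols):
                x = min(int(c * dx), w - tile_w)
                boxes.append((x, y, tile_w, tile_h))
                weight[y:y + tile_h, x:x + tile_w] += 1.0  # count coverage per pixel
        return boxes, weight

    # a 96x96 latent, 64x64 tiles, 16px overlap -> a 2x2 grid of 4 tiles
    boxes, weight = tile_grid(96, 96, 64, 64, 16)
    print(len(boxes), weight.max())  # 4 tiles; the center is covered by all 4

MultiDiffusion divides the accumulated buffer by exactly this weight map, while MixtureOfDiffusers replaces the constant 1.0 with the gaussian window from `get_weight`, so tile seams are blended smoothly rather than averaged uniformly.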
extensions-builtin/sd_forge_multidiffusion/scripts/forge_multidiffusion.py ADDED
@@ -0,0 +1,46 @@
+import gradio as gr
+from lib_multidiffusion.tiled_diffusion import TiledDiffusion
+
+from modules import scripts
+from modules.ui_components import InputAccordion
+
+
+class MultiDiffusionForForge(scripts.Script):
+    sorting_priority = 16
+
+    def title(self):
+        return "MultiDiffusion Integrated"
+
+    def show(self, is_img2img):
+        return scripts.AlwaysVisible if is_img2img else None
+
+    def ui(self, *args, **kwargs):
+        with InputAccordion(False, label=self.title()) as enabled:
+            method = gr.Radio(label="Method", choices=("MultiDiffusion", "Mixture of Diffusers"), value="Mixture of Diffusers")
+            with gr.Row():
+                tile_width = gr.Slider(label="Tile Width", minimum=256, maximum=2048, step=64, value=768)
+                tile_height = gr.Slider(label="Tile Height", minimum=256, maximum=2048, step=64, value=768)
+            with gr.Row():
+                tile_overlap = gr.Slider(label="Tile Overlap", minimum=0, maximum=1024, step=16, value=64)
+                tile_batch_size = gr.Slider(label="Tile Batch Size", minimum=1, maximum=8, step=1, value=1)
+
+        return enabled, method, tile_width, tile_height, tile_overlap, tile_batch_size
+
+    def process_before_every_sampling(self, p, enabled: bool, method: str, tile_width: int, tile_height: int, tile_overlap: int, tile_batch_size: int, **kwargs):
+        if not enabled:
+            return
+
+        unet = p.sd_model.forge_objects.unet
+        unet = TiledDiffusion.apply(unet, method, tile_width, tile_height, tile_overlap, tile_batch_size)
+        p.sd_model.forge_objects.unet = unet
+
+        p.extra_generation_params.update(
+            {
+                "multidiffusion_enabled": enabled,
+                "multidiffusion_method": method,
+                "multidiffusion_tile_width": tile_width,
+                "multidiffusion_tile_height": tile_height,
+                "multidiffusion_tile_overlap": tile_overlap,
+                "multidiffusion_tile_batch_size": tile_batch_size,
+            }
+        )
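`TiledDiffusion.apply` clones the `ModelPatcher` and installs the tiler with `set_model_unet_function_wrapper`, so every denoising call is routed through `MultiDiffusion.__call__` / `MixtureOfDiffusers.__call__`. A minimal sketch of that calling convention, with stand-in classes rather than the real `ldm_patched` API surface:

    import torch

    class StubModel:  # stand-in for the patched UNet's apply_model
        def apply_model(self, x, t, **c):
            return x * 0.5

    class PassthroughWrapper:  # stand-in for the tiler; the real one slices args["input"] into bboxes
        def __call__(self, model_function, args: dict):
            return model_function(args["input"], args["timestep"], **args["c"])

    def denoise(model, wrapper, x, t):
        # mirrors how a patched sampler routes through the wrapper when one is set
        args = {"input": x, "timestep": t, "c": {}, "cond_or_uncond": [0]}
        return wrapper(model.apply_model, args) if wrapper else model.apply_model(x, t)

    out = denoise(StubModel(), PassthroughWrapper(), torch.ones(1, 4, 8, 8), torch.tensor([999]))
    print(out.mean().item())  # 0.5

Cloning before patching matters: the wrapper is attached to a copy, so disabling the script leaves the original unet untouched.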
extensions-builtin/xyz/lib_xyz/builtins.py CHANGED
@@ -31,44 +31,56 @@ from .utils import boolean_choice, str_permutations
 builtin_options = [
     AxisOption("Nothing", str, do_nothing, format_value=format_nothing),
     AxisOption("Seed", int, apply_field("seed")),
-    AxisOption("Var. seed", int, apply_field("subseed")),
-    AxisOption("Var. strength", float, apply_field("subseed_strength")),
     AxisOption("Steps", int, apply_field("steps")),
-    AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")),
+    AxisOptionTxt2Img("Hires. steps", int, apply_field("hr_second_pass_steps")),
     AxisOption("CFG Scale", float, apply_field("cfg_scale")),
-    AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")),
     AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value),
     AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list),
     AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names),
-    AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names),
+    AxisOptionTxt2Img("Hires. sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names),
     AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names),
+    AxisOption("Schedule type", str, apply_field("scheduler"), choices=lambda: [x.label for x in sd_schedulers.schedulers]),
     AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)),
-    AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")),
     AxisOption("Size", str, apply_size),
-    AxisOption("Sigma Churn", float, apply_field("s_churn")),
-    AxisOption("Sigma min", float, apply_field("s_tmin")),
-    AxisOption("Sigma max", float, apply_field("s_tmax")),
-    AxisOption("Sigma noise", float, apply_field("s_noise")),
-    AxisOption("Schedule type", str, apply_field("scheduler"), choices=lambda: [x.label for x in sd_schedulers.schedulers]),
-    AxisOption("Schedule min sigma", float, apply_override("sigma_min")),
-    AxisOption("Schedule max sigma", float, apply_override("sigma_max")),
-    AxisOption("Schedule rho", float, apply_override("rho")),
-    AxisOption("Eta", float, apply_field("eta")),
-    AxisOption("Clip skip", int, apply_clip_skip),
     AxisOption("Denoising", float, apply_field("denoising_strength")),
-
-    AxisOption("Extra noise", float, apply_override("img2img_extra_noise")),
-    AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
-    AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
+    AxisOptionTxt2Img("Hires. upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
     AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ["None"] + list(sd_vae.vae_dict)),
     AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),
-    AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5),
-    AxisOption("Face restore", str, apply_face_restore, format_value=format_value),
-    AxisOption("Token merging ratio", float, apply_override("token_merging_ratio")),
-    AxisOption("Token merging ratio high-res", float, apply_override("token_merging_ratio_hr")),
-    AxisOption("Always discard next-to-last sigma", str, apply_override("always_discard_next_to_last_sigma", boolean=True), choices=boolean_choice(reverse=True)),
-    AxisOption("SGM noise multiplier", str, apply_override("sgm_noise_multiplier", boolean=True), choices=boolean_choice(reverse=True)),
-    AxisOption("Refiner checkpoint", str, apply_field("refiner_checkpoint"), format_value=format_remove_path, confirm=confirm_checkpoints_or_none, cost=1.0, choices=lambda: ["None"] + sorted(sd_models.checkpoints_list, key=str.casefold)),
-    AxisOption("Refiner switch at", float, apply_field("refiner_switch_at")),
-    AxisOption("RNG source", str, apply_override("randn_source"), choices=lambda: ["GPU", "CPU", "NV"]),
 ]
+
+if shared.cmd_opts.adv_xyz:
+    builtin_options.extend(
+        [
+            AxisOption("Var. seed", int, apply_field("subseed")),
+            AxisOption("Var. strength", float, apply_field("subseed_strength")),
+            AxisOption("Clip skip", int, apply_clip_skip),
+            AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")),
+            AxisOption("Extra noise", float, apply_override("img2img_extra_noise")),
+            AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")),
+            AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
+            AxisOption("Face restore", str, apply_face_restore, format_value=format_value),
+            AxisOption("SkipEarly", float, apply_field("skip_early_cond")),
+            AxisOption("NGMS", float, apply_field("s_min_uncond")),
+            AxisOption("Token merging ratio", float, apply_override("token_merging_ratio")),
+            AxisOption("Always discard next-to-last sigma", str, apply_override("always_discard_next_to_last_sigma", boolean=True), choices=boolean_choice(reverse=True)),
+            AxisOption("SGM noise multiplier", str, apply_override("sgm_noise_multiplier", boolean=True), choices=boolean_choice(reverse=True)),
+            AxisOption("Refiner checkpoint", str, apply_field("refiner_checkpoint"), format_value=format_remove_path, confirm=confirm_checkpoints_or_none, cost=1.0, choices=lambda: ["None"] + sorted(sd_models.checkpoints_list, key=str.casefold)),
+            AxisOption("Refiner switch at", float, apply_field("refiner_switch_at")),
+            AxisOption("RNG source", str, apply_override("randn_source"), choices=lambda: ["GPU", "CPU", "NV"]),
+        ]
+    )
+
+if shared.cmd_opts.adv_samplers:
+    builtin_options.extend(
+        [
+            AxisOption("Sigma Churn", float, apply_field("s_churn")),
+            AxisOption("Sigma min", float, apply_field("s_tmin")),
+            AxisOption("Sigma max", float, apply_field("s_tmax")),
+            AxisOption("Sigma noise", float, apply_field("s_noise")),
+            AxisOption("Schedule min sigma", float, apply_override("sigma_min")),
+            AxisOption("Schedule max sigma", float, apply_override("sigma_max")),
+            AxisOption("Schedule rho", float, apply_override("rho")),
+            AxisOption("Eta", float, apply_field("eta")),
+            AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5),
+        ]
+    )
html/extra-networks-no-cards.html CHANGED
@@ -1,6 +1,5 @@
 <div class='nocards'>
-    <
-
+    <h2>Nothing here... Add some contents to the following folder(s):</h2>
     <ul>
     {dirs}
     </ul>
html/extra-networks-pane.html CHANGED
@@ -30,6 +30,9 @@
     {tree_html}
 </div>
 <div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards'>
+    <div id='{tabname}_{extra_networks_tabname}_dirs' class='extra-network-dirs'>
+        {dir_btns_html}
+    </div>
     {items_html}
 </div>
 </div>
javascript/extraNetworks.js CHANGED
@@ -77,12 +77,12 @@ function setupExtraNetworksForTab(tabname) {
     sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1);
     let sortKeyStore = sortKey + "-" + (reverse ? "Descending" : "Ascending") + "-" + cards.length;

-    if (sortKeyStore == sort_mode.dataset.sortkey && !force)
+    if (sortKeyStore == sort_mode.dataset.sortkey && !force)
         return;
-
+
     sort_mode.dataset.sortkey = sortKeyStore;

-    cards.forEach(
+    cards.forEach((card) => {
         card.originalParentElement = card.parentElement;
     });
     let sortedCards = Array.from(cards);
@@ -95,18 +95,18 @@

         return (a < b ? -1 : (a > b ? 1 : 0));
     });
-
-
-
-    cards.forEach(
+
+    if (reverse) sortedCards.reverse();
+
+    cards.forEach((card) => {
         card.remove();
     });
-    sortedCards.forEach(
+    sortedCards.forEach((card) => {
         card.originalParentElement.appendChild(card);
     });
     };

-    search.addEventListener("input", applyFilter);
+    search.addEventListener("input", () => { applyFilter(); });
     applySort();
     applyFilter();
     extraNetworksApplySort[tabname_full] = applySort;
@@ -197,7 +197,7 @@ function setupExtraNetworks() {
     setupExtraNetworksForTab('img2img');
 }

-const re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)
+const re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/s;
 const re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g;

 const re_extranet_neg = /\(([^:^>]+:[\d.]+)\)/;
@@ -273,6 +273,15 @@ function saveCardPreview(event, tabname, filename) {
     event.preventDefault();
 }

+function extraNetworksSearchButton(tabname, extra_networks_tabname, event) {
+    const searchTextarea = gradioApp().querySelector("#" + tabname + "_" + extra_networks_tabname + "_extra_search");
+    const button = event.target;
+    const text = button.classList.contains("search-all") ? "" : button.textContent.trim();
+
+    searchTextarea.value = text;
+    updateInput(searchTextarea);
+}
+
 function extraNetworksTreeProcessFileClick(event, btn, tabname, extra_networks_tabname) {
     /**
      * Processes `onclick` events when user clicks on files in tree.
@@ -590,6 +599,9 @@ function extraNetworksEditUserMetadata(event, tabname, extraPage, cardName) {
 }

 function extraNetworksRefreshSingleCard(page, tabname, name) {
+    const refreshButton = document.getElementById(`${tabname}_${page}_extra_refresh`);
+    refreshButton.click();
+
     requestGet("./sd_extra_networks/get-single-card", { page: page, tabname: tabname, name: name }, function (data) {
         if (data && data.html) {
             let card = gradioApp().querySelector(`#${tabname}_${page.replace(" ", "_")}_cards > .card[data-name="${name}"]`);
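The `re_extranet` change above appends the `s` (dotAll) flag so the trailing `(.*)` group also captures prompt text that continues past a newline. The same pattern mirrored in Python's `re` for illustration (`re.DOTALL` plays the role of `/s`):

    import re

    # mirrors /<([^:^>]+:[^:]+):[\d.]+>(.*)/s from the diff above
    re_extranet = re.compile(r"<([^:^>]+:[^:]+):[\d.]+>(.*)", re.DOTALL)

    prompt = "<lora:myLora:0.8> a photo of a cat,\nhighly detailed"
    m = re_extranet.search(prompt)
    print(m.group(1))  # lora:myLora
    print(m.group(2))  # the rest of the prompt, newline included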
javascript/localization.js CHANGED
@@ -3,9 +3,6 @@

 var ignore_ids_for_localization = {
     setting_sd_model_checkpoint: 'OPTION',
-    modelmerger_primary_model_name: 'OPTION',
-    modelmerger_secondary_model_name: 'OPTION',
-    modelmerger_tertiary_model_name: 'OPTION',
     txt2img_styles: 'OPTION',
     img2img_styles: 'OPTION',
     setting_random_artist_categories: 'OPTION',
javascript/ui.js
CHANGED
@@ -10,10 +10,9 @@ function set_theme(theme) {
 function all_gallery_buttons() {
     let allGalleryButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnails > .thumbnail-item.thumbnail-small');
     let visibleGalleryButtons = [];
-    allGalleryButtons.forEach(function(elem) {
-        if (elem.parentElement.offsetParent) {
+    allGalleryButtons.forEach((elem) => {
+        if (elem.parentElement.offsetParent)
             visibleGalleryButtons.push(elem);
-        }
     });
     return visibleGalleryButtons;
 }
@@ -26,20 +25,15 @@ function selected_gallery_index() {
     return all_gallery_buttons().findIndex(elem => elem.classList.contains('selected'));
 }
 
-function extract_image_from_gallery(gallery) {
-    if (gallery.length == 0) {
-        return [null];
-    }
-    if (gallery.length == 1) {
-        return [gallery[0]];
-    }
+let t2i_gallery_index = 0;
+let i2i_gallery_index = 0;
 
-    let index = selected_gallery_index();
+function extract_image_from_gallery(gallery, tabname = null) {
+    if (gallery.length === 0) return [null];
+    if (gallery.length === 1) return [gallery[0]];
 
-    if (index < 0 || index >= gallery.length) {
-        // Use the first image in the gallery as the default
-        index = 0;
-    }
+    const index = (tabname === null) ? 0
+        : (tabname === "txt2img" ? t2i_gallery_index : i2i_gallery_index);
 
     return [gallery[index]];
 }
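The rewritten extract_image_from_gallery no longer reads the live selection; it indexes the gallery with a per-tab counter. The hunk does not show where t2i_gallery_index and i2i_gallery_index get updated, so the handler below is an assumed companion sketched for illustration, not part of the change.

// Assumed companion handler: remember the last selected thumbnail per tab so
// extract_image_from_gallery() can use it later.
function rememberGallerySelection(tabname) {
    const index = selected_gallery_index(); // returns -1 when nothing is selected
    if (index < 0) return;
    if (tabname === "txt2img") {
        t2i_gallery_index = index;
    } else {
        i2i_gallery_index = index;
    }
}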
@@ -262,16 +256,6 @@ onUiLoaded(function () {
     });
 
 
-function modelmerger() {
-    let id = randomId();
-    requestProgress(id, gradioApp().getElementById('modelmerger_results_panel'), null, function () { });
-
-    let res = create_submit_args(arguments);
-    res[0] = id;
-    return res;
-}
-
-
 function ask_for_style_name(_, prompt_text, negative_prompt_text) {
     let name_ = prompt('Style name:');
     return [name_, prompt_text, negative_prompt_text];
@@ -337,6 +321,7 @@ var txt2img_textarea, img2img_textarea = undefined;
 
 function restart_reload() {
     document.body.innerHTML = '<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>';
+    if (opts.no_flashbang) document.body.style.backgroundColor = "black";
 
     let requestPing = function () {
         requestGet("./internal/ping", {}, function (data) {
ldm_patched/ldm/modules/attention.py
CHANGED
@@ -210,11 +210,11 @@ if isSage2 and args.sageattn2_api is not SageAttentionAPIs.Automatic:
     from sageattention import sageattn_qk_int8_pv_fp16_triton, sageattn_qk_int8_pv_fp16_cuda, sageattn_qk_int8_pv_fp8_cuda
 
     if args.sageattn2_api is SageAttentionAPIs.Triton16:
-        sageattn = sageattn_qk_int8_pv_fp16_triton
+        sageattn = partial(sageattn_qk_int8_pv_fp16_triton, quantization_backend="cuda")
     if args.sageattn2_api is SageAttentionAPIs.CUDA16:
         sageattn = partial(sageattn_qk_int8_pv_fp16_cuda, qk_quant_gran="per_warp", pv_accum_dtype="fp16+fp32")
     if args.sageattn2_api is SageAttentionAPIs.CUDA8:
-        sageattn = partial(sageattn_qk_int8_pv_fp8_cuda, qk_quant_gran="
+        sageattn = partial(sageattn_qk_int8_pv_fp8_cuda, qk_quant_gran="per_thread", pv_accum_dtype="fp32+fp32")
 
 
 def attention_sage(q, k, v, heads, mask=None):