diff --git a/README.md b/README.md
index 967c541607045d60f30ff504a99248e2c5068a06..1706558153baf5868b22afe5afe5c895df9082ea 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
 
-## Features [May. 28]
+## Features [Jul. 23]
 
 > Most base features of the original [Automatic1111 Webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) should still function
 
 #### New Features
@@ -48,6 +48,7 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
   - enable in **Settings/Optimizations**
 - [X] Support fast `fp8` operation *(`torch._scaled_mm`)*
   - requires RTX **40** +
+  - requires **UNet Weights in fp8** option
   - ~10% speed up; reduce quality
   - enable in **Settings/Optimizations**
@@ -55,12 +56,14 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
 > - Both `fp16_accumulation` and `cublas_ops` achieve the same speed up; if you already install/update to PyTorch **2.7.0**, you do not need to go for `cublas_ops`
 > - The `fp16_accumulation` and `cublas_ops` require `fp16` precision, thus is not compatible with the `fp8` operation
+
+- [X] Persistent LoRA Patching
   - speed up LoRA loading in subsequent generations
   - see [Commandline](#by-classic)
 - [X] Implement new Samplers
   - *(ported from reForge Webui)*
-- [X] Implement Scheduler Dropdown
+- [X] Implement Scheduler dropdown
   - *(backported from Automatic1111 Webui upstream)*
   - enable in **Settings/UI Alternatives**
 - [X] Add `CFG` slider to the `Hires. fix` section
@@ -72,18 +75,33 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
   - enable in **Settings/UI Alternatives**
 - [X] Implement full precision calculation for `Mask blur` blending
   - enable in **Settings/img2img**
+- [X] Support loading upscalers in `half` precision
+  - speed up; reduce quality
+  - enable in **Settings/Upscaling**
+- [X] Support running tile composition on GPU
+  - enable in **Settings/Upscaling**
+- [X] Allow `newline` in LoRA metadata
+  - *(backported from Automatic1111 Webui upstream)*
+- [X] Implement sending parameters from generation result rather than from UI
+  - **e.g.** send the prompts instead of `Wildcard` syntax
+  - enable in **Settings/Infotext**
+- [X] Implement tiling optimization for VAE
+  - reduce memory usage; reduce speed
+  - enable in **Settings/VAE**
 - [X] Implement `diskcache` for hashes
   - *(backported from Automatic1111 Webui upstream)*
 - [X] Implement `skip_early_cond`
   - *(backported from Automatic1111 Webui upstream)*
   - enable in **Settings/Optimizations**
-- [X] Support `v-pred` **SDXL** checkpoints *(**eg.** [NoobAI](https://civitai.com/models/833294?modelVersionId=1190596))*
+- [X] Allow inserting the upscaled image into the Gallery instead of overwriting the input image
+  - *(backported from upstream [PR](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16405))*
+- [X] Support `v-pred` **SDXL** checkpoints *(**e.g.** [NoobAI](https://civitai.com/models/833294?modelVersionId=1190596))*
 - [X] Support new LoRA architectures
 - [X] Update `spandrel`
   - support new Upscaler architectures
 - [X] Add `pillow-heif` package
   - support `.avif` and `.heif` images
 - [X] Automatically determine the optimal row count for `X/Y/Z Plot`
 - [X] `DepthAnything v2` Preprocessor
 - [X] Support [NoobAI Inpaint](https://civitai.com/models/1376234/noobai-inpainting-controlnet) ControlNet
 - [X] Support [Union](https://huggingface.co/xinsir/controlnet-union-sdxl-1.0) / [ProMax](https://huggingface.co/brad-twinkl/controlnet-union-sdxl-1.0-promax) ControlNet
@@ -110,15 +129,17 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
 - [X] Some Preprocessors *(ControlNet)*
 - [X] `Photopea` and `openpose_editor` *(ControlNet)*
 - [X] Unix `.sh` launch scripts
-  - You can still use this WebUI by copying a launch script from another working WebUI; I just don't want to maintain them...
+  - You can still use this WebUI by simply copying a launch script from another working WebUI
 
 #### Optimizations
 
 - [X] **[Freedom]** Natively integrate the `SD1` and `SDXL` logics
   - no longer `git` `clone` any repository on fresh install
   - no more random hacks and monkey patches
+- [X] Fix `canvas-zoom-and-pan` built-in extension
+  - no more infinite-resizing bug when using `Send to` buttons
 - [X] Fix memory leak when switching checkpoints
-- [X] Clean up the `ldm_patched` *(**ie.** `comfy`)* folder
+- [X] Clean up the `ldm_patched` *(**i.e.** `comfy`)* folder
 - [X] Remove unused `cmd_args`
 - [X] Remove unused `args_parser`
 - [X] Remove unused `shared_options`
@@ -127,6 +148,9 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
 - [X] Remove redundant upscaler codes
   - put every upscaler inside the `ESRGAN` folder
 - [X] Optimize upscaler logics
+- [X] Optimize certain operations in `Spandrel`
+- [X] Optimize the creation of Extra Networks pages
+  - *(backported from Automatic1111 Webui upstream)*
 - [X] Improve color correction
 - [X] Improve hash caching
 - [X] Improve error logs
@@ -135,16 +159,21 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
   - improve formatting
   - update descriptions
 - [X] Check for Extension updates in parallel
-- [X] Moved `embeddings` folder into `models` folder
+- [X] Move `embeddings` folder into `models` folder
 - [X] ControlNet Rewrite
   - change Units to `gr.Tab`
   - remove multi-inputs, as they are "[misleading](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/932)"
   - change `visible` toggle to `interactive` toggle; now the UI will no longer jump around
-  - improved `Presets` application
+  - improve `Presets` application
+  - fix `Inpaint not masked` mode
 - [X] Disable Refiner by default
   - enable again in **Settings/UI Alternatives**
 - [X] Disable Tree View by default
   - enable again in **Settings/Extra Networks**
+- [X] Hide Sampler Parameters by default
+  - enable again by adding the **--adv-samplers** flag
+- [X] Hide some X/Y/Z Plot options by default
+  - enable again by adding the **--adv-xyz** flag
 - [X] Run `text encoder` on CPU by default
 - [X] Fix `pydantic` Errors
 - [X] Fix `Soft Inpainting`
@@ -154,7 +183,7 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
 - [X] Update `protobuf`
   - faster `insightface` loading
 - [X] Update to latest PyTorch
-  - `torch==2.7.0+cu128`
+  - `torch==2.7.1+cu128`
   - `xformers==0.0.30`
 
 > [!Note]
@@ -175,7 +204,6 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
 - `--no-download-sd-model`: Do not download a default checkpoint
   - can be removed after you download some checkpoints of your choice
 - `--xformers`: Install the `xformers` package to speed up generation
-  - Currently, `torch==2.7.0` does **not** support `xformers` yet
 - `--port`: Specify a server port to use
   - defaults to `7860`
 - `--api`: Enable [API](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API) access
@@ -449,6 +477,9 @@ In my experience, the speed of each attention function for SDXL is ranked in the
 
 > [!Note]
 > `SageAttention` is based on quantization, so its quality might be slightly worse than others
 
+> [!Important]
+> When using `SageAttention 2`, both a positive and a negative prompt are required; omitting the negative prompt can cause `NaN` issues
+
## Issues & Requests diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index 5ae6420ce964665afec844af7f9468efd8c1ee2c..c94a1bbc5e92279f332200570e8db71ae1fbdf8d 100644 --- a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -3,8 +3,6 @@ from __future__ import annotations import enum from collections import namedtuple -import torch.nn as nn -import torch.nn.functional as F from modules import cache, errors, hashes, sd_models, shared NetworkWeights = namedtuple("NetworkWeights", ["network_key", "sd_key", "w", "sd_module"]) @@ -33,12 +31,11 @@ class NetworkOnDisk: def read_metadata(): metadata = sd_models.read_metadata_from_safetensors(filename) - metadata.pop("ssmd_cover_images", None) # cover images are too big to display in UI return metadata if self.is_safetensors: try: - self.metadata = cache.cached_data_for_file("safetensors-metadata", "/".join(["lora", self.name]), filename, read_metadata) + self.metadata = cache.cached_data_for_file("safetensors-metadata", f"lora/{self.name}", filename, read_metadata) except Exception as e: errors.display(e, f"reading lora {filename}") @@ -53,7 +50,7 @@ class NetworkOnDisk: self.hash: str = None self.shorthash: str = None - self.set_hash(self.metadata.get("sshs_model_hash") or hashes.sha256_from_cache(self.filename, "/".join(["lora", self.name]), use_addnet_hash=self.is_safetensors) or "") + self.set_hash(self.metadata.get("sshs_model_hash") or hashes.sha256_from_cache(self.filename, f"lora/{self.name}", use_addnet_hash=self.is_safetensors) or "") self.sd_version: "SDVersion" = self.detect_version() @@ -76,14 +73,7 @@ class NetworkOnDisk: def read_hash(self): if not self.hash: - self.set_hash( - hashes.sha256( - self.filename, - "/".join(["lora", self.name]), - use_addnet_hash=self.is_safetensors, - ) - or "" - ) + self.set_hash(hashes.sha256(self.filename, f"lora/{self.name}", use_addnet_hash=self.is_safetensors) or "") def get_alias(self): import networks @@ -107,89 +97,3 @@ class Network: # LoraModule self.mentioned_name = None """the text that was used to add the network to prompt - can be either name or an alias""" - - -class ModuleType: - def create_module(self, net: Network, weights: NetworkWeights) -> Network | None: - return None - - -class NetworkModule: - def __init__(self, net: Network, weights: NetworkWeights): - self.network = net - self.network_key = weights.network_key - self.sd_key = weights.sd_key - self.sd_module = weights.sd_module - - if hasattr(self.sd_module, "weight"): - self.shape = self.sd_module.weight.shape - - self.ops = None - self.extra_kwargs = {} - if isinstance(self.sd_module, nn.Conv2d): - self.ops = F.conv2d - self.extra_kwargs = { - "stride": self.sd_module.stride, - "padding": self.sd_module.padding, - } - elif isinstance(self.sd_module, nn.Linear): - self.ops = F.linear - elif isinstance(self.sd_module, nn.LayerNorm): - self.ops = F.layer_norm - self.extra_kwargs = { - "normalized_shape": self.sd_module.normalized_shape, - "eps": self.sd_module.eps, - } - elif isinstance(self.sd_module, nn.GroupNorm): - self.ops = F.group_norm - self.extra_kwargs = { - "num_groups": self.sd_module.num_groups, - "eps": self.sd_module.eps, - } - - self.dim = None - self.bias = weights.w.get("bias") - self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None - self.scale = weights.w["scale"].item() if "scale" in weights.w else None - - def multiplier(self): - if "transformer" in self.sd_key[:20]: - return self.network.te_multiplier - else: - return 
self.network.unet_multiplier - - def calc_scale(self): - if self.scale is not None: - return self.scale - if self.dim is not None and self.alpha is not None: - return self.alpha / self.dim - - return 1.0 - - def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): - if self.bias is not None: - updown = updown.reshape(self.bias.shape) - updown += self.bias.to(orig_weight.device, dtype=updown.dtype) - updown = updown.reshape(output_shape) - - if len(output_shape) == 4: - updown = updown.reshape(output_shape) - - if orig_weight.size().numel() == updown.size().numel(): - updown = updown.reshape(orig_weight.shape) - - if ex_bias is not None: - ex_bias = ex_bias * self.multiplier() - - return updown * self.calc_scale() * self.multiplier(), ex_bias - - def calc_updown(self, target): - raise NotImplementedError - - def forward(self, x, y): - """A general forward implementation for all modules""" - if self.ops is None: - raise NotImplementedError - - updown, ex_bias = self.calc_updown(self.sd_module.weight) - return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs) diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py index f4ab1fb8666538440c9184e9ea512e75ed4fa609..ba15303be1884d06fbac8622398080144d876ac9 100644 --- a/extensions-builtin/Lora/ui_edit_user_metadata.py +++ b/extensions-builtin/Lora/ui_edit_user_metadata.py @@ -51,13 +51,13 @@ class LoraUserMetadataEditor(UserMetadataEditor): def save_lora_user_metadata( self, - name, - desc, - sd_version, - activation_text, - preferred_weight, - negative_text, - notes, + name: str, + desc: str, + sd_version: str, + activation_text: str, + preferred_weight: float, + negative_text: str, + notes: str, ): user_metadata = self.get_user_metadata(name) user_metadata["description"] = desc @@ -68,7 +68,6 @@ class LoraUserMetadataEditor(UserMetadataEditor): user_metadata["notes"] = notes self.write_user_metadata(name, user_metadata) - self.page.refresh() def get_metadata_table(self, name): table = super().get_metadata_table(name) @@ -157,8 +156,8 @@ class LoraUserMetadataEditor(UserMetadataEditor): self.create_default_editor_elems() self.taginfo = gr.HighlightedText(label="Training dataset tags") - self.edit_activation_text = gr.Text(label="Activation text", info="Will be added to prompt along with Lora") - self.edit_negative_text = gr.Text(label="Negative prompt", info="Will be added to negative prompts") + self.edit_activation_text = gr.Textbox(label="Positive Prompt", info="Will be added to the prompt after the LoRA syntax", lines=2) + self.edit_negative_text = gr.Textbox(label="Negative Prompt", info="Will be added to the negative prompt", lines=2) self.slider_preferred_weight = gr.Slider( label="Preferred weight", info="Set to 0 to use the default set in Settings", diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index c6146f38730370b5bc7f18a2efaba4ba03b8b54a..055c412a41f1160fc9c877ca3371f1189e8fa935 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -1,53 +1,53 @@ -(function () { +const elementIDs = { + img2imgTabs: "#mode_img2img .tab-nav", + inpaint: "#img2maskimg", + inpaintSketch: "#inpaint_sketch", + rangeGroup: "#img2img_column_size", + sketch: "#img2img_sketch", +}; + +const tabNameToElementId = { + "Inpaint sketch": elementIDs.inpaintSketch, + "Inpaint": elementIDs.inpaint, + "Sketch": 
elementIDs.sketch, +}; +(function () { onUiLoaded(async () => { - const elementIDs = { - img2imgTabs: "#mode_img2img .tab-nav", - inpaint: "#img2maskimg", - inpaintSketch: "#inpaint_sketch", - rangeGroup: "#img2img_column_size", - sketch: "#img2img_sketch" - }; - - const tabNameToElementId = { - "Inpaint sketch": elementIDs.inpaintSketch, - "Inpaint": elementIDs.inpaint, - "Sketch": elementIDs.sketch - }; - /** Waits for an element to be present in the DOM */ - const waitForElement = (id) => new Promise(resolve => { - const checkForElement = () => { - const element = document.querySelector(id); - if (element) return resolve(element); - setTimeout(checkForElement, 100); - }; - checkForElement(); - }); + const waitForElement = (id) => + new Promise((resolve) => { + const checkForElement = () => { + const element = document.querySelector(id); + if (element) return resolve(element); + setTimeout(checkForElement, 100); + }; + checkForElement(); + }); function getActiveTab(elements, all = false) { + if (!elements.img2imgTabs) return null; + const tabs = elements.img2imgTabs.querySelectorAll("button"); if (all) return tabs; for (let tab of tabs) { - if (tab.classList.contains("selected")) - return tab; + if (tab.classList.contains("selected")) return tab; } } // Get tab ID function getTabId(elements) { const activeTab = getActiveTab(elements); + if (!activeTab) return null; return tabNameToElementId[activeTab.innerText]; } // Wait until opts loaded async function waitForOpts() { for (; ;) { - if (window.opts && Object.keys(window.opts).length) { - return window.opts; - } - await new Promise(resolve => setTimeout(resolve, 100)); + if (window.opts && Object.keys(window.opts).length) return window.opts; + await new Promise((resolve) => setTimeout(resolve, 100)); } } @@ -108,8 +108,7 @@ typeof userValue === "object" || userValue === "disable" ) { - result[key] = - userValue === undefined ? defaultValue : userValue; + result[key] = userValue === undefined ? defaultValue : userValue; } else if (isValidHotkey(userValue)) { const normalizedUserValue = normalizeHotkey(userValue); @@ -120,20 +119,20 @@ } else { console.error( `Hotkey: ${formatHotkeyForDisplay( - userValue + userValue, )} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay( - defaultValue - )}` + defaultValue, + )}`, ); result[key] = defaultValue; } } else { console.error( `Hotkey: ${formatHotkeyForDisplay( - userValue + userValue, )} for ${key} is not valid. 
The default hotkey is used: ${formatHotkeyForDisplay( - defaultValue - )}` + defaultValue, + )}`, ); result[key] = defaultValue; } @@ -145,11 +144,10 @@ // Disables functions in the config object based on the provided list of function names function disableFunctions(config, disabledFunctions) { // Bind the hasOwnProperty method to the functionMap object to avoid errors - const hasOwnProperty = - Object.prototype.hasOwnProperty.bind(functionMap); + const hasOwnProperty = Object.prototype.hasOwnProperty.bind(functionMap); // Loop through the disabledFunctions array and disable the corresponding functions in the config object - disabledFunctions.forEach(funcName => { + disabledFunctions.forEach((funcName) => { if (hasOwnProperty(funcName)) { const key = functionMap[funcName]; config[key] = "disable"; @@ -179,16 +177,14 @@ if (!img || !imageARPreview) return; imageARPreview.style.transform = ""; - if (parseFloat(mainTab.style.width) > 865) { + if (parseFloat(mainTab.style.width) > 800) { const transformString = mainTab.style.transform; const scaleMatch = transformString.match( - /scale\(([-+]?[0-9]*\.?[0-9]+)\)/ + /scale\(([-+]?[0-9]*\.?[0-9]+)\)/, ); let zoom = 1; // default zoom - if (scaleMatch && scaleMatch[1]) { - zoom = Number(scaleMatch[1]); - } + if (scaleMatch && scaleMatch[1]) zoom = Number(scaleMatch[1]); imageARPreview.style.transformOrigin = "0 0"; imageARPreview.style.transform = `scale(${zoom})`; @@ -200,7 +196,7 @@ setTimeout(() => { img.style.display = "none"; - }, 400); + }, 500); } const hotkeysConfigOpts = await waitForOpts(); @@ -229,39 +225,39 @@ "Moving canvas": "canvas_hotkey_move", "Fullscreen": "canvas_hotkey_fullscreen", "Reset Zoom": "canvas_hotkey_reset", - "Overlap": "canvas_hotkey_overlap" + "Overlap": "canvas_hotkey_overlap", }; // Loading the configuration from opts const preHotkeysConfig = createHotkeyConfig( defaultHotkeysConfig, - hotkeysConfigOpts + hotkeysConfigOpts, ); // Disable functions that are not needed by the user const hotkeysConfig = disableFunctions( preHotkeysConfig, - preHotkeysConfig.canvas_disabled_functions + preHotkeysConfig.canvas_disabled_functions, ); let isMoving = false; - let mouseX, mouseY; let activeElement; + let interactedWithAltKey = false; const elements = Object.fromEntries( - Object.keys(elementIDs).map(id => [ + Object.keys(elementIDs).map((id) => [ id, - gradioApp().querySelector(elementIDs[id]) - ]) + gradioApp().querySelector(elementIDs[id]), + ]), ); const elemData = {}; // Apply functionality to the range inputs. Restore redmask and correct for long images. - const rangeInputs = elements.rangeGroup ? - Array.from(elements.rangeGroup.querySelectorAll("input")) : - [ + const rangeInputs = elements.rangeGroup + ? 
Array.from(elements.rangeGroup.querySelectorAll("input")) + : [ gradioApp().querySelector("#img2img_width input[type='range']"), - gradioApp().querySelector("#img2img_height input[type='range']") + gradioApp().querySelector("#img2img_height input[type='range']"), ]; for (const input of rangeInputs) { @@ -272,7 +268,7 @@ const targetElement = gradioApp().querySelector(elemId); if (!targetElement) { - console.log("Element not found"); + console.log(`Element ${elemId} not found...`); return; } @@ -281,14 +277,13 @@ elemData[elemId] = { zoom: 1, panX: 0, - panY: 0 + panY: 0, }; let fullScreenMode = false; // Create tooltip function createTooltip() { - const toolTipElement = - targetElement.querySelector(".image-container"); + const toolTipElement = targetElement.querySelector(".image-container"); const tooltip = document.createElement("div"); tooltip.className = "canvas-tooltip"; @@ -306,39 +301,37 @@ { configKey: "canvas_hotkey_zoom", action: "Zoom canvas", - keySuffix: " + wheel" + keySuffix: " + wheel", }, { configKey: "canvas_hotkey_adjust", action: "Adjust brush size", - keySuffix: " + wheel" + keySuffix: " + wheel", }, { configKey: "canvas_hotkey_reset", action: "Reset zoom" }, { configKey: "canvas_hotkey_fullscreen", - action: "Fullscreen mode" + action: "Fullscreen mode", }, { configKey: "canvas_hotkey_move", action: "Move canvas" }, - { configKey: "canvas_hotkey_overlap", action: "Overlap" } + { configKey: "canvas_hotkey_overlap", action: "Overlap" }, ]; // Create hotkeys array with disabled property based on the config values - const hotkeys = hotkeysInfo.map(info => { + const hotkeys = hotkeysInfo.map((info) => { const configValue = hotkeysConfig[info.configKey]; - const key = info.keySuffix ? - `${configValue}${info.keySuffix}` : - configValue.charAt(configValue.length - 1); + const key = info.keySuffix + ? `${configValue}${info.keySuffix}` + : configValue.charAt(configValue.length - 1); return { key, action: info.action, - disabled: configValue === "disable" + disabled: configValue === "disable", }; }); for (const hotkey of hotkeys) { - if (hotkey.disabled) { - continue; - } + if (hotkey.disabled) continue; const p = document.createElement("p"); p.innerHTML = `${hotkey.key} - ${hotkey.action}`; @@ -353,16 +346,14 @@ toolTipElement.appendChild(tooltip); } - //Show tool tip if setting enable - if (hotkeysConfig.canvas_show_tooltip) { - createTooltip(); - } + // Show tool tip if setting enable + if (hotkeysConfig.canvas_show_tooltip) createTooltip(); // In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui. 
function fixCanvas() { - const activeTab = getActiveTab(elements).textContent.trim(); + const activeTab = getActiveTab(elements)?.textContent.trim(); - if (activeTab !== "img2img") { + if (activeTab && activeTab !== "img2img") { const img = targetElement.querySelector(`${elemId} img`); if (img && img.style.display !== "none") { @@ -377,12 +368,10 @@ elemData[elemId] = { zoomLevel: 1, panX: 0, - panY: 0 + panY: 0, }; - if (isExtension) { - targetElement.style.overflow = "hidden"; - } + if (isExtension) targetElement.style.overflow = "hidden"; targetElement.isZoomed = false; @@ -390,16 +379,16 @@ targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`; const canvas = gradioApp().querySelector( - `${elemId} canvas[key="interface"]` + `${elemId} canvas[key="interface"]`, ); toggleOverlap("off"); fullScreenMode = false; - const closeBtn = targetElement.querySelector("button[aria-label='Remove Image']"); - if (closeBtn) { - closeBtn.addEventListener("click", resetZoom); - } + const closeBtn = targetElement.querySelector( + "button[aria-label='Remove Image']", + ); + if (closeBtn) closeBtn.addEventListener("click", resetZoom); if (canvas && isExtension) { const parentElement = targetElement.closest('[id^="component-"]'); @@ -411,14 +400,13 @@ fitToElement(); return; } - } if ( canvas && !isExtension && - parseFloat(canvas.style.width) > 865 && - parseFloat(targetElement.style.width) > 865 + parseFloat(canvas.style.width) > 800 && + parseFloat(targetElement.style.width) > 800 ) { fitToElement(); return; @@ -435,11 +423,8 @@ targetElement.style.zIndex = targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1; - if (forced === "off") { - targetElement.style.zIndex = zIndex1; - } else if (forced === "on") { - targetElement.style.zIndex = zIndex2; - } + if (forced === "off") targetElement.style.zIndex = zIndex1; + else if (forced === "on") targetElement.style.zIndex = zIndex2; } // Adjust the brush size based on the deltaY value from a mouse wheel event @@ -447,21 +432,18 @@ elemId, deltaY, withoutValue = false, - percentage = 5 + percentage = 5, ) { const input = gradioApp().querySelector( - `${elemId} input[aria-label='Brush radius']` + `${elemId} input[aria-label='Brush radius']`, ) || - gradioApp().querySelector( - `${elemId} button[aria-label="Use brush"]` - ); + gradioApp().querySelector(`${elemId} button[aria-label="Use brush"]`); if (input) { input.click(); if (!withoutValue) { - const maxValue = - parseFloat(input.getAttribute("max")) || 100; + const maxValue = parseFloat(input.getAttribute("max")) || 100; const changeAmount = maxValue * (percentage / 100); const newValue = parseFloat(input.value) + @@ -474,7 +456,7 @@ // Reset zoom when uploading a new image const fileInput = gradioApp().querySelector( - `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv` + `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`, ); fileInput.addEventListener("click", resetZoom); @@ -482,18 +464,23 @@ function updateZoom(newZoomLevel, mouseX, mouseY) { newZoomLevel = Math.max(0.1, Math.min(newZoomLevel, 15)); - elemData[elemId].panX += - mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel; - elemData[elemId].panY += - mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel; + // Check if we're close to the original zoom level (1.0) + if (Math.abs(newZoomLevel - 1.0) < 0.01) { + newZoomLevel = 1; + elemData[elemId].panX = 0; + elemData[elemId].panY = 0; + } else { + elemData[elemId].panX += 
+ mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel; + elemData[elemId].panY += + mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel; + } targetElement.style.transformOrigin = "0 0"; targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`; toggleOverlap("on"); - if (isExtension) { - targetElement.style.overflow = "visible"; - } + if (isExtension) targetElement.style.overflow = "visible"; return newZoomLevel; } @@ -502,27 +489,26 @@ function changeZoomLevel(operation, e) { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) { e.preventDefault(); + if (hotkeysConfig.canvas_hotkey_zoom === "Alt") + interactedWithAltKey = true; let zoomPosX, zoomPosY; let delta = 0.2; - if (elemData[elemId].zoomLevel > 7) { - delta = 0.9; - } else if (elemData[elemId].zoomLevel > 2) { - delta = 0.6; - } + if (elemData[elemId].zoomLevel > 7) delta = 0.9; + else if (elemData[elemId].zoomLevel > 2) delta = 0.6; zoomPosX = e.clientX; zoomPosY = e.clientY; fullScreenMode = false; elemData[elemId].zoomLevel = updateZoom( - elemData[elemId].zoomLevel + - (operation === "+" ? delta : -delta), + elemData[elemId].zoomLevel + (operation === "+" ? delta : -delta), zoomPosX - targetElement.getBoundingClientRect().left, - zoomPosY - targetElement.getBoundingClientRect().top + zoomPosY - targetElement.getBoundingClientRect().top, ); - targetElement.isZoomed = true; + targetElement.isZoomed = + Math.abs(elemData[elemId].zoomLevel - 1.0) > 0.01; } } @@ -533,17 +519,14 @@ */ function fitToElement() { - //Reset Zoom + // Reset Zoom targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`; let parentElement; - if (isExtension) { + if (isExtension) parentElement = targetElement.closest('[id^="component-"]'); - } else { - parentElement = targetElement.parentElement; - } - + else parentElement = targetElement.parentElement; // Get element and screen dimensions const elementWidth = targetElement.offsetWidth; @@ -569,8 +552,7 @@ const originYValue = parseFloat(originY); const offsetX = - (screenWidth - elementWidth * scale) / 2 - - originXValue * (1 - scale); + (screenWidth - elementWidth * scale) / 2 - originXValue * (1 - scale); const offsetY = (screenHeight - elementHeight * scale) / 2.5 - originYValue * (1 - scale); @@ -596,18 +578,15 @@ // Fullscreen mode function fitToScreen() { const canvas = gradioApp().querySelector( - `${elemId} canvas[key="interface"]` + `${elemId} canvas[key="interface"]`, ); if (!canvas) return; - if (canvas.offsetWidth > 862 || isExtension) { - targetElement.style.width = (canvas.offsetWidth + 2) + "px"; - } + if (canvas.offsetWidth > 800 || isExtension) + targetElement.style.width = canvas.offsetWidth + 16 + "px"; - if (isExtension) { - targetElement.style.overflow = "visible"; - } + if (isExtension) targetElement.style.overflow = "visible"; if (fullScreenMode) { resetZoom(); @@ -615,8 +594,8 @@ return; } - //Reset Zoom - targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`; + // Reset Zoom + targetElement.style.transform = 'translate(0px, 0px) scale(1.0)'; // Get scrollbar width to right-align the image const scrollbarWidth = @@ -670,24 +649,31 @@ // Handle keydown events function handleKeyDown(event) { // Disable key locks to make pasting from the buffer work correctly - if ((event.ctrlKey && event.code === 'KeyV') || (event.ctrlKey && event.code === 'KeyC') || event.code === "F5") { + if ( + (event.ctrlKey && event.code === "KeyV") || + (event.ctrlKey && event.code === 
"KeyC") || + event.code === "F5" + ) { return; } // before activating shortcut, ensure user is not actively typing in an input field if (!hotkeysConfig.canvas_blur_prompt) { - if (event.target.nodeName === 'TEXTAREA' || event.target.nodeName === 'INPUT') { + if ( + event.target.nodeName === "TEXTAREA" || + event.target.nodeName === "INPUT" + ) return; - } } - const hotkeyActions = { [hotkeysConfig.canvas_hotkey_reset]: resetZoom, [hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap, [hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen, - [hotkeysConfig.canvas_hotkey_shrink_brush]: () => adjustBrushSize(elemId, 10), - [hotkeysConfig.canvas_hotkey_grow_brush]: () => adjustBrushSize(elemId, -10) + [hotkeysConfig.canvas_hotkey_shrink_brush]: () => + adjustBrushSize(elemId, 10), + [hotkeysConfig.canvas_hotkey_grow_brush]: () => + adjustBrushSize(elemId, -10), }; const action = hotkeyActions[event.code]; @@ -699,15 +685,8 @@ if ( isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) || isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust) - ) { + ) event.preventDefault(); - } - } - - // Get Mouse position - function getMousePosition(e) { - mouseX = e.offsetX; - mouseY = e.offsetY; } // Simulation of the function to put a long image into the screen. @@ -716,31 +695,40 @@ targetElement.isExpanded = false; function autoExpand() { - const canvas = document.querySelector(`${elemId} canvas[key="interface"]`); + const canvas = document.querySelector( + `${elemId} canvas[key="interface"]`, + ); if (canvas) { - if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) { - targetElement.style.visibility = "hidden"; + if ( + hasHorizontalScrollbar(targetElement) && + targetElement.isExpanded === false + ) { setTimeout(() => { fitToScreen(); resetZoom(); - targetElement.style.visibility = "visible"; targetElement.isExpanded = true; - }, 10); + }, 25); } } } - targetElement.addEventListener("mousemove", getMousePosition); - - //observers + // Observers // Creating an observer with a callback function to handle DOM changes - const observer = new MutationObserver((mutationsList, observer) => { - for (let mutation of mutationsList) { + const observer = new MutationObserver((mutationsList) => { + for (const mutation of mutationsList) { // If the style attribute of the canvas has changed, by observation it happens only when the picture changes - if (mutation.type === 'attributes' && mutation.attributeName === 'style' && - mutation.target.tagName.toLowerCase() === 'canvas') { + if ( + mutation.type === "attributes" && + mutation.attributeName === "style" && + mutation.target.tagName.toLowerCase() === "canvas" + ) { targetElement.isExpanded = false; - setTimeout(resetZoom, 10); + setTimeout(resetZoom, 25); + setTimeout(autoExpand, 25); + setTimeout(() => { + const btn = targetElement.querySelector("button[aria-label='Undo']"); + btn.click(); + }, 25); } } }); @@ -749,7 +737,11 @@ if (hotkeysConfig.canvas_auto_expand) { targetElement.addEventListener("mousemove", autoExpand); // Set up an observer to track attribute changes - observer.observe(targetElement, { attributes: true, childList: true, subtree: true }); + observer.observe(targetElement, { + attributes: true, + childList: true, + subtree: true, + }); } // Handle events only inside the targetElement @@ -778,44 +770,53 @@ targetElement.addEventListener("mouseleave", handleMouseLeave); // Reset zoom when click on another tab - elements.img2imgTabs.addEventListener("click", resetZoom); - elements.img2imgTabs.addEventListener("click", 
() => { - // targetElement.style.width = ""; - if (parseInt(targetElement.style.width) > 865) { - setTimeout(fitToElement, 0); - } - }); + if (elements.img2imgTabs) { + elements.img2imgTabs.addEventListener("click", resetZoom); + elements.img2imgTabs.addEventListener("click", () => { + // targetElement.style.width = ""; + if (parseInt(targetElement.style.width) > 800) + setTimeout(fitToElement, 0); + }); + } - targetElement.addEventListener("wheel", e => { - // change zoom level - const operation = e.deltaY > 0 ? "-" : "+"; - changeZoomLevel(operation, e); + targetElement.addEventListener( + "wheel", + (e) => { + // change zoom level + const operation = (e.deltaY || -e.wheelDelta) > 0 ? "-" : "+"; + changeZoomLevel(operation, e); - // Handle brush size adjustment with ctrl key pressed - if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) { - e.preventDefault(); + // Handle brush size adjustment with ctrl key pressed + if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) { + e.preventDefault(); - // Increase or decrease brush size based on scroll direction - adjustBrushSize(elemId, e.deltaY); - } - }); + if (hotkeysConfig.canvas_hotkey_adjust === "Alt") + interactedWithAltKey = true; + + // Increase or decrease brush size based on scroll direction + adjustBrushSize(elemId, e.deltaY); + } + }, + { passive: false }, + ); // Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element. function handleMoveKeyDown(e) { - // Disable key locks to make pasting from the buffer work correctly - if ((e.ctrlKey && e.code === 'KeyV') || (e.ctrlKey && event.code === 'KeyC') || e.code === "F5") { + if ( + (e.ctrlKey && e.code === "KeyV") || + (e.ctrlKey && event.code === "KeyC") || + e.code === "F5" + ) { return; } // before activating shortcut, ensure user is not actively typing in an input field if (!hotkeysConfig.canvas_blur_prompt) { - if (e.target.nodeName === 'TEXTAREA' || e.target.nodeName === 'INPUT') { + if (e.target.nodeName === "TEXTAREA" || e.target.nodeName === "INPUT") return; - } } - if (e.code === hotkeysConfig.canvas_hotkey_move) { if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) { e.preventDefault(); @@ -826,21 +827,26 @@ } function handleMoveKeyUp(e) { - if (e.code === hotkeysConfig.canvas_hotkey_move) { - isMoving = false; - } + if (e.code === hotkeysConfig.canvas_hotkey_move) isMoving = false; } document.addEventListener("keydown", handleMoveKeyDown); document.addEventListener("keyup", handleMoveKeyUp); + /** Prevent firefox from opening main menu when alt is used as a hotkey for zoom or brush size */ + function handleAltKeyUp(e) { + if (e.key !== "Alt" || !interactedWithAltKey) return; + e.preventDefault(); + interactedWithAltKey = false; + } + + document.addEventListener("keyup", handleAltKeyUp); + // Detect zoom level and update the pan speed. 
function updatePanPosition(movementX, movementY) { let panSpeed = 2; - if (elemData[elemId].zoomLevel > 8) { - panSpeed = 3.5; - } + if (elemData[elemId].zoomLevel > 8) panSpeed = 3.5; elemData[elemId].panX += movementX * panSpeed; elemData[elemId].panY += movementY * panSpeed; @@ -857,10 +863,7 @@ updatePanPosition(e.movementX, e.movementY); targetElement.style.pointerEvents = "none"; - if (isExtension) { - targetElement.style.overflow = "visible"; - } - + if (isExtension) targetElement.style.overflow = "visible"; } else { targetElement.style.pointerEvents = "auto"; } @@ -874,26 +877,36 @@ // Checks for extension function checkForOutBox() { const parentElement = targetElement.closest('[id^="component-"]'); - if (parentElement.offsetWidth < targetElement.offsetWidth && !targetElement.isExpanded) { + if ( + parentElement.offsetWidth < targetElement.offsetWidth && + !targetElement.isExpanded + ) { resetZoom(); targetElement.isExpanded = true; } - if (parentElement.offsetWidth < targetElement.offsetWidth && elemData[elemId].zoomLevel == 1) { + if ( + parentElement.offsetWidth < targetElement.offsetWidth && + elemData[elemId].zoomLevel == 1 + ) { resetZoom(); } - if (parentElement.offsetWidth < targetElement.offsetWidth && targetElement.offsetWidth * elemData[elemId].zoomLevel > parentElement.offsetWidth && elemData[elemId].zoomLevel < 1 && !targetElement.isZoomed) { + if ( + parentElement.offsetWidth < targetElement.offsetWidth && + targetElement.offsetWidth * elemData[elemId].zoomLevel > + parentElement.offsetWidth && + elemData[elemId].zoomLevel < 1 && + !targetElement.isZoomed + ) { resetZoom(); } } - if (isExtension) { + if (isExtension) targetElement.addEventListener("mousemove", checkForOutBox); - } - - window.addEventListener('resize', (e) => { + window.addEventListener("resize", (e) => { resetZoom(); if (isExtension) { @@ -903,8 +916,6 @@ }); gradioApp().addEventListener("mousemove", handleMoveByKey); - - } applyZoomAndPan(elementIDs.sketch, false); @@ -924,17 +935,20 @@ } if (!mainEl) return; - mainEl.addEventListener("click", async () => { - for (const elementID of elementIDs) { - const el = await waitForElement(elementID); - if (!el) break; - applyZoomAndPan(elementID); - } - }, { once: true }); + mainEl.addEventListener( + "click", + async () => { + for (const elementID of elementIDs) { + const el = await waitForElement(elementID); + if (!el) break; + applyZoomAndPan(elementID); + } + }, + { once: true }, + ); }; window.applyZoomAndPan = applyZoomAndPan; // Only 1 elements, argument elementID, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image") window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension }); - })(); diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/densepose/densepose.py b/extensions-builtin/forge_legacy_preprocessors/annotator/densepose/densepose.py index bde378de81436f0f33a8bcd94233aaf7f7409e02..5ded6967992886a0b9f5faaf5669d9c193b5b0bb 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/densepose/densepose.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/densepose/densepose.py @@ -2,7 +2,7 @@ from typing import Tuple import math import numpy as np from enum import IntEnum -from typing import List, Tuple, Union +from typing import List, Union import torch from torch.nn import functional as F import logging diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/hed/__init__.py b/extensions-builtin/forge_legacy_preprocessors/annotator/hed/__init__.py 
index 3bb86953d08f58582334c21600a6e4b5fa7cd031..056059bb74fbaace6e79f55aba480239551f88bd 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/hed/__init__.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/hed/__init__.py @@ -11,7 +11,6 @@ import torch import numpy as np from einops import rearrange -import os from modules import devices from annotator.annotator_path import models_path from annotator.util import safe_step, nms diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/multi_depth_model_woauxi.py b/extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/multi_depth_model_woauxi.py index c989a66829a65b9024c95c2f91af670986fc8675..b80ca086d2d498f42cb62b1aa534d5a806882926 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/multi_depth_model_woauxi.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/multi_depth_model_woauxi.py @@ -2,7 +2,6 @@ from . import network_auxi as network from .net_tools import get_func import torch import torch.nn as nn -from modules import devices class RelDepthModel(nn.Module): diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/util/visualizer.py b/extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/util/visualizer.py index 63c3243b26ec942687dd790ed3589f79f05de3c7..5341a75a484c57b5671052c564c5313c1b0f9e5b 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/util/visualizer.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/util/visualizer.py @@ -1,11 +1,9 @@ -import numpy as np import os import sys import ntpath import time from . import util, html from subprocess import Popen, PIPE -import torch if sys.version_info[0] == 2: diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/blocks.py b/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/blocks.py index 0daef3f3fe2fd4a00610d99f1c9023bfca180243..9415b9873bf75673c5db48e5a85219063dadc633 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/blocks.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/blocks.py @@ -5,7 +5,6 @@ from .vit import ( _make_pretrained_vitb_rn50_384, _make_pretrained_vitl16_384, _make_pretrained_vitb16_384, - forward_vit, ) diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/dpt_depth.py b/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/dpt_depth.py index d877bfda47f9c07ef8d73b46b5f5cd86397933eb..30dc6cce102ac883356206f3dc8b001e62f2eaa9 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/dpt_depth.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/dpt_depth.py @@ -1,10 +1,8 @@ import torch import torch.nn as nn -import torch.nn.functional as F from .base_model import BaseModel from .blocks import ( - FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder, diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/midas_net_custom.py b/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/midas_net_custom.py index b962f80ef884661de289d791a175f4bbfe2c5ac7..f59c3f0d7abaac7e1f5afb4b4673afe0e801406a 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/midas_net_custom.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/midas/midas/midas_net_custom.py @@ -8,7 +8,6 @@ 
import torch.nn as nn from .base_model import BaseModel from .blocks import ( - FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder, diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/__init__.py b/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/__init__.py index f4a06a45a02beaf25421eafa04b49edc38179894..54e119cdb26500dc45e39d44a266075e2952e70b 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/__init__.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/__init__.py @@ -49,6 +49,6 @@ def apply_mlsd(input_image, thr_v, thr_d): cv2.line( img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1 ) - except Exception as e: + except Exception: pass return img_output[:, :, 0] diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_large.py b/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_large.py index 1751bb40fa04dbc17e1ede3269ee6acf171a91c4..e6bcc6be99d54cf15decacdf278834d9ad9893ad 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_large.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_large.py @@ -1,5 +1,3 @@ -import os -import sys import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_tiny.py b/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_tiny.py index 1d556776603897d279fcad05498016587f233411..af2f4ebd8df5bbded297a18bae6095e161eb4b8a 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_tiny.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/models/mbv2_mlsd_tiny.py @@ -1,5 +1,3 @@ -import os -import sys import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/utils.py b/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/utils.py index 758a4a91d41c6c4b13bbb8581d1e76009dcbfa80..1be5630e39b290499c22231b7305aee0bd678761 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/utils.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/mlsd/utils.py @@ -9,7 +9,6 @@ Copyright 2021-present NAVER Corp. Apache License v2.0 """ -import os import numpy as np import cv2 import torch @@ -648,7 +647,7 @@ def pred_squares( score_array = score_array[sorted_idx] squares = squares[sorted_idx] - except Exception as e: + except Exception: pass """return list diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmcv/ops/fused_bias_leakyrelu.py b/extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmcv/ops/fused_bias_leakyrelu.py index 35c6112ed3c91fb7b0338973f20e37b1cff0d270..496b90d1a54b3d03a0ed34747815d0f5c9f5af15 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmcv/ops/fused_bias_leakyrelu.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmcv/ops/fused_bias_leakyrelu.py @@ -179,7 +179,7 @@ class FusedBiasLeakyReLUFunction(Function): class FusedBiasLeakyReLU(nn.Module): - """Fused bias leaky ReLU. + r"""Fused bias leaky ReLU. 
This function is introduced in the StyleGAN2: http://arxiv.org/abs/1912.04958 @@ -213,7 +213,7 @@ class FusedBiasLeakyReLU(nn.Module): def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2**0.5): - """Fused bias leaky ReLU function. + r"""Fused bias leaky ReLU function. This function is introduced in the StyleGAN2: http://arxiv.org/abs/1912.04958 diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmseg/apis/inference.py b/extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmseg/apis/inference.py index 611564a32a2051e1e94e80aab3787bbb58194278..b9a10125367cf3ec4384e981272f585ebce53cae 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmseg/apis/inference.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/mmpkg/mmseg/apis/inference.py @@ -1,4 +1,3 @@ -import matplotlib.pyplot as plt import annotator.mmpkg.mmcv as mmcv import torch from annotator.mmpkg.mmcv.parallel import collate, scatter diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/body.py b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/body.py index 6a7442c65203b09636679908de6a3994b0be53c0..4b6bce22ee5c0d879d421de27de126c0b82de6b1 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/body.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/body.py @@ -1,13 +1,10 @@ import cv2 import numpy as np import math -import time from scipy.ndimage import gaussian_filter import matplotlib.pyplot as plt -import matplotlib import torch -from torchvision import transforms -from typing import NamedTuple, List, Union +from typing import List from . import util from .model import bodypose_model diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/face.py b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/face.py index a61cdb2fe92b5d51c1e8aca6b1387c1378af3b4e..20d43d9cf61e8276bf0664bac1b08f619bfa8e8e 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/face.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/face.py @@ -3,7 +3,6 @@ import numpy as np from torchvision.transforms import ToTensor, ToPILImage import torch import torch.nn.functional as F -import cv2 from . 
import util from torch.nn import Conv2d, Module, ReLU, MaxPool2d, init diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/hand.py b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/hand.py index 77da5b127389dcea5af24d6cde955499041b4a02..a1cda2f6cd198ba4b360358da1c4ea7fac4fec65 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/hand.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/hand.py @@ -1,11 +1,6 @@ import cv2 -import json import numpy as np -import math -import time from scipy.ndimage import gaussian_filter -import matplotlib.pyplot as plt -import matplotlib import torch from skimage.measure import label diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/model.py b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/model.py index b95479ef1f983774f10d05732f204e6a0f13af24..8f273d1aa71f1060a28aad4346701ec23f05b6f3 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/model.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/model.py @@ -1,7 +1,6 @@ import torch from collections import OrderedDict -import torch import torch.nn as nn diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/types.py b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/types.py index 3136612f8535517de5acf053d9f5851d29bbcdba..45a4f6e5594dac67a1c804a0e3e071941783b7f5 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/types.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/openpose/types.py @@ -1,4 +1,4 @@ -from typing import NamedTuple, List, Optional, Union +from typing import NamedTuple, List, Optional class Keypoint(NamedTuple): diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/pidinet/model.py b/extensions-builtin/forge_legacy_preprocessors/annotator/pidinet/model.py index 74d7a6a7e9515b76ea40275fc84c97a3b275ee34..5ca2445b814afe2ba4da338ed3166c558daddfb5 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/pidinet/model.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/pidinet/model.py @@ -5,8 +5,6 @@ Date: Feb 18, 2021 import math -import cv2 -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/teed/Fsmish.py b/extensions-builtin/forge_legacy_preprocessors/annotator/teed/Fsmish.py index 49e124068f6fd37ec64aaf20fd005f2cea725dcb..69691029aee958e66076a9990f258546ee3c7eaf 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/teed/Fsmish.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/teed/Fsmish.py @@ -7,7 +7,6 @@ Wang, Xueliang, Honge Ren, and Achuan Wang. 
# import pytorch import torch -import torch.nn.functional as F @torch.jit.script diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/teed/Xsmish.py b/extensions-builtin/forge_legacy_preprocessors/annotator/teed/Xsmish.py index 44ef861168058fb34cff2a35fe9eb4612c9c4635..55eebdbc13cc419ceb3bf86c38450961369d3d14 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/teed/Xsmish.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/teed/Xsmish.py @@ -7,8 +7,6 @@ smish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + sigmoid(x))) """ # import pytorch -import torch -import torch.nn.functional as F from torch import nn # import activation functions diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py index 8caf8e0411e2b8e498c2d5e3ead825582b14b7e2..b4779153964e1502a4eda492c7a95f8e262ceded 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py @@ -2,7 +2,6 @@ import timm import torch.nn as nn -from pathlib import Path from .utils import activations, forward_default, get_activation from ..external.next_vit.classification.nextvit import * diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py index 0d1ebb0af8c075a7fa5514ed095c8de43b4ac13a..4e502740bd42c8b159ca0c0bda0083bf79577a96 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py @@ -5,10 +5,6 @@ from .backbones.beit import ( _make_pretrained_beitl16_512, _make_pretrained_beitl16_384, _make_pretrained_beitb16_384, - forward_beit, -) -from .backbones.swin_common import ( - forward_swin, ) from .backbones.swin2 import ( _make_pretrained_swin2l24_384, @@ -20,13 +16,11 @@ from .backbones.swin import ( ) from .backbones.levit import ( _make_pretrained_levit_384, - forward_levit, ) from .backbones.vit import ( _make_pretrained_vitb_rn50_384, _make_pretrained_vitl16_384, _make_pretrained_vitb16_384, - forward_vit, ) diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py index b962f80ef884661de289d791a175f4bbfe2c5ac7..f59c3f0d7abaac7e1f5afb4b4673afe0e801406a 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py @@ -8,7 +8,6 @@ import torch.nn as nn from .base_model import BaseModel from .blocks import ( - FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder, diff --git 
a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py index 95b426121657682fa3ddec1172f5efdf48ff00a9..8e305085355ee6a92036c769ce1c72e6242bf03a 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py @@ -1,14 +1,11 @@ #!/usr/bin/env python3 from __future__ import print_function -import roslib # roslib.load_manifest('my_package') import sys import rospy import cv2 -import numpy as np -from std_msgs.msg import String from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py index 172a22f55dcc48806a134f0f1b8ed09ae53b983b..dc2fa151dd82dc4d29a149dbcdce73b6fd535e80 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py @@ -1,14 +1,11 @@ #!/usr/bin/env python3 from __future__ import print_function -import roslib # roslib.load_manifest('my_package') import sys import rospy import cv2 -import numpy as np -from std_msgs.msg import String from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py index 036b38b8005904b0444b54746a1a918e71aa4939..556d34dc42c9a7cc270c9f1eb4c4c950f31df1fd 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py @@ -1,13 +1,10 @@ #!/usr/bin/env python3 -import roslib # roslib.load_manifest('my_package') -import sys import rospy import cv2 -from std_msgs.msg import String from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py index ec36c6bbe05f157259677c1c6a87adbe9a860431..68c9005ef8cf753969ad8a85deb5efc1ee05e107 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py @@ -2,16 +2,10 @@ import os import ntpath -import glob import torch -import utils -import cv2 import numpy as np -from 
torchvision.transforms import Compose, Normalize -from torchvision import transforms from shutil import copyfile -import fileinput import sys sys.path.append(os.getcwd() + "/..") @@ -46,7 +40,6 @@ def restore_file(): modify_file() from midas.midas_net import MidasNet -from midas.transforms import Resize, NormalizeImage, PrepareForNet restore_file() diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py index be9686af6f6df13eda9c15c9b30f6f132b6bf0c6..b6f879c76a48bd0d4f9a4c3f46fcbe8435b11c27 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py @@ -4,14 +4,12 @@ import os import glob import utils import cv2 -import sys import numpy as np import argparse -import onnx import onnxruntime as rt -from transforms import Resize, NormalizeImage, PrepareForNet +from transforms import Resize, PrepareForNet def run(input_path, output_path, model_path, model_type="large"): diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py index de121302b6677dd81f923d8f91ea1ffcfe3cebbe..ea389c9dd274321be52297d6e382d8f2a9db27ba 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py @@ -8,7 +8,7 @@ import argparse import tensorflow as tf -from transforms import Resize, NormalizeImage, PrepareForNet +from transforms import Resize, PrepareForNet def run(input_path, output_path, model_path, model_type="large"): diff --git a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/utils/misc.py b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/utils/misc.py index 1539abd94a80e11826b4ba5151882e6ec65a9f76..d964915ff698f9e300abc648c998326eb9ba282b 100644 --- a/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/utils/misc.py +++ b/extensions-builtin/forge_legacy_preprocessors/annotator/zoe/zoedepth/utils/misc.py @@ -24,11 +24,7 @@ """Miscellaneous utility functions.""" -from scipy import ndimage -import base64 -import math -import re from io import BytesIO import matplotlib diff --git a/extensions-builtin/forge_legacy_preprocessors/install.py b/extensions-builtin/forge_legacy_preprocessors/install.py index 1ee0ea1f32b45d02f5fa2b9350c2ffa6f358e9a8..01d9dbf672541fe893246345424122a5525df945 100644 --- a/extensions-builtin/forge_legacy_preprocessors/install.py +++ b/extensions-builtin/forge_legacy_preprocessors/install.py @@ -32,7 +32,7 @@ def try_install_from_wheel(pkg_name: str, wheel_url: str): try: launch.run_pip( - f"install -U {wheel_url}", + f"install {wheel_url}", f"Legacy Preprocessor Requirement: {pkg_name}", ) except Exception as e: diff --git a/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor.py b/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor.py index aef174e7786641eb6ba69895b65cef9f28467d93..07459ef3ca2d149af1d7a79f7b3ae05bbf69cbb3 100644 --- 
a/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor.py +++ b/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor.py @@ -3,7 +3,6 @@ import cv2 import numpy as np import torch import math -import functools from dataclasses import dataclass from transformers.models.clip.modeling_clip import CLIPVisionModelOutput @@ -849,9 +848,9 @@ class InsightFaceModel: img, remove_pad = resize_image_with_pad(img, res) face_info = self.model.get(img) if not face_info: - raise Exception(f"Insightface: No face found in image.") + raise Exception("Insightface: No face found in image.") if len(face_info) > 1: - print("Insightface: More than one face is detected in the image. " f"Only the biggest one will be used.") + print("Insightface: More than one face is detected in the image. " "Only the biggest one will be used.") # only use the maximum face face_info = sorted( face_info, diff --git a/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py b/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py index 64095afdba898d23fc838a1e4a3a3d0f0f5048a6..9e07abb1ece173f0a2456cb16eeeb4d065d1aaa9 100644 --- a/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py +++ b/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py @@ -1,3 +1,5 @@ +import functools + from legacy_preprocessors.preprocessor import * diff --git a/extensions-builtin/sd_forge_controlnet/lib_controlnet/controlnet_ui/controlnet_ui_group.py b/extensions-builtin/sd_forge_controlnet/lib_controlnet/controlnet_ui/controlnet_ui_group.py index 76d64e129dc7e1e3e1672aa2b55b12f8c3dd5e3a..d098ea6b61797a2fde87f8a738b801e83e6ef58a 100644 --- a/extensions-builtin/sd_forge_controlnet/lib_controlnet/controlnet_ui/controlnet_ui_group.py +++ b/extensions-builtin/sd_forge_controlnet/lib_controlnet/controlnet_ui/controlnet_ui_group.py @@ -384,7 +384,7 @@ class ControlNetUiGroup: with gr.Row(elem_classes=["controlnet_control_type", "controlnet_row"]): self.type_filter = gr.Radio( global_state.get_all_preprocessor_tags(), - label=f"Control Type", + label="Control Type", value="All", elem_id=f"{elem_id_tabname}_{tabname}_controlnet_type_filter_radio", elem_classes="controlnet_control_type_filter_group", @@ -420,7 +420,7 @@ class ControlNetUiGroup: with gr.Row(elem_classes=["controlnet_weight_steps", "controlnet_row"]): self.weight = gr.Slider( - label=f"Control Weight", + label="Control Weight", value=self.default_unit.weight, minimum=0.0, maximum=2.0, @@ -960,6 +960,7 @@ class ControlNetUiGroup: @staticmethod def reset(): ControlNetUiGroup.a1111_context = A1111Context() + ControlNetUiGroup.all_callbacks_registered = False ControlNetUiGroup.callbacks_registered = False ControlNetUiGroup.all_ui_groups = [] diff --git a/extensions-builtin/sd_forge_controlnet/lib_controlnet/external_code.py b/extensions-builtin/sd_forge_controlnet/lib_controlnet/external_code.py index 2208dd1ce8469a2f64bdf59b8e21b56b4663b821..d096daeaecb84cd65abe631c0322817ca41682a5 100644 --- a/extensions-builtin/sd_forge_controlnet/lib_controlnet/external_code.py +++ b/extensions-builtin/sd_forge_controlnet/lib_controlnet/external_code.py @@ -128,7 +128,7 @@ def pixel_perfect_resolution( else: estimation = max(k0, k1) * float(min(raw_H, raw_W)) - logger.debug(f"Pixel Perfect Computation:") + logger.debug("Pixel Perfect Computation:") logger.debug(f"resize_mode = {resize_mode}") logger.debug(f"raw_H = 
{raw_H}") logger.debug(f"raw_W = {raw_W}") diff --git a/extensions-builtin/sd_forge_controlnet/scripts/controlnet.py b/extensions-builtin/sd_forge_controlnet/scripts/controlnet.py index d17c93556ad86d5cd08939e331f39f25a2b7f5f2..e32b2bcfb7c1634cd95c2a9bd3d866c548f4b0c3 100644 --- a/extensions-builtin/sd_forge_controlnet/scripts/controlnet.py +++ b/extensions-builtin/sd_forge_controlnet/scripts/controlnet.py @@ -1,37 +1,28 @@ -from modules import shared, scripts, script_callbacks, masking, images -from modules_forge.supported_controlnet import ControlModelPatcher -from modules_forge.shared import try_load_supported_control_model -from modules_forge.forge_util import HWC3, numpy_to_pytorch -from modules.processing import ( - StableDiffusionProcessingImg2Img, - StableDiffusionProcessingTxt2Img, - StableDiffusionProcessing, -) - -from typing import Optional -from PIL import Image -import gradio as gr -import numpy as np import functools -import torch -import cv2 +from typing import Optional, TYPE_CHECKING -from lib_controlnet import global_state, external_code -from lib_controlnet.external_code import ControlNetUnit -from lib_controlnet.utils import ( - align_dim_latent, - crop_and_resize_image, - judge_image_type, - prepare_mask, - set_numpy_seed, -) +if TYPE_CHECKING: + from modules_forge.supported_preprocessor import Preprocessor +import cv2 +import gradio as gr +import numpy as np +import torch +from lib_controlnet import external_code, global_state +from lib_controlnet.api import controlnet_api from lib_controlnet.controlnet_ui.controlnet_ui_group import ControlNetUiGroup from lib_controlnet.enums import HiResFixOption -from lib_controlnet.api import controlnet_api +from lib_controlnet.external_code import ControlNetUnit from lib_controlnet.infotext import Infotext from lib_controlnet.logging import logger +from lib_controlnet.utils import align_dim_latent, crop_and_resize_image, judge_image_type, prepare_mask, set_numpy_seed +from PIL import Image, ImageOps +from modules import images, masking, script_callbacks, scripts, shared +from modules.processing import StableDiffusionProcessing, StableDiffusionProcessingImg2Img, StableDiffusionProcessingTxt2Img +from modules_forge.forge_util import HWC3, numpy_to_pytorch +from modules_forge.shared import try_load_supported_control_model +from modules_forge.supported_controlnet import ControlModelPatcher global_state.update_controlnet_filenames() @@ -80,9 +71,7 @@ class ControlNetForForgeOfficial(scripts.Script): with gr.Tab(label=f"ControlNet Unit {i + 1}", id=i): group = ControlNetUiGroup(is_img2img, default_unit) ui_groups.append(group) - controls.append( - group.render(f"ControlNet-{i}", elem_id_tabname) - ) + controls.append(group.render(f"ControlNet-{i}", elem_id_tabname)) for i, ui_group in enumerate(ui_groups): infotext.register_unit(i, ui_group) @@ -93,64 +82,36 @@ class ControlNetForForgeOfficial(scripts.Script): return controls - def get_enabled_units(self, units): - # Parse dict from API calls - units = [ - ControlNetUnit.from_dict(unit) if isinstance(unit, dict) else unit - for unit in units - ] + def get_enabled_units(self, units: list[ControlNetUnit]): # Parse dict from API calls + units = [ControlNetUnit.from_dict(unit) if isinstance(unit, dict) else unit for unit in units] assert all(isinstance(unit, ControlNetUnit) for unit in units) enabled_units = [x for x in units if x.enabled] return enabled_units @staticmethod - def try_crop_image_with_a1111_mask( - p: StableDiffusionProcessing, - input_image: np.ndarray, - resize_mode: 
external_code.ResizeMode, - preprocessor, - ) -> np.ndarray: + def try_crop_image_with_a1111_mask(p: StableDiffusionProcessing, input_image: np.ndarray, resize_mode: external_code.ResizeMode, preprocessor: "Preprocessor") -> np.ndarray: a1111_mask_image: Optional[Image.Image] = getattr(p, "image_mask", None) - is_only_masked_inpaint: bool = ( - issubclass(type(p), StableDiffusionProcessingImg2Img) - and p.inpaint_full_res - and a1111_mask_image is not None - ) + is_only_masked_inpaint: bool = issubclass(type(p), StableDiffusionProcessingImg2Img) and p.inpaint_full_res and a1111_mask_image is not None - if ( - preprocessor.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab - and is_only_masked_inpaint - ): + if preprocessor.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab and is_only_masked_inpaint: logger.info("Crop input image based on A1111 mask.") input_image = [input_image[:, :, i] for i in range(input_image.shape[2])] input_image = [Image.fromarray(x) for x in input_image] mask = prepare_mask(a1111_mask_image, p) - crop_region = masking.get_crop_region( - np.array(mask), p.inpaint_full_res_padding - ) - crop_region = masking.expand_crop_region( - crop_region, p.width, p.height, mask.width, mask.height - ) + crop_region = masking.get_crop_region(np.array(mask), p.inpaint_full_res_padding) + crop_region = masking.expand_crop_region(crop_region, p.width, p.height, mask.width, mask.height) - input_image = [ - images.resize_image(resize_mode.int_value(), i, mask.width, mask.height) - for i in input_image - ] + input_image = [images.resize_image(resize_mode.int_value(), i, mask.width, mask.height) for i in input_image] input_image = [x.crop(crop_region) for x in input_image] - input_image = [ - images.resize_image( - external_code.ResizeMode.OUTER_FIT.int_value(), x, p.width, p.height - ) - for x in input_image - ] + input_image = [images.resize_image(external_code.ResizeMode.OUTER_FIT.int_value(), x, p.width, p.height) for x in input_image] input_image = [np.asarray(x)[:, :, 0] for x in input_image] input_image = np.stack(input_image, axis=2) return input_image - def get_input_data(self, p, unit, preprocessor, h, w): + def get_input_data(self, p: StableDiffusionProcessing, unit: ControlNetUnit, preprocessor: "Preprocessor", h: int, w: int): resize_mode = external_code.resize_mode_from_value(unit.resize_mode) image_list = [] @@ -159,6 +120,9 @@ class ControlNetForForgeOfficial(scripts.Script): a1111_i2i_image = getattr(p, "init_images", [None])[0] a1111_i2i_mask = getattr(p, "image_mask", None) + if a1111_i2i_mask is not None and getattr(p, "inpainting_mask_invert", False): + a1111_i2i_mask = ImageOps.invert(a1111_i2i_mask) + using_a1111_data = False if unit.image is None: @@ -198,16 +162,11 @@ class ControlNetForForgeOfficial(scripts.Script): (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST, ) - mask = self.try_crop_image_with_a1111_mask( - p, mask, resize_mode, preprocessor - ) + mask = self.try_crop_image_with_a1111_mask(p, mask, resize_mode, preprocessor) image_list = [[image, mask]] - if ( - resize_mode == external_code.ResizeMode.OUTER_FIT - and preprocessor.expand_mask_when_resize_and_fill - ): + if resize_mode == external_code.ResizeMode.OUTER_FIT and preprocessor.expand_mask_when_resize_and_fill: new_image_list = [] for input_image, input_mask in image_list: if input_mask is None: @@ -232,16 +191,12 @@ class ControlNetForForgeOfficial(scripts.Script): return image_list, resize_mode @staticmethod - def get_target_dimensions( - p: StableDiffusionProcessing, 
- ) -> tuple[int, int, int, int]: + def get_target_dimensions(p: StableDiffusionProcessing) -> tuple[int, int, int, int]: """Returns (h, w, hr_h, hr_w).""" h = align_dim_latent(p.height) w = align_dim_latent(p.width) - high_res_fix = getattr(p, "enable_hr", False) and isinstance( - p, StableDiffusionProcessingTxt2Img - ) + high_res_fix = getattr(p, "enable_hr", False) and isinstance(p, StableDiffusionProcessingTxt2Img) if high_res_fix: if p.hr_resize_x == 0 and p.hr_resize_y == 0: @@ -258,20 +213,11 @@ class ControlNetForForgeOfficial(scripts.Script): return h, w, hr_y, hr_x @torch.no_grad() - def process_unit_after_click_generate( - self, - p: StableDiffusionProcessing, - unit: ControlNetUnit, - params: ControlNetCachedParameters, - *args, - **kwargs, - ) -> bool: + def process_unit_after_click_generate(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs) -> bool: h, w, hr_y, hr_x = self.get_target_dimensions(p) - has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr( - p, "enable_hr", False - ) + has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(p, "enable_hr", False) if unit.use_preview_as_input: unit.module = "None" @@ -322,9 +268,7 @@ class ControlNetForForgeOfficial(scripts.Script): control_masks.append(input_mask) if len(input_list) > 1 and not preprocessor_output_is_image: - logger.info( - "Batch wise input only support controlnet, control-lora, and t2i adapters!" - ) + logger.info("Batch wise input only support controlnet, control-lora, and t2i adapters!") break if has_high_res_fix: @@ -335,14 +279,7 @@ class ControlNetForForgeOfficial(scripts.Script): alignment_indices = [i % len(preprocessor_outputs) for i in range(p.batch_size)] def attach_extra_result_image(img: np.ndarray, is_high_res: bool = False): - if ( - not shared.opts.data.get("control_net_no_detectmap", False) - and ( - (is_high_res and hr_option.high_res_enabled) - or (not is_high_res and hr_option.low_res_enabled) - ) - and unit.save_detected_map - ): + if not shared.opts.data.get("control_net_no_detectmap", False) and ((is_high_res and hr_option.high_res_enabled) or (not is_high_res and hr_option.low_res_enabled)) and unit.save_detected_map: p.extra_result_images.append(img) if preprocessor_output_is_image: @@ -350,35 +287,21 @@ class ControlNetForForgeOfficial(scripts.Script): params.control_cond_for_hr_fix = [] for preprocessor_output in preprocessor_outputs: - control_cond = crop_and_resize_image( - preprocessor_output, resize_mode, h, w - ) - attach_extra_result_image( - external_code.visualize_inpaint_mask(control_cond) - ) - params.control_cond.append( - numpy_to_pytorch(control_cond).movedim(-1, 1) - ) + control_cond = crop_and_resize_image(preprocessor_output, resize_mode, h, w) + attach_extra_result_image(external_code.visualize_inpaint_mask(control_cond)) + params.control_cond.append(numpy_to_pytorch(control_cond).movedim(-1, 1)) - params.control_cond = torch.cat(params.control_cond, dim=0)[ - alignment_indices - ].contiguous() + params.control_cond = torch.cat(params.control_cond, dim=0)[alignment_indices].contiguous() if has_high_res_fix: for preprocessor_output in preprocessor_outputs: - control_cond_for_hr_fix = crop_and_resize_image( - preprocessor_output, resize_mode, hr_y, hr_x - ) + control_cond_for_hr_fix = crop_and_resize_image(preprocessor_output, resize_mode, hr_y, hr_x) attach_extra_result_image( external_code.visualize_inpaint_mask(control_cond_for_hr_fix), is_high_res=True, ) - 
params.control_cond_for_hr_fix.append( - numpy_to_pytorch(control_cond_for_hr_fix).movedim(-1, 1) - ) - params.control_cond_for_hr_fix = torch.cat( - params.control_cond_for_hr_fix, dim=0 - )[alignment_indices].contiguous() + params.control_cond_for_hr_fix.append(numpy_to_pytorch(control_cond_for_hr_fix).movedim(-1, 1)) + params.control_cond_for_hr_fix = torch.cat(params.control_cond_for_hr_fix, dim=0)[alignment_indices].contiguous() else: params.control_cond_for_hr_fix = params.control_cond else: @@ -392,30 +315,20 @@ class ControlNetForForgeOfficial(scripts.Script): for input_mask in control_masks: fill_border = preprocessor.fill_mask_with_one_when_resize_and_fill - control_mask = crop_and_resize_image( - input_mask, resize_mode, h, w, fill_border - ) + control_mask = crop_and_resize_image(input_mask, resize_mode, h, w, fill_border) attach_extra_result_image(control_mask) control_mask = numpy_to_pytorch(control_mask).movedim(-1, 1)[:, :1] params.control_mask.append(control_mask) if has_high_res_fix: - control_mask_for_hr_fix = crop_and_resize_image( - input_mask, resize_mode, hr_y, hr_x, fill_border - ) + control_mask_for_hr_fix = crop_and_resize_image(input_mask, resize_mode, hr_y, hr_x, fill_border) attach_extra_result_image(control_mask_for_hr_fix, is_high_res=True) - control_mask_for_hr_fix = numpy_to_pytorch( - control_mask_for_hr_fix - ).movedim(-1, 1)[:, :1] + control_mask_for_hr_fix = numpy_to_pytorch(control_mask_for_hr_fix).movedim(-1, 1)[:, :1] params.control_mask_for_hr_fix.append(control_mask_for_hr_fix) - params.control_mask = torch.cat(params.control_mask, dim=0)[ - alignment_indices - ].contiguous() + params.control_mask = torch.cat(params.control_mask, dim=0)[alignment_indices].contiguous() if has_high_res_fix: - params.control_mask_for_hr_fix = torch.cat( - params.control_mask_for_hr_fix, dim=0 - )[alignment_indices].contiguous() + params.control_mask_for_hr_fix = torch.cat(params.control_mask_for_hr_fix, dim=0)[alignment_indices].contiguous() else: params.control_mask_for_hr_fix = params.control_mask @@ -434,31 +347,18 @@ class ControlNetForForgeOfficial(scripts.Script): params.preprocessor = preprocessor - params.preprocessor.process_after_running_preprocessors( - process=p, params=params, **kwargs - ) - params.model.process_after_running_preprocessors( - process=p, params=params, **kwargs - ) + params.preprocessor.process_after_running_preprocessors(process=p, params=params, **kwargs) + params.model.process_after_running_preprocessors(process=p, params=params, **kwargs) logger.info(f"{type(params.model).__name__}: {model_filename}") return True @torch.no_grad() - def process_unit_before_every_sampling( - self, - p: StableDiffusionProcessing, - unit: ControlNetUnit, - params: ControlNetCachedParameters, - *args, - **kwargs, - ): + def process_unit_before_every_sampling(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs): is_hr_pass = getattr(p, "is_hr_pass", False) - has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr( - p, "enable_hr", False - ) + has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(p, "enable_hr", False) if has_high_res_fix: hr_option = HiResFixOption.from_value(unit.hr_option) @@ -466,11 +366,11 @@ class ControlNetForForgeOfficial(scripts.Script): hr_option = HiResFixOption.BOTH if has_high_res_fix and is_hr_pass and (not hr_option.high_res_enabled): - logger.info(f"ControlNet Skipped High-res pass.") + logger.info("ControlNet Skipped 
High-res pass.") return if has_high_res_fix and (not is_hr_pass) and (not hr_option.low_res_enabled): - logger.info(f"ControlNet Skipped Low-res pass.") + logger.info("ControlNet Skipped Low-res pass.") return if is_hr_pass: @@ -543,16 +443,13 @@ class ControlNetForForgeOfficial(scripts.Script): params.model.positive_advanced_weighting = soft_weighting.copy() params.model.negative_advanced_weighting = soft_weighting.copy() - cond, mask = params.preprocessor.process_before_every_sampling( - p, cond, mask, *args, **kwargs - ) + cond, mask = params.preprocessor.process_before_every_sampling(p, cond, mask, *args, **kwargs) params.model.advanced_mask_weighting = mask params.model.process_before_every_sampling(p, cond, mask, *args, **kwargs) logger.info(f"ControlNet Method {params.preprocessor.name} patched.") - return @staticmethod def bound_check_params(unit: ControlNetUnit) -> None: @@ -567,35 +464,16 @@ class ControlNetForForgeOfficial(scripts.Script): preprocessor = global_state.get_preprocessor(unit.module) if unit.processor_res < 0: - unit.processor_res = int( - preprocessor.slider_resolution.gradio_update_kwargs.get("value", 512) - ) - + unit.processor_res = int(preprocessor.slider_resolution.gradio_update_kwargs.get("value", 512)) if unit.threshold_a < 0: - unit.threshold_a = int( - preprocessor.slider_1.gradio_update_kwargs.get("value", 1.0) - ) - + unit.threshold_a = int(preprocessor.slider_1.gradio_update_kwargs.get("value", 1.0)) if unit.threshold_b < 0: - unit.threshold_b = int( - preprocessor.slider_2.gradio_update_kwargs.get("value", 1.0) - ) - - return + unit.threshold_b = int(preprocessor.slider_2.gradio_update_kwargs.get("value", 1.0)) @torch.no_grad() - def process_unit_after_every_sampling( - self, - p: StableDiffusionProcessing, - unit: ControlNetUnit, - params: ControlNetCachedParameters, - *args, - **kwargs, - ): - + def process_unit_after_every_sampling(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs): params.preprocessor.process_after_every_sampling(p, params, *args, **kwargs) params.model.process_after_every_sampling(p, params, *args, **kwargs) - return @torch.no_grad() def process(self, p, *args, **kwargs): @@ -614,19 +492,15 @@ class ControlNetForForgeOfficial(scripts.Script): if i not in self.current_params: logger.warning(f"ControlNet Unit {i + 1} is skipped...") continue - self.process_unit_before_every_sampling( - p, unit, self.current_params[i], *args, **kwargs - ) + self.process_unit_before_every_sampling(p, unit, self.current_params[i], *args, **kwargs) @torch.no_grad() def postprocess_batch_list(self, p, pp, *args, **kwargs): for i, unit in enumerate(self.get_enabled_units(args)): if i in self.current_params: - self.process_unit_after_every_sampling( - p, unit, self.current_params[i], pp, *args, **kwargs - ) + self.process_unit_after_every_sampling(p, unit, self.current_params[i], pp, *args, **kwargs) - def postprocess(self, p, processed, *args): + def postprocess(self, *args): self.current_params = {} @@ -689,4 +563,6 @@ script_callbacks.on_ui_settings(on_ui_settings) script_callbacks.on_infotext_pasted(Infotext.on_infotext_pasted) script_callbacks.on_after_component(ControlNetUiGroup.on_after_component) script_callbacks.on_before_reload(ControlNetUiGroup.reset) -script_callbacks.on_app_started(controlnet_api) + +if shared.cmd_opts.api: + script_callbacks.on_app_started(controlnet_api) diff --git a/extensions-builtin/sd_forge_multidiffusion/lib_multidiffusion/tiled_diffusion.py 
b/extensions-builtin/sd_forge_multidiffusion/lib_multidiffusion/tiled_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..b579afca0a872b5a0e07fdd33ac4516247ac3602 --- /dev/null +++ b/extensions-builtin/sd_forge_multidiffusion/lib_multidiffusion/tiled_diffusion.py @@ -0,0 +1,539 @@ +# 1st Edit by. https://github.com/shiimizu/ComfyUI-TiledDiffusion +# 2nd Edit by. Forge Official +# 3rd Edit by. Panchovix +# 4th Edit by. Haoming02 +# - Based on: https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111 + +from enum import Enum +from typing import Callable, Final, Union + +import numpy as np +import torch +from numpy import exp, pi, sqrt +from torch import Tensor + +from ldm_patched.modules.controlnet import ControlNet, T2IAdapter +from ldm_patched.modules.model_base import BaseModel +from ldm_patched.modules.model_management import current_loaded_models, get_torch_device, load_models_gpu +from ldm_patched.modules.model_patcher import ModelPatcher +from ldm_patched.modules.utils import common_upscale + +opt_C: Final[int] = 4 +opt_f: Final[int] = 8 +device: Final[torch.device] = get_torch_device() + + +class BlendMode(Enum): + FOREGROUND = "Foreground" + BACKGROUND = "Background" + + +class BBox: + def __init__(self, x: int, y: int, w: int, h: int): + self.x = x + self.y = y + self.w = w + self.h = h + self.box = [x, y, x + w, y + h] + self.slicer = slice(None), slice(None), slice(y, y + h), slice(x, x + w) + + def __getitem__(self, idx: int) -> int: + return self.box[idx] + + +def processing_interrupted(): + from modules import shared + + return shared.state.interrupted or shared.state.skipped + + +def ceildiv(big: int, small: int) -> int: + return -(big // -small) + + +def repeat_to_batch_size(tensor: torch.Tensor, batch_size: int, dim: int = 0): + if dim == 0 and tensor.shape[dim] == 1: + return tensor.expand([batch_size] + [-1] * (len(tensor.shape) - 1)) + if tensor.shape[dim] > batch_size: + return tensor.narrow(dim, 0, batch_size) + elif tensor.shape[dim] < batch_size: + return tensor.repeat(dim * [1] + [ceildiv(batch_size, tensor.shape[dim])] + [1] * (len(tensor.shape) - 1 - dim)).narrow(dim, 0, batch_size) + return tensor + + +def split_bboxes(w: int, h: int, tile_w: int, tile_h: int, overlap: int = 16, init_weight: Union[Tensor, float] = 1.0) -> tuple[list[BBox], Tensor]: + cols = ceildiv((w - overlap), (tile_w - overlap)) + rows = ceildiv((h - overlap), (tile_h - overlap)) + dx = (w - tile_w) / (cols - 1) if cols > 1 else 0 + dy = (h - tile_h) / (rows - 1) if rows > 1 else 0 + + bbox_list: list[BBox] = [] + weight = torch.zeros((1, 1, h, w), device=device, dtype=torch.float32) + for row in range(rows): + y = min(int(row * dy), h - tile_h) + for col in range(cols): + x = min(int(col * dx), w - tile_w) + + bbox = BBox(x, y, tile_w, tile_h) + bbox_list.append(bbox) + weight[bbox.slicer] += init_weight + + return bbox_list, weight + + +class AbstractDiffusion: + def __init__(self): + self.method = self.__class__.__name__ + + self.w: int = 0 + self.h: int = 0 + self.tile_width: int = None + self.tile_height: int = None + self.tile_overlap: int = None + self.tile_batch_size: int = None + + self.x_buffer: Tensor = None + self._weights: Tensor = None + self._init_grid_bbox = None + self._init_done = None + + self.step_count = 0 + self.inner_loop_count = 0 + self.kdiff_step = -1 + + self.enable_grid_bbox: bool = False + self.tile_w: int = None + self.tile_h: int = None + self.tile_bs: int = None + self.num_tiles: int = None + self.num_batches: int = 
None + self.batched_bboxes: list[list[BBox]] = [] + + self.enable_controlnet: bool = False + self.control_tensor_batch_dict = {} + self.control_tensor_batch: list[list[Tensor]] = [[]] + self.control_params: dict[tuple, list[list[Tensor]]] = {} + self.control_tensor_cpu: bool = False + self.control_tensor_custom: list[list[Tensor]] = [] + + self.refresh = False + self.weights = None + + def reset(self): + tile_width = self.tile_width + tile_height = self.tile_height + tile_overlap = self.tile_overlap + tile_batch_size = self.tile_batch_size + compression = self.compression + width = self.width + height = self.height + overlap = self.overlap + self.__init__() + self.compression = compression + self.width = width + self.height = height + self.overlap = overlap + self.tile_width = tile_width + self.tile_height = tile_height + self.tile_overlap = tile_overlap + self.tile_batch_size = tile_batch_size + + def repeat_tensor(self, x: Tensor, n: int, concat=False, concat_to=0) -> Tensor: + """repeat the tensor on it's first dim""" + if n == 1: + return x + B = x.shape[0] + r_dims = len(x.shape) - 1 + if B == 1: + shape = [n] + [-1] * r_dims + return x.expand(shape) + else: + if concat: + return torch.cat([x for _ in range(n)], dim=0)[:concat_to] + shape = [n] + [1] * r_dims + return x.repeat(shape) + + def reset_buffer(self, x_in: Tensor): + if self.x_buffer is None or self.x_buffer.shape != x_in.shape: + self.x_buffer = torch.zeros_like(x_in, device=x_in.device, dtype=x_in.dtype) + else: + self.x_buffer.zero_() + + def init_grid_bbox(self, tile_w: int, tile_h: int, overlap: int, tile_bs: int): + self.weights = torch.zeros((1, 1, self.h, self.w), device=device, dtype=torch.float32) + self.enable_grid_bbox = True + + self.tile_w = min(tile_w, self.w) + self.tile_h = min(tile_h, self.h) + overlap = max(0, min(overlap, min(tile_w, tile_h) - 4)) + bboxes, weights = split_bboxes(self.w, self.h, self.tile_w, self.tile_h, overlap, self.get_tile_weights()) + self.weights += weights + self.num_tiles = len(bboxes) + self.num_batches = ceildiv(self.num_tiles, tile_bs) + self.tile_bs = ceildiv(len(bboxes), self.num_batches) + self.batched_bboxes = [bboxes[i * self.tile_bs : (i + 1) * self.tile_bs] for i in range(self.num_batches)] + + def get_grid_bbox(self, tile_w: int, tile_h: int, overlap: int, tile_bs: int, w: int, h: int, device: torch.device, get_tile_weights: Callable = lambda: 1.0) -> list[list[BBox]]: + weights = torch.zeros((1, 1, h, w), device=device, dtype=torch.float32) + + tile_w = min(tile_w, w) + tile_h = min(tile_h, h) + overlap = max(0, min(overlap, min(tile_w, tile_h) - 4)) + bboxes, weights_ = split_bboxes(w, h, tile_w, tile_h, overlap, get_tile_weights()) + weights += weights_ + num_tiles = len(bboxes) + num_batches = ceildiv(num_tiles, tile_bs) + tile_bs = ceildiv(len(bboxes), num_batches) + batched_bboxes = [bboxes[i * tile_bs : (i + 1) * tile_bs] for i in range(num_batches)] + return batched_bboxes + + def get_tile_weights(self) -> Union[Tensor, float]: + return 1.0 + + def init_noise_inverse(self, steps: int, retouch: float, get_cache_callback, set_cache_callback, renoise_strength: float, renoise_kernel: int): + self.noise_inverse_enabled = True + self.noise_inverse_steps = steps + self.noise_inverse_retouch = float(retouch) + self.noise_inverse_renoise_strength = float(renoise_strength) + self.noise_inverse_renoise_kernel = int(renoise_kernel) + self.noise_inverse_set_cache = set_cache_callback + self.noise_inverse_get_cache = get_cache_callback + + def init_done(self): + """ + Call 
this after all `init_*`, settings are done, now perform: + - settings sanity check + - pre-computations, cache init + - anything thing needed before denoising starts + """ + + self.total_bboxes = 0 + if self.enable_grid_bbox: + self.total_bboxes += self.num_batches + assert self.total_bboxes > 0, "Nothing to paint! No background to draw and no custom bboxes were provided." + + def prepare_controlnet_tensors(self, refresh: bool = False, tensor=None): + """Crop the control tensor into tiles and cache them""" + if not refresh: + if self.control_tensor_batch is not None or self.control_params is not None: + return + tensors = [tensor] + self.org_control_tensor_batch = tensors + self.control_tensor_batch = [] + for i in range(len(tensors)): + control_tile_list = [] + control_tensor = tensors[i] + for bboxes in self.batched_bboxes: + single_batch_tensors = [] + for bbox in bboxes: + if len(control_tensor.shape) == 3: + control_tensor.unsqueeze_(0) + control_tile = control_tensor[:, :, bbox[1] * opt_f : bbox[3] * opt_f, bbox[0] * opt_f : bbox[2] * opt_f] + single_batch_tensors.append(control_tile) + control_tile = torch.cat(single_batch_tensors, dim=0) + if self.control_tensor_cpu: + control_tile = control_tile.cpu() + control_tile_list.append(control_tile) + self.control_tensor_batch.append(control_tile_list) + + def switch_controlnet_tensors(self, batch_id: int, x_batch_size: int, tile_batch_size: int, is_denoise=False): + if self.control_tensor_batch is None: + return + + for param_id in range(len(self.control_tensor_batch)): + control_tile = self.control_tensor_batch[param_id][batch_id] + if x_batch_size > 1: + all_control_tile = [] + for i in range(tile_batch_size): + this_control_tile = [control_tile[i].unsqueeze(0)] * x_batch_size + all_control_tile.append(torch.cat(this_control_tile, dim=0)) + control_tile = torch.cat(all_control_tile, dim=0) + self.control_tensor_batch[param_id][batch_id] = control_tile + + def process_controlnet(self, x_noisy, c_in: dict, cond_or_uncond: list, bboxes, batch_size: int, batch_id: int, shifts=None, shift_condition=None): + control: ControlNet = c_in["control"] + param_id = -1 + tuple_key = tuple(cond_or_uncond) + tuple(x_noisy.shape) + while control is not None: + param_id += 1 + + if tuple_key not in self.control_params: + self.control_params[tuple_key] = [[None]] + + while len(self.control_params[tuple_key]) <= param_id: + self.control_params[tuple_key].append([None]) + + while len(self.control_params[tuple_key][param_id]) <= batch_id: + self.control_params[tuple_key][param_id].append(None) + + if self.refresh or control.cond_hint is None or not isinstance(self.control_params[tuple_key][param_id][batch_id], Tensor): + if control.cond_hint is not None: + del control.cond_hint + control.cond_hint = None + compression_ratio = control.compression_ratio + if control.vae is not None: + compression_ratio *= control.vae.downscale_ratio + else: + if control.latent_format is not None: + raise ValueError("This Controlnet needs a VAE but none was provided, please use a ControlNetApply node with a VAE input and connect it.") + PH, PW = self.h * compression_ratio, self.w * compression_ratio + + device = getattr(control, "device", x_noisy.device) + dtype = getattr(control, "manual_cast_dtype", None) + if dtype is None: + dtype = getattr(getattr(control, "control_model", None), "dtype", None) + if dtype is None: + dtype = x_noisy.dtype + + if isinstance(control, T2IAdapter): + width, height = control.scale_image_to(PW, PH) + cns = 
common_upscale(control.cond_hint_original, width, height, control.upscale_algorithm, "center").float().to(device=device) + if control.channels_in == 1 and control.cond_hint.shape[1] > 1: + cns = torch.mean(control.cond_hint, 1, keepdim=True) + elif control.__class__.__name__ == "ControlLLLiteAdvanced": + if getattr(control, "sub_idxs", None) is not None and control.cond_hint_original.shape[0] >= control.full_latent_length: + cns = common_upscale(control.cond_hint_original[control.sub_idxs], PW, PH, control.upscale_algorithm, "center").to(dtype=dtype, device=device) + else: + cns = common_upscale(control.cond_hint_original, PW, PH, control.upscale_algorithm, "center").to(dtype=dtype, device=device) + else: + cns = common_upscale(control.cond_hint_original, PW, PH, control.upscale_algorithm, "center").to(dtype=dtype, device=device) + if getattr(control, "vae", None) is not None: + loaded_models_ = current_loaded_models(only_currently_used=True) + cns = control.vae.encode(cns.movedim(1, -1)) + load_models_gpu(loaded_models_) + if getattr(control, "latent_format", None) is not None: + cns = control.latent_format.process_in(cns) + if len(getattr(control, "extra_concat_orig", ())) > 0: + to_concat = [] + for c in control.extra_concat_orig: + c = c.to(device=device) + c = common_upscale(c, cns.shape[3], cns.shape[2], control.upscale_algorithm, "center") + to_concat.append(repeat_to_batch_size(c, cns.shape[0])) + cns = torch.cat([cns] + to_concat, dim=1) + + cns = cns.to(device=device, dtype=dtype) + cf = control.compression_ratio + if cns.shape[0] != batch_size: + cns = repeat_to_batch_size(cns, batch_size) + if shifts is not None: + control.cns = cns + sh_h, sh_w = shifts + sh_h *= cf + sh_w *= cf + if (sh_h, sh_w) != (0, 0): + if sh_h == 0 or sh_w == 0: + cns = control.cns.roll(shifts=(sh_h, sh_w), dims=(-2, -1)) + else: + if shift_condition: + cns = control.cns.roll(shifts=sh_h, dims=-2) + else: + cns = control.cns.roll(shifts=sh_w, dims=-1) + cns_slices = [cns[:, :, bbox[1] * cf : bbox[3] * cf, bbox[0] * cf : bbox[2] * cf] for bbox in bboxes] + control.cond_hint = torch.cat(cns_slices, dim=0).to(device=cns.device) + del cns_slices + del cns + self.control_params[tuple_key][param_id][batch_id] = control.cond_hint + else: + if hasattr(control, "cns") and shifts is not None: + cf = control.compression_ratio + cns = control.cns + sh_h, sh_w = shifts + sh_h *= cf + sh_w *= cf + if (sh_h, sh_w) != (0, 0): + if sh_h == 0 or sh_w == 0: + cns = control.cns.roll(shifts=(sh_h, sh_w), dims=(-2, -1)) + else: + if shift_condition: + cns = control.cns.roll(shifts=sh_h, dims=-2) + else: + cns = control.cns.roll(shifts=sh_w, dims=-1) + cns_slices = [cns[:, :, bbox[1] * cf : bbox[3] * cf, bbox[0] * cf : bbox[2] * cf] for bbox in bboxes] + control.cond_hint = torch.cat(cns_slices, dim=0).to(device=cns.device) + del cns_slices + del cns + else: + control.cond_hint = self.control_params[tuple_key][param_id][batch_id] + control = control.previous_controlnet + + +class MultiDiffusion(AbstractDiffusion): + + @torch.inference_mode() + def __call__(self, model_function: BaseModel.apply_model, args: dict): + x_in: Tensor = args["input"] + t_in: Tensor = args["timestep"] + c_in: dict = args["c"] + cond_or_uncond: list = args["cond_or_uncond"] + + N, C, H, W = x_in.shape + + self.refresh = False + if self.weights is None or self.h != H or self.w != W: + self.h, self.w = H, W + self.refresh = True + self.init_grid_bbox(self.tile_width, self.tile_height, self.tile_overlap, self.tile_batch_size) + self.init_done() + self.h, 
self.w = H, W + self.reset_buffer(x_in) + + for batch_id, bboxes in enumerate(self.batched_bboxes): + if processing_interrupted(): + return x_in + + x_tile = torch.cat([x_in[bbox.slicer] for bbox in bboxes], dim=0) + t_tile = repeat_to_batch_size(t_in, x_tile.shape[0]) + c_tile = {} + for k, v in c_in.items(): + if isinstance(v, torch.Tensor): + if len(v.shape) == len(x_tile.shape): + bboxes_ = bboxes + if v.shape[-2:] != x_in.shape[-2:]: + cf = x_in.shape[-1] * self.compression // v.shape[-1] + bboxes_ = self.get_grid_bbox( + self.width // cf, + self.height // cf, + self.overlap // cf, + self.tile_batch_size, + v.shape[-1], + v.shape[-2], + x_in.device, + self.get_tile_weights, + ) + v = torch.cat([v[bbox_.slicer] for bbox_ in bboxes_[batch_id]]) + if v.shape[0] != x_tile.shape[0]: + v = repeat_to_batch_size(v, x_tile.shape[0]) + c_tile[k] = v + + if "control" in c_in: + self.process_controlnet(x_tile, c_in, cond_or_uncond, bboxes, N, batch_id) + c_tile["control"] = c_in["control"].get_control_orig(x_tile, t_tile, c_tile, len(cond_or_uncond)) + + x_tile_out = model_function(x_tile, t_tile, **c_tile) + + for i, bbox in enumerate(bboxes): + self.x_buffer[bbox.slicer] += x_tile_out[i * N : (i + 1) * N, :, :, :] + del x_tile_out, x_tile, t_tile, c_tile + + return torch.where(self.weights > 1, self.x_buffer / self.weights, self.x_buffer) + + +class MixtureOfDiffusers(AbstractDiffusion): + """ + Mixture-of-Diffusers Implementation + https://github.com/albarji/mixture-of-diffusers + """ + + def init_done(self): + super().init_done() + self.rescale_factor = 1 / self.weights + + @staticmethod + def get_weight(tile_w: int, tile_h: int) -> Tensor: + """ + Copy from the original implementation of Mixture of Diffusers + https://github.com/albarji/mixture-of-diffusers/blob/master/mixdiff/tiling.py + This generates gaussian weights to smooth the noise of each tile. + This is critical for this method to work. 
+ """ + f = lambda x, midpoint, var=0.01: exp(-(x - midpoint) * (x - midpoint) / (tile_w * tile_w) / (2 * var)) / sqrt(2 * pi * var) + x_probs = [f(x, (tile_w - 1) / 2) for x in range(tile_w)] + y_probs = [f(y, tile_h / 2) for y in range(tile_h)] + + w = np.outer(y_probs, x_probs) + return torch.from_numpy(w).to(device, dtype=torch.float32) + + def get_tile_weights(self) -> Tensor: + self.tile_weights = self.get_weight(self.tile_w, self.tile_h) + return self.tile_weights + + @torch.inference_mode() + def __call__(self, model_function: BaseModel.apply_model, args: dict): + x_in: Tensor = args["input"] + t_in: Tensor = args["timestep"] + c_in: dict = args["c"] + cond_or_uncond: list = args["cond_or_uncond"] + + N, C, H, W = x_in.shape + + self.refresh = False + if self.weights is None or self.h != H or self.w != W: + self.h, self.w = H, W + self.refresh = True + self.init_grid_bbox(self.tile_width, self.tile_height, self.tile_overlap, self.tile_batch_size) + self.init_done() + self.h, self.w = H, W + self.reset_buffer(x_in) + + for batch_id, bboxes in enumerate(self.batched_bboxes): + if processing_interrupted(): + return x_in + x_tile_list = [] + for bbox in bboxes: + x_tile_list.append(x_in[bbox.slicer]) + + x_tile = torch.cat(x_tile_list, dim=0) + t_tile = repeat_to_batch_size(t_in, x_tile.shape[0]) + c_tile = {} + for k, v in c_in.items(): + if isinstance(v, torch.Tensor): + if len(v.shape) == len(x_tile.shape): + bboxes_ = bboxes + if v.shape[-2:] != x_in.shape[-2:]: + cf = x_in.shape[-1] * self.compression // v.shape[-1] + bboxes_ = self.get_grid_bbox( + (tile_w := self.width // cf), + (tile_h := self.height // cf), + self.overlap // cf, + self.tile_batch_size, + v.shape[-1], + v.shape[-2], + x_in.device, + lambda: self.get_weight(tile_w, tile_h), + ) + v = torch.cat([v[bbox_.slicer] for bbox_ in bboxes_[batch_id]]) + if v.shape[0] != x_tile.shape[0]: + v = repeat_to_batch_size(v, x_tile.shape[0]) + c_tile[k] = v + + if "control" in c_in: + self.process_controlnet(x_tile, c_in, cond_or_uncond, bboxes, N, batch_id) + c_tile["control"] = c_in["control"].get_control_orig(x_tile, t_tile, c_tile, len(cond_or_uncond)) + + x_tile_out = model_function(x_tile, t_tile, **c_tile) + + for i, bbox in enumerate(bboxes): + w = self.tile_weights * self.rescale_factor[bbox.slicer] + self.x_buffer[bbox.slicer] += x_tile_out[i * N : (i + 1) * N, :, :, :] * w + del x_tile_out, x_tile, t_tile, c_tile + + return self.x_buffer + + +class TiledDiffusion: + + @staticmethod + def apply(model: ModelPatcher, method: str, tile_width: int, tile_height: int, tile_overlap: int, tile_batch_size: int): + match method: + case "MultiDiffusion": + impl = MultiDiffusion() + case "Mixture of Diffusers": + impl = MixtureOfDiffusers() + case _: + raise SystemError + + compression = 8 + impl.tile_width = tile_width // compression + impl.tile_height = tile_height // compression + impl.tile_overlap = tile_overlap // compression + impl.tile_batch_size = tile_batch_size + + impl.compression = compression + impl.width = tile_width + impl.height = tile_height + impl.overlap = tile_overlap + + model = model.clone() + model.set_model_unet_function_wrapper(impl) + + return model diff --git a/extensions-builtin/sd_forge_multidiffusion/scripts/forge_multidiffusion.py b/extensions-builtin/sd_forge_multidiffusion/scripts/forge_multidiffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..442656c377511f0a8392c29674e66276155b824e --- /dev/null +++ 
b/extensions-builtin/sd_forge_multidiffusion/scripts/forge_multidiffusion.py @@ -0,0 +1,46 @@ +import gradio as gr +from lib_multidiffusion.tiled_diffusion import TiledDiffusion + +from modules import scripts +from modules.ui_components import InputAccordion + + +class MultiDiffusionForForge(scripts.Script): + sorting_priority = 16 + + def title(self): + return "MultiDiffusion Integrated" + + def show(self, is_img2img): + return scripts.AlwaysVisible if is_img2img else None + + def ui(self, *args, **kwargs): + with InputAccordion(False, label=self.title()) as enabled: + method = gr.Radio(label="Method", choices=("MultiDiffusion", "Mixture of Diffusers"), value="Mixture of Diffusers") + with gr.Row(): + tile_width = gr.Slider(label="Tile Width", minimum=256, maximum=2048, step=64, value=768) + tile_height = gr.Slider(label="Tile Height", minimum=256, maximum=2048, step=64, value=768) + with gr.Row(): + tile_overlap = gr.Slider(label="Tile Overlap", minimum=0, maximum=1024, step=16, value=64) + tile_batch_size = gr.Slider(label="Tile Batch Size", minimum=1, maximum=8, step=1, value=1) + + return enabled, method, tile_width, tile_height, tile_overlap, tile_batch_size + + def process_before_every_sampling(self, p, enabled: bool, method: str, tile_width: int, tile_height: int, tile_overlap: int, tile_batch_size: int, **kwargs): + if not enabled: + return + + unet = p.sd_model.forge_objects.unet + unet = TiledDiffusion.apply(unet, method, tile_width, tile_height, tile_overlap, tile_batch_size) + p.sd_model.forge_objects.unet = unet + + p.extra_generation_params.update( + { + "multidiffusion_enabled": enabled, + "multidiffusion_method": method, + "multidiffusion_tile_width": tile_width, + "multidiffusion_tile_height": tile_height, + "multidiffusion_tile_overlap": tile_overlap, + "multidiffusion_tile_batch_size": tile_batch_size, + } + ) diff --git a/extensions-builtin/xyz/lib_xyz/builtins.py b/extensions-builtin/xyz/lib_xyz/builtins.py index f1433eb6e5cca6e7d8e1da74f2a97c5b071a7fdf..08fa0719756c050831913a594cc0ac006be631db 100644 --- a/extensions-builtin/xyz/lib_xyz/builtins.py +++ b/extensions-builtin/xyz/lib_xyz/builtins.py @@ -31,44 +31,56 @@ from .utils import boolean_choice, str_permutations builtin_options = [ AxisOption("Nothing", str, do_nothing, format_value=format_nothing), AxisOption("Seed", int, apply_field("seed")), - AxisOption("Var. seed", int, apply_field("subseed")), - AxisOption("Var. strength", float, apply_field("subseed_strength")), AxisOption("Steps", int, apply_field("steps")), - AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")), + AxisOptionTxt2Img("Hires. steps", int, apply_field("hr_second_pass_steps")), AxisOption("CFG Scale", float, apply_field("cfg_scale")), - AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")), AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names), - AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names), + AxisOptionTxt2Img("Hires. 
sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names), AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names), + AxisOption("Schedule type", str, apply_field("scheduler"), choices=lambda: [x.label for x in sd_schedulers.schedulers]), AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)), - AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")), AxisOption("Size", str, apply_size), - AxisOption("Sigma Churn", float, apply_field("s_churn")), - AxisOption("Sigma min", float, apply_field("s_tmin")), - AxisOption("Sigma max", float, apply_field("s_tmax")), - AxisOption("Sigma noise", float, apply_field("s_noise")), - AxisOption("Schedule type", str, apply_field("scheduler"), choices=lambda: [x.label for x in sd_schedulers.schedulers]), - AxisOption("Schedule min sigma", float, apply_override("sigma_min")), - AxisOption("Schedule max sigma", float, apply_override("sigma_max")), - AxisOption("Schedule rho", float, apply_override("rho")), - AxisOption("Eta", float, apply_field("eta")), - AxisOption("Clip skip", int, apply_clip_skip), AxisOption("Denoising", float, apply_field("denoising_strength")), - AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")), - AxisOption("Extra noise", float, apply_override("img2img_extra_noise")), - AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), - AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")), + AxisOptionTxt2Img("Hires. upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ["None"] + list(sd_vae.vae_dict)), AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), - AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5), - AxisOption("Face restore", str, apply_face_restore, format_value=format_value), - AxisOption("Token merging ratio", float, apply_override("token_merging_ratio")), - AxisOption("Token merging ratio high-res", float, apply_override("token_merging_ratio_hr")), - AxisOption("Always discard next-to-last sigma", str, apply_override("always_discard_next_to_last_sigma", boolean=True), choices=boolean_choice(reverse=True)), - AxisOption("SGM noise multiplier", str, apply_override("sgm_noise_multiplier", boolean=True), choices=boolean_choice(reverse=True)), - AxisOption("Refiner checkpoint", str, apply_field("refiner_checkpoint"), format_value=format_remove_path, confirm=confirm_checkpoints_or_none, cost=1.0, choices=lambda: ["None"] + sorted(sd_models.checkpoints_list, key=str.casefold)), - AxisOption("Refiner switch at", float, apply_field("refiner_switch_at")), - AxisOption("RNG source", str, apply_override("randn_source"), choices=lambda: ["GPU", "CPU", "NV"]), ] + +if shared.cmd_opts.adv_xyz: + builtin_options.extend( + [ + AxisOption("Var. seed", int, apply_field("subseed")), + AxisOption("Var. 
strength", float, apply_field("subseed_strength")), + AxisOption("Clip skip", int, apply_clip_skip), + AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")), + AxisOption("Extra noise", float, apply_override("img2img_extra_noise")), + AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")), + AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")), + AxisOption("Face restore", str, apply_face_restore, format_value=format_value), + AxisOption("SkipEarly", float, apply_field("skip_early_cond")), + AxisOption("NGMS", float, apply_field("s_min_uncond")), + AxisOption("Token merging ratio", float, apply_override("token_merging_ratio")), + AxisOption("Always discard next-to-last sigma", str, apply_override("always_discard_next_to_last_sigma", boolean=True), choices=boolean_choice(reverse=True)), + AxisOption("SGM noise multiplier", str, apply_override("sgm_noise_multiplier", boolean=True), choices=boolean_choice(reverse=True)), + AxisOption("Refiner checkpoint", str, apply_field("refiner_checkpoint"), format_value=format_remove_path, confirm=confirm_checkpoints_or_none, cost=1.0, choices=lambda: ["None"] + sorted(sd_models.checkpoints_list, key=str.casefold)), + AxisOption("Refiner switch at", float, apply_field("refiner_switch_at")), + AxisOption("RNG source", str, apply_override("randn_source"), choices=lambda: ["GPU", "CPU", "NV"]), + ] + ) + +if shared.cmd_opts.adv_samplers: + builtin_options.extend( + [ + AxisOption("Sigma Churn", float, apply_field("s_churn")), + AxisOption("Sigma min", float, apply_field("s_tmin")), + AxisOption("Sigma max", float, apply_field("s_tmax")), + AxisOption("Sigma noise", float, apply_field("s_noise")), + AxisOption("Schedule min sigma", float, apply_override("sigma_min")), + AxisOption("Schedule max sigma", float, apply_override("sigma_max")), + AxisOption("Schedule rho", float, apply_override("rho")), + AxisOption("Eta", float, apply_field("eta")), + AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5), + ] + ) diff --git a/html/extra-networks-no-cards.html b/html/extra-networks-no-cards.html index 647808338679aea1c4d076d7b77d220d408db034..a463bf25250dde5da1187634dc2e3c05c8b123d7 100644 --- a/html/extra-networks-no-cards.html +++ b/html/extra-networks-no-cards.html @@ -1,6 +1,5 @@
-    Nothing here. Add some content to the following directories:
+    Nothing here... Add some contents to the following folder(s):
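The new `lib_multidiffusion/tiled_diffusion.py` introduced earlier in this patch covers the latent with overlapping tiles and denoises each tile batch separately. A minimal standalone sketch of that tiling layout, mirroring the patch's `ceildiv`/`split_bboxes` helpers (the `split_tiles` function and the example sizes below are illustrative stand-ins, not Forge code):

```python
# Illustrative sketch of the overlapping-tile layout computed by split_bboxes
# in lib_multidiffusion/tiled_diffusion.py; not the actual Forge module.

def ceildiv(big: int, small: int) -> int:
    # ceiling division without floats, same trick as the patched helper
    return -(big // -small)

def split_tiles(w: int, h: int, tile_w: int, tile_h: int, overlap: int = 16) -> list[tuple[int, int, int, int]]:
    """Return (x, y, w, h) boxes that cover a w*h latent with the requested overlap."""
    cols = ceildiv(w - overlap, tile_w - overlap)
    rows = ceildiv(h - overlap, tile_h - overlap)
    dx = (w - tile_w) / (cols - 1) if cols > 1 else 0  # horizontal stride between tile origins
    dy = (h - tile_h) / (rows - 1) if rows > 1 else 0  # vertical stride between tile origins

    boxes = []
    for row in range(rows):
        y = min(int(row * dy), h - tile_h)
        for col in range(cols):
            x = min(int(col * dx), w - tile_w)
            boxes.append((x, y, tile_w, tile_h))
    return boxes

if __name__ == "__main__":
    # e.g. a 1024x1024 image is a 128x128 latent at compression 8; 768px tiles become 96 latent units
    for box in split_tiles(128, 128, 96, 96, overlap=8):
        print(box)
```

In the patched module each such box becomes a `BBox` whose slice of the latent is denoised and accumulated back into a shared buffer.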
diff --git a/html/extra-networks-pane.html b/html/extra-networks-pane.html index d74c9eeca1c23ed48137d6d069cf182dd3df6bdb..fab48cd1575dc55da47a6bd2cced018e7c8308ae 100644 --- a/html/extra-networks-pane.html +++ b/html/extra-networks-pane.html @@ -30,6 +30,9 @@ {tree_html}
+        {dir_btns_html}
{items_html}
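The `MixtureOfDiffusers` class in the same new file blends the denoised tiles back together with per-pixel Gaussian weights (`get_weight`), which is what smooths the seams between overlapping tiles. A rough NumPy sketch of that weighting and normalization, using hypothetical helper names and toy data in place of Forge's tensors:

```python
# Rough sketch of the Gaussian tile weighting from MixtureOfDiffusers.get_weight
# and the weighted accumulation in lib_multidiffusion/tiled_diffusion.py.
import numpy as np

def gaussian_tile_weight(tile_w: int, tile_h: int, var: float = 0.01) -> np.ndarray:
    """Weights that peak at the tile center and fall off towards the edges."""
    def f(x: float, midpoint: float) -> float:
        return np.exp(-((x - midpoint) ** 2) / (tile_w * tile_w) / (2 * var)) / np.sqrt(2 * np.pi * var)

    x_probs = [f(x, (tile_w - 1) / 2) for x in range(tile_w)]
    y_probs = [f(y, tile_h / 2) for y in range(tile_h)]
    return np.outer(y_probs, x_probs)

def blend_tiles(canvas_h: int, canvas_w: int, tiles: list[np.ndarray], boxes: list[tuple[int, int, int, int]]) -> np.ndarray:
    """Accumulate weighted tiles into one canvas, then normalize by the summed weights."""
    out = np.zeros((canvas_h, canvas_w), dtype=np.float64)
    weights = np.zeros_like(out)
    for tile, (x, y, w, h) in zip(tiles, boxes):
        weight = gaussian_tile_weight(w, h)
        out[y : y + h, x : x + w] += tile * weight
        weights[y : y + h, x : x + w] += weight
    return out / weights  # assumes every pixel is covered by at least one tile

if __name__ == "__main__":
    boxes = [(0, 0, 96, 96), (32, 0, 96, 96), (0, 32, 96, 96), (32, 32, 96, 96)]
    tiles = [np.random.rand(96, 96) for _ in boxes]
    print(blend_tiles(128, 128, tiles, boxes).shape)  # -> (128, 128)
```

The actual module expresses the same idea through `self.weights` and `rescale_factor = 1 / self.weights`, pre-scaling each tile instead of dividing once at the end; the result is equivalent.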
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index ec4f8a5f9e0549f847640534dfae9f308ab9ef95..15e3213316ed6326d7ef703db188e5b00ba98113 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -77,12 +77,12 @@ function setupExtraNetworksForTab(tabname) { sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1); let sortKeyStore = sortKey + "-" + (reverse ? "Descending" : "Ascending") + "-" + cards.length; - if (sortKeyStore == sort_mode.dataset.sortkey && !force) { + if (sortKeyStore == sort_mode.dataset.sortkey && !force) return; - } + sort_mode.dataset.sortkey = sortKeyStore; - cards.forEach(function (card) { + cards.forEach((card) => { card.originalParentElement = card.parentElement; }); let sortedCards = Array.from(cards); @@ -95,18 +95,18 @@ function setupExtraNetworksForTab(tabname) { return (a < b ? -1 : (a > b ? 1 : 0)); }); - if (reverse) { - sortedCards.reverse(); - } - cards.forEach(function (card) { + + if (reverse) sortedCards.reverse(); + + cards.forEach((card) => { card.remove(); }); - sortedCards.forEach(function (card) { + sortedCards.forEach((card) => { card.originalParentElement.appendChild(card); }); }; - search.addEventListener("input", applyFilter); + search.addEventListener("input", () => { applyFilter(); }); applySort(); applyFilter(); extraNetworksApplySort[tabname_full] = applySort; @@ -197,7 +197,7 @@ function setupExtraNetworks() { setupExtraNetworksForTab('img2img'); } -const re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/; +const re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/s; const re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g; const re_extranet_neg = /\(([^:^>]+:[\d.]+)\)/; @@ -273,6 +273,15 @@ function saveCardPreview(event, tabname, filename) { event.preventDefault(); } +function extraNetworksSearchButton(tabname, extra_networks_tabname, event) { + const searchTextarea = gradioApp().querySelector("#" + tabname + "_" + extra_networks_tabname + "_extra_search"); + const button = event.target; + const text = button.classList.contains("search-all") ? "" : button.textContent.trim(); + + searchTextarea.value = text; + updateInput(searchTextarea); +} + function extraNetworksTreeProcessFileClick(event, btn, tabname, extra_networks_tabname) { /** * Processes `onclick` events when user clicks on files in tree. 
@@ -590,6 +599,9 @@ function extraNetworksEditUserMetadata(event, tabname, extraPage, cardName) { } function extraNetworksRefreshSingleCard(page, tabname, name) { + const refreshButton = document.getElementById(`${tabname}_${page}_extra_refresh`); + refreshButton.click(); + requestGet("./sd_extra_networks/get-single-card", { page: page, tabname: tabname, name: name }, function (data) { if (data && data.html) { let card = gradioApp().querySelector(`#${tabname}_${page.replace(" ", "_")}_cards > .card[data-name="${name}"]`); diff --git a/javascript/localization.js b/javascript/localization.js index 4a62b36f7ab2cd854538c3beac81009c4c04b000..b8193dd18310dda83959cdf1f97d9019ec6bc1ab 100644 --- a/javascript/localization.js +++ b/javascript/localization.js @@ -3,9 +3,6 @@ var ignore_ids_for_localization = { setting_sd_model_checkpoint: 'OPTION', - modelmerger_primary_model_name: 'OPTION', - modelmerger_secondary_model_name: 'OPTION', - modelmerger_tertiary_model_name: 'OPTION', txt2img_styles: 'OPTION', img2img_styles: 'OPTION', setting_random_artist_categories: 'OPTION', diff --git a/javascript/ui.js b/javascript/ui.js index 1ac4efe1544e471be26ef8374b18f03c4aff7078..4825a72fbccfaadd16fbf6787e0bc5530870da8d 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -10,10 +10,9 @@ function set_theme(theme) { function all_gallery_buttons() { let allGalleryButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnails > .thumbnail-item.thumbnail-small'); let visibleGalleryButtons = []; - allGalleryButtons.forEach(function (elem) { - if (elem.parentElement.offsetParent) { + allGalleryButtons.forEach((elem) => { + if (elem.parentElement.offsetParent) visibleGalleryButtons.push(elem); - } }); return visibleGalleryButtons; } @@ -26,20 +25,15 @@ function selected_gallery_index() { return all_gallery_buttons().findIndex(elem => elem.classList.contains('selected')); } -function extract_image_from_gallery(gallery) { - if (gallery.length == 0) { - return [null]; - } - if (gallery.length == 1) { - return [gallery[0]]; - } +let t2i_gallery_index = 0; +let i2i_gallery_index = 0; - let index = selected_gallery_index(); +function extract_image_from_gallery(gallery, tabname = null) { + if (gallery.length === 0) return [null]; + if (gallery.length === 1) return [gallery[0]]; - if (index < 0 || index >= gallery.length) { - // Use the first image in the gallery as the default - index = 0; - } + const index = (tabname === null) ? 0 + : (tabname === "txt2img" ? t2i_gallery_index : i2i_gallery_index); return [gallery[index]]; } @@ -262,16 +256,6 @@ onUiLoaded(function () { }); -function modelmerger() { - let id = randomId(); - requestProgress(id, gradioApp().getElementById('modelmerger_results_panel'), null, function () { }); - - let res = create_submit_args(arguments); - res[0] = id; - return res; -} - - function ask_for_style_name(_, prompt_text, negative_prompt_text) { let name_ = prompt('Style name:'); return [name_, prompt_text, negative_prompt_text]; @@ -337,6 +321,7 @@ var txt2img_textarea, img2img_textarea = undefined; function restart_reload() { document.body.innerHTML = '
Reloading...
'; + if (opts.no_flashbang) document.body.style.backgroundColor = "black"; let requestPing = function () { requestGet("./internal/ping", {}, function (data) { diff --git a/ldm_patched/ldm/modules/attention.py b/ldm_patched/ldm/modules/attention.py index d9234b542365e0301e80a190c645156827687137..ef5b277141ac29f5c4f3fee2e6e5fa938c409601 100644 --- a/ldm_patched/ldm/modules/attention.py +++ b/ldm_patched/ldm/modules/attention.py @@ -210,11 +210,11 @@ if isSage2 and args.sageattn2_api is not SageAttentionAPIs.Automatic: from sageattention import sageattn_qk_int8_pv_fp16_triton, sageattn_qk_int8_pv_fp16_cuda, sageattn_qk_int8_pv_fp8_cuda if args.sageattn2_api is SageAttentionAPIs.Triton16: - sageattn = sageattn_qk_int8_pv_fp16_triton + sageattn = partial(sageattn_qk_int8_pv_fp16_triton, quantization_backend="cuda") if args.sageattn2_api is SageAttentionAPIs.CUDA16: sageattn = partial(sageattn_qk_int8_pv_fp16_cuda, qk_quant_gran="per_warp", pv_accum_dtype="fp16+fp32") if args.sageattn2_api is SageAttentionAPIs.CUDA8: - sageattn = partial(sageattn_qk_int8_pv_fp8_cuda, qk_quant_gran="per_warp", pv_accum_dtype="fp16+fp32") + sageattn = partial(sageattn_qk_int8_pv_fp8_cuda, qk_quant_gran="per_thread", pv_accum_dtype="fp32+fp32") def attention_sage(q, k, v, heads, mask=None): diff --git a/ldm_patched/ldm/modules/diffusionmodules/model.py b/ldm_patched/ldm/modules/diffusionmodules/model.py index d7d2c75d304ded5ddacbdecc7a9d9d108dc3bf9c..6a9056f5e60d67c3071ddd2111f9e035a2a12ff6 100644 --- a/ldm_patched/ldm/modules/diffusionmodules/model.py +++ b/ldm_patched/ldm/modules/diffusionmodules/model.py @@ -12,8 +12,12 @@ import numpy as np import torch import torch.nn as nn from ldm_patched.modules import model_management +from modules.shared import opts -ops = ldm_patched.modules.ops.disable_weight_init +if opts.sd_vae_tiled_ops: + ops = ldm_patched.modules.ops.tiled_ops +else: + ops = ldm_patched.modules.ops.disable_weight_init if model_management.xformers_enabled_vae(): import xformers @@ -230,7 +234,7 @@ def xformers_attention(q, k, v): try: out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) out = out.transpose(1, 2).reshape(B, C, H, W) - except NotImplementedError as e: + except NotImplementedError: out = slice_attention( q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), @@ -252,7 +256,7 @@ def pytorch_attention(q, k, v): q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False ) out = out.transpose(2, 3).reshape(B, C, H, W) - except model_management.OOM_EXCEPTION as e: + except model_management.OOM_EXCEPTION: print("scaled_dot_product_attention OOMed: switched to slice attention") out = slice_attention( q.view(B, -1, C), @@ -511,7 +515,10 @@ class Decoder(nn.Module): up.block = block up.attn = attn if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) + if opts.sd_vae_tiled_ops: + up.upsample = ops.Upsample(block_in, resamp_with_conv) + else: + up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order diff --git a/ldm_patched/modules/controlnet.py b/ldm_patched/modules/controlnet.py index 2fe104dd3fd6bab53bbc58a423590613bfd4e166..8de4bb9bf3309b0a5d9d8db05a729fba27144dbb 100644 --- a/ldm_patched/modules/controlnet.py +++ b/ldm_patched/modules/controlnet.py @@ -784,7 +784,7 @@ def load_t2i_adapter(t2i_data): prefix_replace["adapter.body.{}.resnets.{}.".format(i, j)] = ( "body.{}.".format(i * 2 + j) ) - prefix_replace["adapter.body.{}.".format(i, j)] = "body.{}.".format(i * 2) + 
prefix_replace["adapter.body.{}.".format(i, )] = "body.{}.".format(i * 2) prefix_replace["adapter."] = "" t2i_data = ldm_patched.modules.utils.state_dict_prefix_replace( t2i_data, prefix_replace diff --git a/ldm_patched/modules/lora.py b/ldm_patched/modules/lora.py index 305164be120562c41c88620883456d6a6e075222..5c5bab4cd48a08a18ce97779e2b8e5a382bd165c 100644 --- a/ldm_patched/modules/lora.py +++ b/ldm_patched/modules/lora.py @@ -3,7 +3,9 @@ # 3rd Edit by. Haoming02 # - Based on: https://github.com/comfyanonymous/ComfyUI/blob/v0.3.29/comfy/lora.py +import torch +import ldm_patched.modules.model_management import ldm_patched.modules.utils LORA_CLIP_MAP = { @@ -38,7 +40,7 @@ def load_lora(lora, to_load): try: reshape = lora[reshape_name].tolist() loaded_keys.add(reshape_name) - except: + except Exception: pass regular_lora = "{}.lora_up.weight".format(x) @@ -221,7 +223,7 @@ def model_lora_keys_clip(model, key_map={}): text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}" clip_l_present = False - clip_g_present = False + for b in range(32): # TODO: clean up for c in LORA_CLIP_MAP: k = "clip_h.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) @@ -245,22 +247,18 @@ def model_lora_keys_clip(model, key_map={}): k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) if k in sdk: - clip_g_present = True + if clip_l_present: lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) # SDXL base key_map[lora_key] = k lora_key = "text_encoder_2.text_model.encoder.layers.{}.{}".format(b, c) # diffusers lora key_map[lora_key] = k else: - lora_key = "lora_te_text_model_encoder_layers_{}_{}".format( - b, LORA_CLIP_MAP[c] - ) # TODO: test if this is correct for SDXL-Refiner + lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) # TODO: test if this is correct for SDXL-Refiner key_map[lora_key] = k lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) # diffusers lora key_map[lora_key] = k - lora_key = "lora_prior_te_text_model_encoder_layers_{}_{}".format( - b, LORA_CLIP_MAP[c] - ) # cascade lora: TODO put lora key prefix in the model config + lora_key = "lora_prior_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) # cascade lora: TODO put lora key prefix in the model config key_map[lora_key] = k return key_map @@ -295,3 +293,33 @@ def model_lora_keys_unet(model, key_map={}): key_map[diffusers_lora_key] = unet_key return key_map + + +def weight_decompose(dora_scale: torch.Tensor, weight: torch.Tensor, lora_diff: torch.Tensor, alpha: float, strength: float, intermediate_dtype: torch.dtype) -> torch.Tensor: + dora_scale = ldm_patched.modules.model_management.cast_to_device(dora_scale, weight.device, intermediate_dtype) + lora_diff *= alpha + weight_calc = weight + lora_diff.type(weight.dtype) + weight_norm = weight_calc.transpose(0, 1).reshape(weight_calc.shape[1], -1).norm(dim=1, keepdim=True).reshape(weight_calc.shape[1], *[1] * (weight_calc.dim() - 1)).transpose(0, 1) + + weight_calc *= (dora_scale / weight_norm).type(weight.dtype) + if strength != 1.0: + weight_calc -= weight + weight += strength * (weight_calc) + else: + weight[:] = weight_calc + return weight + + +def pad_tensor_to_shape(tensor: torch.Tensor, new_shape: list[int]) -> torch.Tensor: + if any([new_shape[i] < tensor.shape[i] for i in range(len(new_shape))]): + raise ValueError("The new shape must be larger than the original tensor in all dimensions") + if len(new_shape) != len(tensor.shape): + raise 
ValueError("The new shape must have the same number of dimensions as the original tensor") + + padded_tensor = torch.zeros(new_shape, dtype=tensor.dtype, device=tensor.device) + + orig_slices = tuple(slice(0, dim) for dim in tensor.shape) + new_slices = tuple(slice(0, dim) for dim in tensor.shape) + + padded_tensor[new_slices] = tensor[orig_slices] + return padded_tensor diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py index d43789267231c437fefbadfd8ce3dbdaec12cee2..cf2e6fed4bdf830a5a70086442575bb4ce681525 100644 --- a/ldm_patched/modules/model_management.py +++ b/ldm_patched/modules/model_management.py @@ -733,16 +733,21 @@ def prefer_fp8(): def support_fp8(): if not prefer_fp8(): return False + if not is_nvidia(): + return False if int(torch_version[0]) < 2 or int(torch_version[2]) < 4: return False device = get_torch_device() props = torch.cuda.get_device_properties(device) - if props.major < 8 or props.minor < 9: - return False - return True + if props.major >= 9: + return True + elif props.major == 8 and props.minor >= 9: + return True + else: + return False def supports_dtype(device, dtype): # TODO diff --git a/ldm_patched/modules/model_patcher.py b/ldm_patched/modules/model_patcher.py index ae1929eafb7e20249ccd5781118382ad5e667e27..2176810e75a536851c3d95ba2a01864e58e126c3 100644 --- a/ldm_patched/modules/model_patcher.py +++ b/ldm_patched/modules/model_patcher.py @@ -8,13 +8,16 @@ https://github.com/comfyanonymous/ComfyUI import copy import inspect +import logging import torch import ldm_patched.modules.model_management import ldm_patched.modules.utils from ldm_patched.modules.args_parser import args +from ldm_patched.modules.lora import pad_tensor_to_shape, weight_decompose +logger = logging.getLogger(__name__) extra_weight_calculators = {} # backward compatibility @@ -114,7 +117,7 @@ class ModelPatcher: def set_model_sampler_cfg_function(self, sampler_cfg_function, disable_cfg1_optimization=False): if len(inspect.signature(sampler_cfg_function).parameters) == 3: - self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) # Old way + self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) else: self.model_options["sampler_cfg_function"] = sampler_cfg_function if disable_cfg1_optimization: @@ -287,7 +290,7 @@ class ModelPatcher: def calculate_weight(self, patches, weight, key): for p in patches: - alpha = p[0] + strength = p[0] v = p[1] strength_model = p[2] @@ -295,7 +298,7 @@ class ModelPatcher: weight *= strength_model if isinstance(v, list): - v = (self.calculate_weight(v[1:], v[0].clone(), key),) + v = (self.calculate_weight(v[1:], v[0][1](ldm_patched.modules.model_management.cast_to_device(v[0][0], weight.device, torch.float32, copy=True), inplace=True), key),) if len(v) == 1: patch_type = "diff" @@ -304,58 +307,44 @@ class ModelPatcher: v = v[1] if patch_type == "diff": - w1 = v[0] - if alpha != 0.0: - if w1.shape != weight.shape: - if w1.ndim == weight.ndim == 4: - new_shape = [max(n, m) for n, m in zip(weight.shape, w1.shape)] - print(f"Merged with {key} channel changed to {new_shape}") - new_diff = alpha * ldm_patched.modules.model_management.cast_to_device(w1, weight.device, weight.dtype) - new_weight = torch.zeros(size=new_shape).to(weight) - new_weight[ - : weight.shape[0], - : weight.shape[1], - : weight.shape[2], - : weight.shape[3], - ] = weight - new_weight[ - : 
new_diff.shape[0], - : new_diff.shape[1], - : new_diff.shape[2], - : new_diff.shape[3], - ] += new_diff - new_weight = new_weight.contiguous().clone() - weight = new_weight - else: - print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape)) + diff: torch.Tensor = v[0] + do_pad_weight = len(v) > 1 and v[1]["pad_weight"] + if do_pad_weight and diff.shape != weight.shape: + logger.debug(f'Padding Weight "{key}" ({weight.shape} -> {diff.shape})') + weight = pad_tensor_to_shape(weight, diff.shape) + + if strength != 0.0: + if diff.shape != weight.shape: + logger.warning(f'SHAPE MISMATCH "{key}" WEIGHT NOT MERGED ({diff.shape} != {weight.shape})') else: - weight += alpha * ldm_patched.modules.model_management.cast_to_device(w1, weight.device, weight.dtype) + weight += strength * ldm_patched.modules.model_management.cast_to_device(diff, weight.device, weight.dtype) + elif patch_type == "lora": # lora/locon mat1 = ldm_patched.modules.model_management.cast_to_device(v[0], weight.device, torch.float32) mat2 = ldm_patched.modules.model_management.cast_to_device(v[1], weight.device, torch.float32) + dora_scale = v[4] + reshape = v[5] + + if reshape is not None: + weight = pad_tensor_to_shape(weight, reshape) if v[2] is not None: - alpha *= v[2] / mat2.shape[0] + alpha = v[2] / mat2.shape[0] + else: + alpha = 1.0 if v[3] is not None: - # locon mid weights, hopefully the math is fine because I didn't properly test it mat3 = ldm_patched.modules.model_management.cast_to_device(v[3], weight.device, torch.float32) - final_shape = [ - mat2.shape[1], - mat2.shape[0], - mat3.shape[2], - mat3.shape[3], - ] - mat2 = ( - torch.mm( - mat2.transpose(0, 1).flatten(start_dim=1), - mat3.transpose(0, 1).flatten(start_dim=1), - ) - .reshape(final_shape) - .transpose(0, 1) - ) + final_shape = [mat2.shape[1], mat2.shape[0], mat3.shape[2], mat3.shape[3]] + mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1), mat3.transpose(0, 1).flatten(start_dim=1)).reshape(final_shape).transpose(0, 1) + try: - weight += (alpha * torch.mm(mat1.flatten(start_dim=1), mat2.flatten(start_dim=1))).reshape(weight.shape).type(weight.dtype) + lora_diff = torch.mm(mat1.flatten(start_dim=1), mat2.flatten(start_dim=1)).reshape(weight.shape) + if dora_scale is not None: + weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, torch.float32) + else: + weight += ((strength * alpha) * lora_diff).type(weight.dtype) except Exception as e: - print("ERROR", key, e) + logger.error(f"Failed to apply {patch_type} to {key}\n{e}") + elif patch_type == "lokr": w1 = v[0] w2 = v[1] @@ -364,94 +353,113 @@ class ModelPatcher: w2_a = v[5] w2_b = v[6] t2 = v[7] + dora_scale = v[8] dim = None if w1 is None: dim = w1_b.shape[0] - w1 = torch.mm( - ldm_patched.modules.model_management.cast_to_device(w1_a, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w1_b, weight.device, torch.float32), - ) + w1 = torch.mm(ldm_patched.modules.model_management.cast_to_device(w1_a, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w1_b, weight.device, torch.float32)) else: w1 = ldm_patched.modules.model_management.cast_to_device(w1, weight.device, torch.float32) if w2 is None: dim = w2_b.shape[0] if t2 is None: - w2 = torch.mm( - ldm_patched.modules.model_management.cast_to_device(w2_a, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w2_b, weight.device, torch.float32), - ) + w2 = 
torch.mm(ldm_patched.modules.model_management.cast_to_device(w2_a, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w2_b, weight.device, torch.float32)) else: - w2 = torch.einsum( - "i j k l, j r, i p -> p r k l", - ldm_patched.modules.model_management.cast_to_device(t2, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w2_b, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w2_a, weight.device, torch.float32), - ) + w2 = torch.einsum("i j k l, j r, i p -> p r k l", ldm_patched.modules.model_management.cast_to_device(t2, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w2_b, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w2_a, weight.device, torch.float32)) else: w2 = ldm_patched.modules.model_management.cast_to_device(w2, weight.device, torch.float32) if len(w2.shape) == 4: w1 = w1.unsqueeze(2).unsqueeze(2) if v[2] is not None and dim is not None: - alpha *= v[2] / dim + alpha = v[2] / dim + else: + alpha = 1.0 try: - weight += alpha * torch.kron(w1, w2).reshape(weight.shape).type(weight.dtype) + lora_diff = torch.kron(w1, w2).reshape(weight.shape) + if dora_scale is not None: + weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, torch.float32) + else: + weight += ((strength * alpha) * lora_diff).type(weight.dtype) except Exception as e: - print("ERROR", key, e) + logger.error(f"Failed to apply {patch_type} to {key}\n{e}") + elif patch_type == "loha": w1a = v[0] w1b = v[1] if v[2] is not None: - alpha *= v[2] / w1b.shape[0] + alpha = v[2] / w1b.shape[0] + else: + alpha = 1.0 + w2a = v[3] w2b = v[4] + dora_scale = v[7] if v[5] is not None: # cp decomposition t1 = v[5] t2 = v[6] - m1 = torch.einsum( - "i j k l, j r, i p -> p r k l", - ldm_patched.modules.model_management.cast_to_device(t1, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w1b, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w1a, weight.device, torch.float32), - ) - - m2 = torch.einsum( - "i j k l, j r, i p -> p r k l", - ldm_patched.modules.model_management.cast_to_device(t2, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w2b, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w2a, weight.device, torch.float32), - ) + m1 = torch.einsum("i j k l, j r, i p -> p r k l", ldm_patched.modules.model_management.cast_to_device(t1, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w1b, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w1a, weight.device, torch.float32)) + m2 = torch.einsum("i j k l, j r, i p -> p r k l", ldm_patched.modules.model_management.cast_to_device(t2, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w2b, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w2a, weight.device, torch.float32)) else: - m1 = torch.mm( - ldm_patched.modules.model_management.cast_to_device(w1a, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w1b, weight.device, torch.float32), - ) - m2 = torch.mm( - ldm_patched.modules.model_management.cast_to_device(w2a, weight.device, torch.float32), - ldm_patched.modules.model_management.cast_to_device(w2b, weight.device, torch.float32), - ) + m1 = 
torch.mm(ldm_patched.modules.model_management.cast_to_device(w1a, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w1b, weight.device, torch.float32)) + m2 = torch.mm(ldm_patched.modules.model_management.cast_to_device(w2a, weight.device, torch.float32), ldm_patched.modules.model_management.cast_to_device(w2b, weight.device, torch.float32)) try: - weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype) + lora_diff = (m1 * m2).reshape(weight.shape) + if dora_scale is not None: + weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, torch.float32) + else: + weight += ((strength * alpha) * lora_diff).type(weight.dtype) except Exception as e: - print("ERROR", key, e) + logger.error(f"Failed to apply {patch_type} to {key}\n{e}") + elif patch_type == "glora": - if v[4] is not None: - alpha *= v[4] / v[0].shape[0] + dora_scale = v[5] + + old_glora = False + if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]: + rank = v[0].shape[0] + old_glora = True + + if v[3].shape[0] == v[2].shape[1] == v[0].shape[1] == v[1].shape[0]: + if old_glora and v[1].shape[0] == weight.shape[0] and weight.shape[0] == weight.shape[1]: + pass + else: + old_glora = False + rank = v[1].shape[0] a1 = ldm_patched.modules.model_management.cast_to_device(v[0].flatten(start_dim=1), weight.device, torch.float32) a2 = ldm_patched.modules.model_management.cast_to_device(v[1].flatten(start_dim=1), weight.device, torch.float32) b1 = ldm_patched.modules.model_management.cast_to_device(v[2].flatten(start_dim=1), weight.device, torch.float32) b2 = ldm_patched.modules.model_management.cast_to_device(v[3].flatten(start_dim=1), weight.device, torch.float32) - weight += ((torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1), a2), a1)) * alpha).reshape(weight.shape).type(weight.dtype) - elif patch_type in extra_weight_calculators: - weight = extra_weight_calculators[patch_type](weight, alpha, v) + if v[4] is not None: + alpha = v[4] / rank + else: + alpha = 1.0 + + try: + if old_glora: + lora_diff = (torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1).to(dtype=torch.float32), a2), a1)).reshape(weight.shape) # old lycoris glora + else: + if weight.dim() > 2: + lora_diff = torch.einsum("o i ..., i j -> o j ...", torch.einsum("o i ..., i j -> o j ...", weight.to(dtype=torch.float32), a1), a2).reshape(weight.shape) + else: + lora_diff = torch.mm(torch.mm(weight.to(dtype=torch.float32), a1), a2).reshape(weight.shape) + lora_diff += torch.mm(b1, b2).reshape(weight.shape) + + if dora_scale is not None: + weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, torch.float32) + else: + weight += ((strength * alpha) * lora_diff).type(weight.dtype) + except Exception as e: + logger.error(f"Failed to apply {patch_type} to {key}\n{e}") + else: - print("patch type not recognized", patch_type, key) + logger.warning(f'Unrecognized/Unsupported Patch Type "{patch_type}"...') return weight diff --git a/ldm_patched/modules/ops.py b/ldm_patched/modules/ops.py index aeb54a8ade67aaced37c5abd918235cc7fc0523c..c13e219aa2e8e1e423b02fce1169bbf584f53131 100644 --- a/ldm_patched/modules/ops.py +++ b/ldm_patched/modules/ops.py @@ -9,7 +9,7 @@ https://github.com/comfyanonymous/ComfyUI import contextlib import torch -from ldm_patched.modules.model_management import cast_to_device +from ldm_patched.modules.model_management import cast_to_device, device_supports_non_blocking from modules_forge import stream stash = {} @@ -296,3 +296,106 @@ else: def 
forward(self, *args, **kwargs): return super().forward(*args, **kwargs) + + +class tiled_ops(disable_weight_init): + class Conv2d(disable_weight_init.Conv2d): + tile_size: int + + def __init__(self, *arg, **kwargs): + from modules.shared import opts + + super().__init__(*arg, **kwargs) + self._3x1x1: bool = self.kernel_size == (3, 3) and self.stride == (1, 1) and self.padding == (1, 1) + self.tile_size = opts.sd_vae_tiled_size + + @torch.inference_mode() + def forward(self, x: torch.Tensor): + if not self._3x1x1: + return super().forward(x) + + B, C, H, W = x.shape + + if H <= self.tile_size and W <= self.tile_size: + return super().forward(x) + + out = torch.empty((B, C if self.out_channels is None else self.out_channels, H, W), device=x.device, dtype=x.dtype, memory_format=torch.contiguous_format) + non_blocking = device_supports_non_blocking(x.device) + + for i in range(0, H, self.tile_size): + for j in range(0, W, self.tile_size): + i0 = max(i - 1, 0) + j0 = max(j - 1, 0) + i1 = min(i + self.tile_size + 1, H) + j1 = min(j + self.tile_size + 1, W) + + tile = x[:, :, i0:i1, j0:j1] + tile_conv = super().forward(tile) + + pi = i - i0 + pj = j - j0 + ph = min(self.tile_size, H - i) + pw = min(self.tile_size, W - j) + + out[:, :, i : i + ph, j : j + pw].copy_(tile_conv[:, :, pi : pi + ph, pj : pj + pw], non_blocking=non_blocking) + del tile_conv + + return out + + class Upsample(torch.nn.Module): + tile_size: int + + def __init__(self, in_channels, with_conv): + from modules.shared import opts + + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = tiled_ops.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + + self.tile_size = opts.sd_vae_tiled_size + + @torch.inference_mode() + def forward(self, x: torch.Tensor): + B, C, H, W = x.shape + out_H, out_W = (H * 2, W * 2) + + if out_H <= self.tile_size and out_W <= self.tile_size: + x_up = torch.nn.functional.interpolate(x, size=(out_H, out_W), mode="nearest") + return x_up if not self.with_conv else self.conv(x_up) + + scale_h = out_H / H + scale_w = out_W / W + + out = torch.empty((B, C, out_H, out_W), device=x.device, dtype=x.dtype, memory_format=torch.contiguous_format) + non_blocking = device_supports_non_blocking(x.device) + + for i in range(0, H, self.tile_size): + for j in range(0, W, self.tile_size): + i0 = max(i - 1, 0) + j0 = max(j - 1, 0) + i1 = min(i + self.tile_size + 1, H) + j1 = min(j + self.tile_size + 1, W) + tile = x[:, :, i0:i1, j0:j1] + + tile_up = torch.nn.functional.interpolate(tile, scale_factor=(scale_h, scale_w), mode="nearest") + + if self.with_conv: + tile_up = self.conv(tile_up) + + pi = int((i - i0) * scale_h) + pj = int((j - j0) * scale_w) + ph = int(min(self.tile_size, H - i) * scale_h) + pw = int(min(self.tile_size, W - j) * scale_w) + + oi = int(i * scale_h) + oj = int(j * scale_w) + + # Clip output patch to not exceed requested output size + ph = min(ph, out_H - oi) + pw = min(pw, out_W - oj) + + out[:, :, oi : oi + ph, oj : oj + pw].copy_(tile_up[:, :, pi : pi + ph, pj : pj + pw], non_blocking=non_blocking) + del tile_up + + return out diff --git a/ldm_patched/modules/sd.py b/ldm_patched/modules/sd.py index a851af16ce75e5f3b83783076fd4a603f41f1097..7f618771ec770ce315bf5dc00e1a24b577bd4ac8 100644 --- a/ldm_patched/modules/sd.py +++ b/ldm_patched/modules/sd.py @@ -356,7 +356,7 @@ class VAE: min=0.0, max=1.0, ) - except model_management.OOM_EXCEPTION as e: + except model_management.OOM_EXCEPTION: print("Warning: Ran out of memory when regular VAE decoding, 
retrying with tiled VAE decoding.") pixel_samples = self.decode_tiled_(samples_in) @@ -399,7 +399,7 @@ class VAE: pixels_in = (2.0 * pixel_samples[x : x + batch_number] - 1.0).to(self.vae_dtype).to(self.device) samples[x : x + batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float() - except model_management.OOM_EXCEPTION as e: + except model_management.OOM_EXCEPTION: print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.") samples = self.encode_tiled_(pixel_samples) diff --git a/ldm_patched/modules/sd1_clip.py b/ldm_patched/modules/sd1_clip.py index e4fd693b2f2dcb068dc6a5ded237cb4083445fe3..7ddf0661b94c670d7c4f3a8ef18e8763f4cafeaa 100644 --- a/ldm_patched/modules/sd1_clip.py +++ b/ldm_patched/modules/sd1_clip.py @@ -389,7 +389,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No embed_out = safe_load_embed_zip(embed_path) else: embed = torch.load(embed_path, map_location="cpu") - except Exception as e: + except Exception: print(traceback.format_exc()) print() print("error loading embedding, skipping loading:", embedding_name) diff --git a/ldm_patched/unipc/uni_pc.py b/ldm_patched/unipc/uni_pc.py index 7a9b7ef11428cc41ad687a50165efe4298e590b6..68d443f508badb0e36f112f2279e06287bc116d3 100644 --- a/ldm_patched/unipc/uni_pc.py +++ b/ldm_patched/unipc/uni_pc.py @@ -16,7 +16,7 @@ class NoiseScheduleVP: continuous_beta_0=0.1, continuous_beta_1=20.0, ): - """Create a wrapper class for the forward SDE (VP type). + r"""Create a wrapper class for the forward SDE (VP type). *** Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 4a12126f93aa871ddb68c82842032306dc8c3579..5715d562f35f38f08d70fab77c01841fc20d4941 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -88,6 +88,7 @@ parser.add_argument("--forge-ref-a1111-home", type=Path, help="Look for models i parser.add_argument("--controlnet-dir", type=Path, help="Path to directory with ControlNet models", default=None) parser.add_argument("--controlnet-preprocessor-models-dir", type=Path, help="Path to directory with annotator model directories", default=None) parser.add_argument("--adv-samplers", action="store_true", help='show the "sampler parameters" advanced settings') +parser.add_argument("--adv-xyz", action="store_true", help="show non-UI parameters in X/Y/Z Plot options") parser.add_argument("--fps", type=int, default=30, help="refresh rate for threads") pkm = parser.add_mutually_exclusive_group() diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index af59f39433e7b29769fe0108b9bd8e92257cdcb5..9f7758184562e381b035ea1980ff6f7eaee25cdc 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -10,6 +10,11 @@ from modules.upscaler_utils import upscale_with_model from modules_forge.forge_util import prepare_free_memory +PREFER_HALF = opts.prefer_fp16_upscalers +if PREFER_HALF: + print("[Upscalers] Prefer Half-Precision:", PREFER_HALF) + + class UpscalerESRGAN(Upscaler): def __init__(self, dirname: str): self.user_path = dirname @@ -65,6 +70,6 @@ class UpscalerESRGAN(Upscaler): file_name=path.rsplit("/", 1)[-1], ) - model = modelloader.load_spandrel_model(filename, device="cpu") + model = modelloader.load_spandrel_model(filename, device="cpu", prefer_half=PREFER_HALF) model.to(devices.device_esrgan) return model diff --git a/modules/extras.py b/modules/extras.py index 
0061109ce8ad27e3096c53b6e97d93b5ba2a2937..a33fa2cdc3fd6b693a00d24acd63b66468201428 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -22,6 +22,6 @@ def run_pnginfo(image): ) if len(info) == 0: - info = f"
Image contains no metadata...
" + info = "
Image contains no metadata...
" return "", geninfo, info diff --git a/modules/img2img.py b/modules/img2img.py index a295cc84f7dbe4ecb94f6ea1e10f2df4fae48804..bcc261cba8c8ae2390f85ec11442efcf1e144b27 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -221,8 +221,8 @@ def img2img_function( if selected_scale_tab == 1 and not is_batch: assert image, "Can't scale by because no image is selected" - width = int(image.width * scale_by) - height = int(image.height * scale_by) + width = round(image.width * scale_by / 64) * 64 + height = round(image.height * scale_by / 64) * 64 assert 0.0 <= denoising_strength <= 1.0, "can only work with strength in [0.0, 1.0]" diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index 34ee687cd624b33ebe9f498afb745a92ef69502c..5e5ee93e3f329696d20213d4d398628a8d8a32c6 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -1,15 +1,19 @@ from __future__ import annotations + import base64 import io import json import os import re +from functools import partial +from typing import Any, Callable import gradio as gr -from modules.paths import data_path -from modules import shared, ui_tempdir, script_callbacks, processing, prompt_parser from PIL import Image +from modules import images, processing, prompt_parser, script_callbacks, shared, ui_tempdir +from modules.paths import data_path + re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' re_param = re.compile(re_param_code) re_imagesize = re.compile(r"^(\d+)x(\d+)$") @@ -17,14 +21,15 @@ type_of_gr_update = type(gr.skip()) class ParamBinding: - def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None): - self.paste_button = paste_button - self.tabname = tabname - self.source_text_component = source_text_component - self.source_image_component = source_image_component - self.source_tabname = source_tabname - self.override_settings_component = override_settings_component - self.paste_field_names = paste_field_names or [] + def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None, *, is_paste=False): + self.paste_button: gr.Button = paste_button + self.tabname: str = tabname + self.source_text_component: gr.Textbox = source_text_component + self.source_image_component: gr.Gallery = source_image_component + self.source_tabname: str = source_tabname + self.override_settings_component: list[gr.components.IOComponent] = override_settings_component + self.paste_field_names: list[str] = paste_field_names or [] + self.is_paste: bool = is_paste class PasteField(tuple): @@ -49,14 +54,14 @@ def reset(): registered_param_bindings.clear() -def quote(text): - if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text): +def quote(text: str) -> str: + if "," not in str(text) and "\n" not in str(text) and ":" not in str(text): return text return json.dumps(text, ensure_ascii=False) -def unquote(text): +def unquote(text: str) -> str: if len(text) == 0 or text[0] != '"' or text[-1] != '"': return text @@ -66,7 +71,7 @@ def unquote(text): return text -def image_from_url_text(filedata): +def image_from_url_text(filedata: str | dict) -> Image.Image: if filedata is None: return None @@ -74,11 +79,11 @@ def image_from_url_text(filedata): filedata = filedata[0] if type(filedata) == dict and filedata.get("is_file", False): - filename = filedata["name"] + filename: str 
= filedata["name"] is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) - assert is_in_right_dir, 'trying to open image file outside of allowed directories' + assert is_in_right_dir, "trying to open image file outside of allowed directories" - filename = filename.rsplit('?', 1)[0] + filename = filename.rsplit("?", 1)[0] return Image.open(filename) if type(filedata) == list: @@ -88,9 +93,9 @@ def image_from_url_text(filedata): filedata = filedata[0] if filedata.startswith("data:image/png;base64,"): - filedata = filedata[len("data:image/png;base64,"):] + filedata = filedata[22:] - filedata = base64.decodebytes(filedata.encode('utf-8')) + filedata = base64.decodebytes(filedata.encode("utf-8")) image = Image.open(io.BytesIO(filedata)) return image @@ -104,11 +109,12 @@ def add_paste_fields(tabname, init_img, fields, override_settings_component=None paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component} - # backwards compatibility for existing extensions + # Backward Compatibility import modules.ui - if tabname == 'txt2img': + + if tabname == "txt2img": modules.ui.txt2img_paste_fields = fields - elif tabname == 'img2img': + elif tabname == "img2img": modules.ui.img2img_paste_fields = fields @@ -119,19 +125,40 @@ def create_buttons(tabs_list): return buttons -def bind_buttons(buttons, send_image, send_generate_info): - """old function for backwards compatibility; do not use this, use register_paste_params_button""" - for tabname, button in buttons.items(): - source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None - source_tabname = send_generate_info if isinstance(send_generate_info, str) else None - - register_paste_params_button(ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, source_image_component=send_image, source_tabname=source_tabname)) +def bind_buttons(*args, **kwargs): + raise NotImplementedError("use register_paste_params_button instead") def register_paste_params_button(binding: ParamBinding): registered_param_bindings.append(binding) +def _parse_info(output: gr.components.IOComponent, key: str | Callable, params: dict[str, Any]): + if callable(key): + v = key(params) + else: + v = params.get(key, None) + + if v is None: + return gr.skip() + if isinstance(v, type_of_gr_update): + return v + + try: + valtype = type(output.value) + + if valtype == bool and v == "False": + val = False + elif valtype == int: + val = float(v) + else: + val = valtype(v) + + return gr.update(value=val) + except Exception: + return gr.skip() + + def connect_paste_params_buttons(): for binding in registered_param_bindings: if binding.tabname not in paste_fields: @@ -146,7 +173,7 @@ def connect_paste_params_buttons(): if binding.source_image_component and destination_image_component: if isinstance(binding.source_image_component, gr.Gallery): func = send_image_and_dimensions if destination_width_component else image_from_url_text - jsfunc = "extract_image_from_gallery" + jsfunc = f'(gal) => {{ return extract_image_from_gallery(gal, "{binding.source_tabname}"); }}' else: func = send_image_and_dimensions if destination_width_component else lambda x: x jsfunc = None @@ -160,27 +187,65 @@ def connect_paste_params_buttons(): ) if binding.source_text_component is not None and fields is not None: - connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname) + 
connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname, is_paste=binding.is_paste) if binding.source_tabname is not None and fields is not None: - paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) + binding.paste_field_names - binding.paste_button.click( - fn=lambda *x: x, - inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names], - outputs=[field for field, name in fields if name in paste_field_names], - show_progress=False, - ) + paste_field_names = [ + *["Prompt", "Negative prompt", "Steps", "Face restoration"], + *(["Seed"] if shared.opts.send_seed else []), + *(["CFG scale"] if shared.opts.send_cfg else []), + *binding.paste_field_names, + ] + + if binding.source_tabname == "txt2img" and shared.opts.send_image_info_t2i_to_i2i: + + def read_infotext(gallery: list[dict], index: int, paste_fields: list[tuple]): + res = [] + + if len(gallery) == 0: + for _ in paste_fields: + res.append(gr.skip()) + return res + + image = image_from_url_text(gallery[index]) + info, _ = images.read_info_from_image(image) + + if not info: + for _ in paste_fields: + res.append(gr.skip()) + return res + + params = parse_generation_parameters(info) + script_callbacks.infotext_pasted_callback(info, params) + + for output, key in paste_fields: + res.append(_parse_info(output, key, params)) + + return res + + binding.paste_button.click( + fn=partial(read_infotext, paste_fields=[(field, name) for field, name in fields if name in paste_field_names]), + inputs=[binding.source_image_component, shared.t2i_gallery_index], + outputs=[field for field, name in fields if name in paste_field_names], + show_progress=False, + ) + + else: + binding.paste_button.click( + fn=lambda *args: args, + inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names], + outputs=[field for field, name in fields if name in paste_field_names], + show_progress=False, + ) binding.paste_button.click( fn=None, _js=f"switch_to_{binding.tabname}", - inputs=None, - outputs=None, show_progress=False, ) -def send_image_and_dimensions(x): +def send_image_and_dimensions(x: Any) -> tuple[Image.Image, int, int]: if isinstance(x, Image.Image): img = x else: @@ -196,20 +261,21 @@ def send_image_and_dimensions(x): return img, w, h -def restore_old_hires_fix_params(res): - """for infotexts that specify old First pass size parameter, convert it into - width, height, and hr scale""" +def restore_old_hires_fix_params(res: dict): + """ + for infotexts that specify old First pass size parameter, convert it into width, height, and hr scale + """ - firstpass_width = res.get('First pass size-1', None) - firstpass_height = res.get('First pass size-2', None) + firstpass_width = res.get("First pass size-1", None) + firstpass_height = res.get("First pass size-2", None) if shared.opts.use_old_hires_fix_width_height: hires_width = int(res.get("Hires resize-1", 0)) hires_height = int(res.get("Hires resize-2", 0)) if hires_width and hires_height: - res['Size-1'] = hires_width - res['Size-2'] = hires_height + res["Size-1"] = hires_width + res["Size-2"] = hires_height return if firstpass_width is None or firstpass_height is None: @@ -222,21 +288,22 @@ def restore_old_hires_fix_params(res): if firstpass_width == 0 or firstpass_height == 0: firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height) - 
res['Size-1'] = firstpass_width - res['Size-2'] = firstpass_height - res['Hires resize-1'] = width - res['Hires resize-2'] = height + res["Size-1"] = firstpass_width + res["Size-2"] = firstpass_height + res["Hires resize-1"] = width + res["Hires resize-2"] = height def parse_generation_parameters(x: str, skip_fields: list[str] | None = None): - """parses generation parameters string, the one you see in text field under the picture in UI: -``` -girl with an artist's beret, determined, blue eyes, desert scene, computer monitors, heavy makeup, by Alphonse Mucha and Charlie Bowater, ((eyeshadow)), (coquettish), detailed, intricate -Negative prompt: ugly, fat, obese, chubby, (((deformed))), [blurry], bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), messy drawing -Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hash: 45dee52b -``` - - returns a dict with field values + """ + parses generation parameters string, the one you see in text field under the picture in UI: + ``` + girl with an artist's beret, determined, blue eyes, desert scene, computer monitors, heavy makeup, by Alphonse Mucha and Charlie Bowater, ((eyeshadow)), (coquettish), detailed, intricate + Negative prompt: ugly, fat, obese, chubby, (((deformed))), [blurry], bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), messy drawing + Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hash: 45dee52b + ``` + + returns a dict with field values """ if skip_fields is None: skip_fields = shared.opts.infotext_skip_pasting @@ -251,7 +318,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model *lines, lastline = x.strip().split("\n") if len(re_param.findall(lastline)) < 3: lines.append(lastline) - lastline = '' + lastline = "" for line in lines: line = line.strip() @@ -288,7 +355,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model except Exception: print(f'Error parsing "{k}: {v}"') - # Missing CLIP skip means it was set to 1 (the default) if "Clip skip" not in res: res["Clip skip"] = "1" @@ -315,7 +381,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Mask mode"] = "Inpaint masked" if "Masked content" not in res: - res["Masked content"] = 'original' + res["Masked content"] = "original" if "Inpaint area" not in res: res["Inpaint area"] = "Whole picture" @@ -325,7 +391,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model restore_old_hires_fix_params(res) - # Missing RNG means the default was set, which is GPU RNG if "RNG" not in res: res["RNG"] = "GPU" @@ -354,7 +419,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model p_attention = prompt_parser.parse_prompt_attention(prompt) n_attention = prompt_parser.parse_prompt_attention(negative_prompt) prompt_attention = [*p_attention, *n_attention] - prompt_with_attention = [p for p in prompt_attention if p[1] == 1.0 or p[0] == 'BREAK'] + prompt_with_attention = [p for p in prompt_attention if p[1] == 1.0 or p[0] == "BREAK"] if len(prompt_attention) != len(prompt_with_attention): res["Emphasis"] = "Original" @@ -364,10 +429,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model return res -infotext_to_setting_name_mapping = [ - -] -"""Mapping of infotext labels to setting names. 
Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead. +infotext_to_setting_name_mapping: list[tuple[str, str]] = [] +""" +Mapping of infotext labels to setting names. Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead. Example content: infotext_to_setting_name_mapping = [ @@ -379,8 +443,9 @@ infotext_to_setting_name_mapping = [ """ -def create_override_settings_dict(text_pairs): - """creates processing's override_settings parameters from gradio's multiselect +def create_override_settings_dict(text_pairs: list[str]) -> dict[str, Any]: + """ + creates processing's override_settings parameters from gradio's multiselect Example input: ['Clip skip: 2', 'Model hash: e6e99610c4', 'ENSD: 31337'] @@ -410,7 +475,8 @@ def create_override_settings_dict(text_pairs): def get_override_settings(params, *, skip_fields=None): - """Returns a list of settings overrides from the infotext parameters dictionary. + """ + Returns a list of settings overrides from the infotext parameters dictionary. This function checks the `params` dictionary for any keys that correspond to settings in `shared.opts` and returns a list of tuples containing the parameter name, setting name, and new value cast to correct type. @@ -451,44 +517,28 @@ def get_override_settings(params, *, skip_fields=None): return res -def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname): +def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname, *, is_paste=False): def paste_func(prompt): - if not prompt and not shared.cmd_opts.hide_ui_dir_config: + res = [] + + if is_paste and not prompt: filename = os.path.join(data_path, "params.txt") try: with open(filename, "r", encoding="utf8") as file: prompt = file.read() except OSError: - pass + prompt = None + + if not prompt: + for _, _ in paste_fields: + res.append(gr.skip()) + return res params = parse_generation_parameters(prompt) script_callbacks.infotext_pasted_callback(prompt, params) - res = [] for output, key in paste_fields: - if callable(key): - v = key(params) - else: - v = params.get(key, None) - - if v is None: - res.append(gr.skip()) - elif isinstance(v, type_of_gr_update): - res.append(v) - else: - try: - valtype = type(output.value) - - if valtype == bool and v == "False": - val = False - elif valtype == int: - val = float(v) - else: - val = valtype(v) - - res.append(gr.update(value=val)) - except Exception: - res.append(gr.skip()) + res.append(_parse_info(output, key, params)) return res @@ -512,9 +562,7 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, ) button.click( fn=None, - _js=f"recalculate_prompts_{tabname}", - inputs=[], - outputs=[], + _js=f"recalculate_prompts_{'txt2img' if tabname == 'txt2img' else 'img2img'}", show_progress=False, ) @@ -522,4 +570,4 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, import sys # Backward Compatibility -sys.modules["modules.generation_parameters_copypaste"] = sys.modules[__name__] +sys.modules["modules.generation_parameters_copypaste"] = sys.modules[__name__] diff --git a/modules/initialize.py b/modules/initialize.py index 60092cf67b86751bb9d974ce13712269d2d7a4a7..87d62f667fe459299d29328bb42dd1c4332944a6 100644 --- a/modules/initialize.py +++ b/modules/initialize.py @@ -3,7 +3,6 @@ import logging import os import sys import warnings -import os from modules.timer import startup_timer @@ -18,19 +17,19 @@ class HiddenPrints: sys.stdout = 
self._original_stdout -def imports(): - logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... +def shush(): + logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) logging.getLogger("xformers").addFilter(lambda record: "triton" not in record.getMessage().lower()) + warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") + warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") - import torch # noqa: F401 - startup_timer.record("import torch") +def imports(): + import torch # noqa: F401 + import torchvision # noqa: F401 import pytorch_lightning # noqa: F401 startup_timer.record("import torch") - warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") - warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") - os.environ.setdefault("GRADIO_ANALYTICS_ENABLED", "False") import gradio # noqa: F401 diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 09c005dd3ebd00168a77b98a9337b05cc2b25c16..8929bb901d38274f271df016c114d0fa75917ddb 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -269,8 +269,8 @@ def requirements_met(requirements_file): def prepare_environment(): torch_index_url = os.environ.get("TORCH_INDEX_URL", "https://download.pytorch.org/whl/cu128") - torch_command = os.environ.get("TORCH_COMMAND", f"pip install torch==2.7.0+cu128 torchvision==0.22.0+cu128 --extra-index-url {torch_index_url}") - xformers_package = os.environ.get("XFORMERS_PACKAGE", f"xformers==0.0.30 --extra-index-url {torch_index_url}") + torch_command = os.environ.get("TORCH_COMMAND", f"pip install torch==2.7.1+cu128 torchvision==0.22.1+cu128 --extra-index-url {torch_index_url}") + xformers_package = os.environ.get("XFORMERS_PACKAGE", f"xformers==0.0.31.post1 --extra-index-url {torch_index_url}") sage_package = os.environ.get("SAGE_PACKAGE", "sageattention==1.0.6") clip_package = os.environ.get("CLIP_PACKAGE", "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip") diff --git a/modules/modelloader.py b/modules/modelloader.py index 859e5eb7345d80da0c47dd25d85035b890e31bb8..b61696c6c53a3ee025c304e0c3060555c241f6c5 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -129,6 +129,9 @@ def load_spandrel_model( if model_descriptor.supports_half: model_descriptor.model.half() half = True + elif model_descriptor.supports_bfloat16: + model_descriptor.model.bfloat16() + half = True else: logger.warning(f"Model {path} does not support half precision...") diff --git a/modules/processing.py b/modules/processing.py index 8ae8eb468ad3924e58a0829c8bc9326f412f3333..8e8a3044528c6b92b2da07ae7b8ef2423a6e1e16 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -205,6 +205,7 @@ class StableDiffusionProcessing: do_not_reload_embeddings: bool = False denoising_strength: float = None ddim_discretize: str = None + skip_early_cond: float = None s_min_uncond: float = None s_churn: float = None s_tmax: float = None @@ -215,8 +216,8 @@ class StableDiffusionProcessing: sampler_index: int = None refiner_checkpoint: str = None refiner_switch_at: float = None - token_merging_ratio = 0 - token_merging_ratio_hr = 0 + token_merging_ratio: float = 0.0 + token_merging_ratio_hr: float = 0.0 disable_extra_networks: bool = False firstpass_image: Image = None @@ -274,7 +275,8 @@ class StableDiffusionProcessing: self.styles = [] self.sampler_noise_scheduler_override = None - self.s_min_uncond = 
self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond + self.skip_early_cond = self.get_skip_early_cond() + self.s_min_uncond = self.get_s_min_uncond() self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float("inf") @@ -424,11 +426,31 @@ class StableDiffusionProcessing: StableDiffusionProcessing.cached_c = [None, None] StableDiffusionProcessing.cached_uc = [None, None] - def get_token_merging_ratio(self, for_hr=False): - if for_hr: - return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio + def get_skip_early_cond(self, *, for_hr: bool = False) -> float: + if isinstance(self, StableDiffusionProcessingImg2Img): + return self.skip_early_cond or opts.skip_early_cond_img2img or opts.skip_early_cond + elif for_hr: + return opts.skip_early_cond_hr or self.skip_early_cond or opts.skip_early_cond + else: + return self.skip_early_cond or opts.skip_early_cond - return self.token_merging_ratio or opts.token_merging_ratio + def get_s_min_uncond(self, *, for_hr: bool = False) -> float: + if isinstance(self, StableDiffusionProcessingImg2Img): + return self.s_min_uncond or opts.s_min_uncond_img2img or opts.s_min_uncond + elif for_hr: + return opts.s_min_uncond_hr or self.s_min_uncond or opts.s_min_uncond + else: + return self.s_min_uncond or opts.s_min_uncond + + def get_token_merging_ratio(self, *, for_hr: bool = False) -> float: + if "token_merging_ratio" in self.override_settings: + return self.override_settings["token_merging_ratio"] + elif isinstance(self, StableDiffusionProcessingImg2Img): + return self.token_merging_ratio or opts.token_merging_ratio_img2img or opts.token_merging_ratio + elif for_hr: + return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio + else: + return self.token_merging_ratio or opts.token_merging_ratio def setup_prompts(self): if isinstance(self.prompt, list): @@ -568,6 +590,7 @@ class Processed: self.s_tmin = p.s_tmin self.s_tmax = p.s_tmax self.s_noise = p.s_noise + self.skip_early_cond = p.skip_early_cond self.s_min_uncond = p.s_min_uncond self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override self.prompt = self.prompt if not isinstance(self.prompt, list) else self.prompt[0] @@ -624,9 +647,6 @@ class Processed: def infotext(self, p: StableDiffusionProcessing, index): return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size) - def get_token_merging_ratio(self, for_hr=False): - return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio - def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None): g = rng.ImageRNG(shape, seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w) @@ -685,6 +705,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter token_merging_ratio = p.get_token_merging_ratio() token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True) + _hr_tome = enable_hr and token_merging_ratio_hr > 0.0 and token_merging_ratio_hr != token_merging_ratio + prompt_text = p.main_prompt if use_main_prompt else all_prompts[index] 
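# Note: _hr_tome (computed above) gates the "Token merging ratio hr" infotext entry below; the value is
# only recorded when hires. fix is enabled and the hr ratio is non-zero and differs from the base ratio.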
negative_prompt = p.main_negative_prompt if use_main_prompt else all_negative_prompts[index] @@ -716,11 +738,12 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": opts.eta_noise_seed_delta if uses_ensd else None, - "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio, - "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr, + "Token merging ratio": token_merging_ratio if token_merging_ratio > 0.0 else None, + "Token merging ratio hr": token_merging_ratio_hr if _hr_tome else None, "Init image hash": getattr(p, "init_img_hash", None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, - "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + "SkipEarly": None if p.skip_early_cond < 0.05 else p.skip_early_cond, + "NGMS": None if p.s_min_uncond < 0.05 else p.s_min_uncond, "Tiling": True if p.tiling else None, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, @@ -1115,6 +1138,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): hr_negative_prompts: list = field(default=None, init=False) hr_extra_network_data: list = field(default=None, init=False) + txt2img_upscale: bool = field(default=False, init=False) + """attribute signifying that this is processed by the Upscale button""" + def __post_init__(self): super().__post_init__() @@ -1311,6 +1337,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): img2img_sampler_name = self.hr_sampler_name or self.sampler_name + self.skip_early_cond = self.get_skip_early_cond(for_hr=True) + self.s_min_uncond = self.get_s_min_uncond(for_hr=True) + self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) if self.latent_scale_mode is not None: @@ -1714,6 +1743,3 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): devices.torch_gc() return samples - - def get_token_merging_ratio(self, for_hr=False): - return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index c8b423a0ec3b06536e0d5a739da920510937af33..0ccf98daaa8bc23ecaa79268d645709f62b34038 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -387,10 +387,10 @@ def parse_prompt_attention(text): (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 - \( - literal character '(' - \[ - literal character '[' - \) - literal character ')' - \] - literal character ']' + \\( - literal character '(' + \\[ - literal character '[' + \\) - literal character ')' + \\] - literal character ']' \\ - literal character '\' anything else - just text @@ -400,7 +400,7 @@ def parse_prompt_attention(text): [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] - >>> parse_prompt_attention('\(literal\]') + >>> parse_prompt_attention('\\(literal\\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] diff --git a/modules/script_callbacks.py 
b/modules/script_callbacks.py index fed3bd61f72d86f8e5131d1ffe04ed749d3cf14e..15d98d457b8884f47f9b534efa03ca318cb06802 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -95,11 +95,6 @@ class AfterCFGCallbackParams: """Total number of sampling steps planned""" -class UiTrainTabParams: - def __init__(self, txt2img_preview_params): - self.txt2img_preview_params = txt2img_preview_params - - class ImageGridLoopParams: def __init__(self, imgs, cols, rows): self.imgs = imgs @@ -121,7 +116,6 @@ callback_map = dict( callbacks_app_started=[], callbacks_model_loaded=[], callbacks_ui_tabs=[], - callbacks_ui_train_tabs=[], callbacks_ui_settings=[], callbacks_before_image_saved=[], callbacks_image_saved=[], @@ -186,14 +180,6 @@ def ui_tabs_callback(): return res -def ui_train_tabs_callback(params: UiTrainTabParams): - for c in callback_map['callbacks_ui_train_tabs']: - try: - c.callback(params) - except Exception: - report_exception(c, 'callbacks_ui_train_tabs') - - def ui_settings_callback(): for c in callback_map['callbacks_ui_settings']: try: @@ -388,12 +374,6 @@ def on_ui_tabs(callback): add_callback(callback_map['callbacks_ui_tabs'], callback) -def on_ui_train_tabs(callback): - """register a function to be called when the UI is creating new tabs for the train tab. - Create your new tabs with gr.Tab. - """ - add_callback(callback_map['callbacks_ui_train_tabs'], callback) - def on_ui_settings(callback): """register a function to be called before UI settings are populated; add your settings diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 9685553ad1ed8f0a01e9c0393bec78b00a5228db..c64ace17d14316ee3a2a0ad9a1666945ee845360 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -3,7 +3,7 @@ from collections import namedtuple import torch -from modules import prompt_parser, devices, sd_hijack, sd_emphasis +from modules import devices, prompt_parser, sd_emphasis, sd_hijack from modules.shared import opts @@ -11,8 +11,7 @@ class PromptChunk: """ This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt. If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary. - Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token, - so just 75 tokens from prompt. + Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token each, and 75 tokens from prompt. """ def __init__(self): @@ -23,7 +22,7 @@ class PromptChunk: PromptChunkFix = namedtuple("PromptChunkFix", ["offset", "embedding"]) """ -An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt +An object of this type is a marker showing that textual inversion embedding's vectors have to be placed at offset in the prompt chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally are applied by sd_hijack.EmbeddingsWithFixes's forward function. """ @@ -31,17 +30,15 @@ are applied by sd_hijack.EmbeddingsWithFixes's forward function. class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): """ - A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, - making it possible to have unlimited prompt length and assign weights to tokens in prompt. + A pytorch module that is a wrapper for FrozenCLIPEmbedder module. 
It enhances FrozenCLIPEmbedder, + making it possible to have unlimited prompt length and assign weights to tokens in prompts. """ def __init__(self, wrapped, hijack): super().__init__() self.wrapped = wrapped - """ - Original FrozenCLIPEmbedder or FrozenOpenCLIPEmbedder module - """ + """Original FrozenCLIPEmbedder or FrozenOpenCLIPEmbedder module""" self.hijack: sd_hijack.StableDiffusionModelHijack = hijack self.chunk_length = 75 @@ -51,7 +48,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): self.legacy_ucg_val = None def empty_chunk(self): - """creates an empty PromptChunk and returns it""" + """Creates an empty PromptChunk and returns it""" chunk = PromptChunk() chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1) @@ -59,7 +56,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): return chunk def get_target_prompt_token_count(self, token_count): - """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented""" + """Returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented""" return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length @@ -70,10 +67,10 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): def encode_with_transformers(self, tokens): """ - converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens; + Converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens; All python lists with tokens are assumed to have same length, usually 77. - if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on - model - can be 768 and 1024. + If input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on + model - can be 768 and 1280. Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None). """ @@ -83,15 +80,14 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): """ Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through transformers. - nvpt is used as a maximum length in tokens. If text produces less teokens than nvpt, only this many is returned. + nvpt is used as a maximum length in tokens. If text produces less tokens than nvpt, only this many is returned. """ raise NotImplementedError def tokenize_line(self, line): """ - this transforms a single prompt into a list of PromptChunk objects - as many as needed to - represent the prompt. + This transforms a single prompt into a list of PromptChunk objects - as many as needed to represent the prompt. Returns the list and the total number of tokens in the prompt. """ @@ -110,7 +106,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): def next_chunk(is_last=False): """ puts current chunk into the list of results and produces the next one - empty; - if is_last is true, tokens tokens at the end won't add to token_count + if is_last is true, tokens tokens at the end won't be added to token_count """ nonlocal token_count nonlocal last_comma @@ -187,8 +183,8 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): def process_texts(self, texts): """ - Accepts a list of texts and calls tokenize_line() on each, with cache. 
Returns the list of results and maximum - length, in tokens, of all texts. + Accepts a list of texts and calls tokenize_line() on each, with cache. + Returns the list of results and maximum length, in tokens, of all texts. """ token_count = 0 @@ -212,7 +208,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): """ Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts. Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will - be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280. + be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768; for SDXL it's 1280. An example shape returned by this function can be: (2, 77, 768). For SDXL, instead of returning one tensor above, it returns a tuple with two: the other one with shape (B, 1280) with pooled values. Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element @@ -261,15 +257,15 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): def process_tokens(self, remade_batch_tokens, batch_multipliers): """ - sends one single prompt chunk to be encoded by transformers neural network. + Sends one single prompt chunk to be encoded by transformers neural network. remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens. - Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier - corresponds to one token. + Multipliers are used to give more or less weight to the outputs of transformers network. + Each multiplier corresponds to one token. """ tokens = torch.asarray(remade_batch_tokens).to(devices.device) - # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones. 
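As an aside for reviewers, here is a minimal standalone sketch of the chunking rule the PromptChunk docstring above describes: token ids are grouped into chunks of 75, and each chunk is wrapped with a start and an end token to reach the fixed length of 77. The `ID_START`/`ID_END` values and the dummy token ids are placeholders rather than the real CLIP vocabulary, and short chunks are padded with the end token as SD1 does.

```python
# Illustrative only: split token ids into 75-token chunks and pad each chunk to the
# fixed length of 77 (start token + up to 75 prompt tokens + end/padding tokens).
CHUNK_LENGTH = 75
ID_START, ID_END = 49406, 49407  # placeholder special-token ids

def chunk_tokens(token_ids: list[int]) -> list[list[int]]:
    chunks = []
    for i in range(0, max(len(token_ids), 1), CHUNK_LENGTH):
        body = token_ids[i : i + CHUNK_LENGTH]
        body = body + [ID_END] * (CHUNK_LENGTH - len(body))  # pad short chunks with the end token
        chunks.append([ID_START] + body + [ID_END])
    return chunks

if __name__ == "__main__":
    chunks = chunk_tokens(list(range(100)))      # 100 prompt tokens -> 2 chunks
    assert [len(c) for c in chunks] == [77, 77]
    print(len(chunks), "chunks of", len(chunks[0]), "tokens each")
```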
+ # SD1 uses the same token for padding and end of text; while SDXL uses different ones if self.id_end != self.id_pad: for batch_pos in range(len(remade_batch_tokens)): index = remade_batch_tokens[batch_pos].index(self.id_end) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index eb7cac8e74cf86cc0827f3cb2969fb397b5fb22c..1e9d64abbdb8166eeef6377c8bce5f5c11a2f1f2 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -1,12 +1,8 @@ +import math import torch from modules import prompt_parser, sd_samplers_common -from modules.script_callbacks import ( - AfterCFGCallbackParams, - CFGDenoiserParams, - cfg_after_cfg_callback, - cfg_denoiser_callback, -) -from modules.shared import opts, state +from modules.script_callbacks import AfterCFGCallbackParams, CFGDenoiserParams, cfg_after_cfg_callback, cfg_denoiser_callback +from modules.shared import state from modules_forge import forge_sampler @@ -87,7 +83,7 @@ class CFGDenoiser(torch.nn.Module): self.sampler.sampler_extra_args["cond"] = c self.sampler.sampler_extra_args["uncond"] = uc - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond, **kwargs): + def forward(self, x, sigma, uncond, cond, cond_scale, image_cond, s_min_uncond=0.0, skip_early_cond=0.0, **kwargs): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException @@ -152,12 +148,13 @@ class CFGDenoiser(torch.nn.Module): ) cfg_denoiser_callback(denoiser_params) - if 0.0 <= self.step / self.total_steps <= opts.skip_early_cond: + if 0.0 < self.step / self.total_steps <= skip_early_cond: cond_scale = 1.0 - if 0.0 <= sigma[0] <= s_min_uncond: + if 0.0 < sigma[0] < s_min_uncond: cond_scale = 1.0 - skip_uncond: bool = abs(cond_scale - 1.0) < 10**-6 + model_options = kwargs.get("model_options", None) + skip_uncond: bool = math.isclose(cond_scale, 1.0) and not (model_options or {}).get("disable_cfg1_optimization", False) self.padded_cond_uncond = not skip_uncond denoised = forge_sampler.forge_sample( @@ -166,7 +163,7 @@ class CFGDenoiser(torch.nn.Module): cond_scale=cond_scale, cond_composition=cond_composition, skip_uncond=skip_uncond, - options=kwargs.get("model_options", None), + options=model_options, ) # if getattr(self.p.sd_model, "cond_stage_key", None) == "edit" and getattr(self, "image_cfg_scale", 1.0) != 1.0: diff --git a/modules/sd_samplers_cfgpp.py b/modules/sd_samplers_cfgpp.py index 69b4d70f7004f3c53cc8365172029607c46ead75..d922b44ff3c3f31b82c2a047f8a3b5d5eca160dd 100644 --- a/modules/sd_samplers_cfgpp.py +++ b/modules/sd_samplers_cfgpp.py @@ -1,10 +1,5 @@ import torch -from k_diffusion.sampling import ( - BrownianTreeNoiseSampler, - default_noise_sampler, - get_ancestral_step, - to_d, -) +from k_diffusion.sampling import BrownianTreeNoiseSampler, default_noise_sampler, get_ancestral_step, to_d from tqdm.auto import trange @@ -194,10 +189,62 @@ def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, dis @torch.no_grad() -def sample_dpmpp_3m_sde_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=None, s_noise=None, noise_sampler=None): - eta = 1.0 if eta is None else eta - s_noise = 1.0 if s_noise is None else s_noise +def sample_dpmpp_2m_sde_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, solver_type="midpoint"): + if len(sigmas) <= 1: + return x + + if solver_type not in {"heun", "midpoint"}: + raise ValueError('solver_type must be "heun" or 
"midpoint"') + + seed = extra_args.get("seed", None) + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + + old_denoised = None + h_last = None + h = None + + temp = [0] + + def post_cfg_function(args): + temp[0] = args["uncond_denoised"] + return args["denoised"] + + model_options = extra_args.get("model_options", {}).copy() + extra_args["model_options"] = _set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True) + + for i in trange(len(sigmas) - 1, disable=disable): + denoised = model(x, sigmas[i] * s_in, **extra_args) + if callback is not None: + callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised}) + if sigmas[i + 1] == 0: + x = denoised + else: + t, s = -sigmas[i].log(), -sigmas[i + 1].log() + h = s - t + eta_h = eta * h + + x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * (x + (denoised - temp[0])) + (-h - eta_h).expm1().neg() * denoised + + if old_denoised is not None: + r = h_last / h + if solver_type == "heun": + x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised) + elif solver_type == "midpoint": + x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised) + + if eta: + x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise + old_denoised = denoised + h_last = h + return x + + +@torch.no_grad() +def sample_dpmpp_3m_sde_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None): if len(sigmas) <= 1: return x diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 8c3dcb5f3874d49c04e06c28ed3be7df0f03ea0c..9251fd87324ecdb81aea2f0261e3cd6d9e2b2124 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -261,7 +261,8 @@ class Sampler: self.eta = None self.config: SamplerData = None self.last_latent = None - self.s_min_uncond = None + self.skip_early_cond: float = None + self.s_min_uncond: float = None # Default values for sampler parameters self.s_churn = 0.0 @@ -314,6 +315,7 @@ class Sampler: self.model_wrap_cfg.step = 0 self.model_wrap_cfg.image_cfg_scale = getattr(p, "image_cfg_scale", None) self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0) + self.skip_early_cond = getattr(p, "skip_early_cond", 0.0) self.s_min_uncond = getattr(p, "s_min_uncond", 0.0) k_diffusion.sampling.torch = TorchHijack(p) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 8ed2cc1025ebf409059383158032e3712227b2e2..1b1c2b419e4e3433beeb3b32d173d26f5ea78fc2 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -207,6 +207,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler): "image_cond": image_conditioning, "uncond": unconditional_conditioning, "cond_scale": p.hr_cfg_scale if p.is_hr_pass else p.cfg_scale, + "skip_early_cond": self.skip_early_cond, "s_min_uncond": self.s_min_uncond, } @@ -268,6 +269,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler): "image_cond": image_conditioning, "uncond": unconditional_conditioning, "cond_scale": p.hr_cfg_scale if p.is_hr_pass else p.cfg_scale, + "skip_early_cond": self.skip_early_cond, "s_min_uncond": self.s_min_uncond, } 
diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py index 41ac5bf9972b80498a680531365e27e637c63d06..51b852000f8ced3fcdfad504a002b1293d6c6951 100644 --- a/modules/sd_samplers_timesteps.py +++ b/modules/sd_samplers_timesteps.py @@ -142,6 +142,7 @@ class CompVisSampler(sd_samplers_common.Sampler): "image_cond": image_conditioning, "uncond": unconditional_conditioning, "cond_scale": p.cfg_scale, + "skip_early_cond": self.skip_early_cond, "s_min_uncond": self.s_min_uncond, } @@ -184,6 +185,7 @@ class CompVisSampler(sd_samplers_common.Sampler): "image_cond": image_conditioning, "uncond": unconditional_conditioning, "cond_scale": p.cfg_scale, + "skip_early_cond": self.skip_early_cond, "s_min_uncond": self.s_min_uncond, } samples = self.launch_sampling( diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 2520cb3ef54e54bba2f87a2ca2e2534151637b0f..7146f40fad94c14ca6fd5de402ea605f409109ab 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -2,7 +2,7 @@ import os import collections from dataclasses import dataclass -from modules import paths, shared, devices, script_callbacks, sd_models, sd_samplers_common, extra_networks, sd_hijack, hashes +from modules import paths, shared, script_callbacks, sd_models, sd_samplers_common, extra_networks, sd_hijack, hashes import glob from copy import deepcopy diff --git a/modules/shared.py b/modules/shared.py index 373dd9fc0153c6efb17d251a2e56f147c50500c7..e9f29ee5559b464e0612251d476c5801515bab7b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -96,5 +96,8 @@ list_checkpoint_tiles = shared_items.list_checkpoint_tiles refresh_checkpoints = shared_items.refresh_checkpoints list_samplers = shared_items.list_samplers +t2i_gallery_index: gr.Number # int +i2i_gallery_index: gr.Number # int + # ===== backward compatibility ===== # batch_cond_uncond = True diff --git a/modules/shared_options.py b/modules/shared_options.py index 0705a71c9f27e285eb32814758cc8985972f4aeb..ee0455ea4df2620e3f70f88b8b784d42f3feac37 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -118,8 +118,10 @@ options_templates.update( { "ESRGAN_tile": OptionInfo(256, "Tile Size for Upscalers", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), "ESRGAN_tile_overlap": OptionInfo(16, "Tile Overlap for Upscalers", gr.Slider, {"minimum": 0, "maximum": 64, "step": 4}).info("low values = visible seam"), + "composite_tiles_on_gpu": OptionInfo(False, "Composite the Tiles on GPU").info("improve performance and resource utilization"), "upscaler_for_img2img": OptionInfo("None", "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}).info("for resizing the input image if the image resolution is smaller than the generation resolution"), "upscaling_max_images_in_cache": OptionInfo(4, "Number of upscaled images to cache", gr.Slider, {"minimum": 0, "maximum": 8, "step": 1}), + "prefer_fp16_upscalers": OptionInfo(False, "Prefer to load Upscaler in half precision").info("increase speed; reduce quality; will try fp16, then bf16, then fall back to fp32 if not supported").needs_restart(), }, ) ) @@ -151,6 +153,7 @@ options_templates.update( "list_hidden_files": OptionInfo(True, "List the models/files under hidden directories").info('directory is hidden if its name starts with "."'), "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping when loading .safetensors").info("fix slow loading speed in certain cases"), "dump_stacks_on_signal": OptionInfo(False, "Print the stack 
trace before terminating the webui via Ctrl + C"), + "no_flashbang": OptionInfo(False, 'Force the background color of the "Reloading..." screen to be black'), }, ) ) @@ -206,6 +209,9 @@ to create the resulting image after the sampling is finished. For img2img, VAE i "prefer_vae_precision_float16": OptionInfo(False, "Prefer VAE in float16 precision").info("VAE at fp16 tends to cause NaNs; enable with caution!"), "sd_vae_encode_method": OptionInfo("Full", "VAE for Encoding", gr.Radio, {"choices": ("Full", "TAESD")}, infotext="VAE Encoder").info("method to encode image to latent (img2img / Hires. fix / inpaint)"), "sd_vae_decode_method": OptionInfo("Full", "VAE for Decoding", gr.Radio, {"choices": ("Full", "TAESD")}, infotext="VAE Decoder").info("method to decode latent to image"), + "tile_exp_div": OptionDiv(), + "sd_vae_tiled_ops": OptionInfo(False, "Enable tiling optimizations for VAE").info("replace interpolate and Conv2D ops with tiled variants; reduce memory usage; slightly reduce speed").needs_restart(), + "sd_vae_tiled_size": OptionInfo(128, "Tile Size", gr.Slider, {"minimum": 64, "maximum": 256, "step": 64}).info("for the above setting").needs_restart(), }, ) ) @@ -239,13 +245,18 @@ options_templates.update( ("optimizations", "Optimizations", "sd"), { "cross_attention_optimization": OptionInfo("Automatic", "Cross Attention Optimization", gr.Dropdown, {"choices": ("Automatic",), "interactive": False}), - "skip_early_cond": OptionInfo(0.0, "Ignore Negative Prompt during Early Steps", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.05}, infotext="Skip Early CFG").info("in percentage of total steps; 0 = disable; higher = faster"), - "s_min_uncond": OptionInfo(0.0, "Skip Negative Prompt during Later Steps", gr.Slider, {"minimum": 0.0, "maximum": 8.0, "step": 0.05}).info('in "sigma"; 0 = disable; higher = faster'), "persistent_cond_cache": OptionInfo(True, "Persistent Cond Cache").info("do not recalculate conds if the prompts and parameters have not changed since previous generation"), - "div_precision": OptionDiv(), "fp8_storage": OptionInfo(False, "Store UNet Weights in fp8").info("store the weights in fp8; inference in fp16; reduce memory usage; reduce speed; reduce quality").needs_restart(), - "fp8_fast": OptionInfo(False, "Inference UNet in fast fp8 operations").info("inference in fp8 using torch._scaled_mm; increase speed; reduce quality; require RTX 40 or newer").needs_restart(), + "fp8_fast": OptionInfo(False, "Inference UNet in fast fp8 operations").info("inference in fp8 using torch._scaled_mm; increase speed; reduce quality; require RTX 40 or newer; require UNet Weights in fp8 option above").needs_restart(), "cublas_fast": OptionInfo(False, "Inference UNet in fast cublas operations").info('inference using CublasLinear; increase speed; require fp16; require manual installation').needs_restart(), + "div_skip_early": OptionDiv(), + "skip_early_cond": OptionInfo(0.0, "Ignore Negative Prompt during Early Steps", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.05}, infotext="Skip Early CFG").info("in percentage of total steps; 0 = disable; higher = faster"), + "skip_early_cond_img2img": OptionInfo(0.0, " - Skip Early for img2img", gr.Slider, {"minimum": -0.5, "maximum": 1.0, "step": 0.05}, infotext="Skip Early CFG").info("overrides base % if non-zero; disable if negative"), + "skip_early_cond_hr": OptionInfo(0.0, " - Skip Early for Hires. 
fix", gr.Slider, {"minimum": -0.5, "maximum": 1.0, "step": 0.05}, infotext="Skip Early CFG").info("overrides base % if non-zero; disable if negative"), + "div_ngms": OptionDiv(), + "s_min_uncond": OptionInfo(0.0, "Skip Negative Prompt during Later Steps", gr.Slider, {"minimum": 0.0, "maximum": 8.0, "step": 0.05}).info('in "sigma"; 0 = disable; higher = faster'), + "s_min_uncond_img2img": OptionInfo(0.0, " - Skip Later for img2img", gr.Slider, {"minimum": -0.5, "maximum": 8.0, "step": 0.05}).info("overrides base threshold if non-zero; disable if negative"), + "s_min_uncond_hr": OptionInfo(0.0, " - Skip Later for Hires. fix", gr.Slider, {"minimum": -0.5, "maximum": 8.0, "step": 0.05}).info("overrides base threshold if non-zero; disable if negative"), "div_tome": OptionDiv(), "token_merging_explanation": OptionHTML( """ @@ -293,6 +304,7 @@ However, the resulting UI is quite... sluggish. "extra_networks_tree_view_enable": OptionInfo(False, "Enable the Tree View").needs_reload_ui(), "extra_networks_tree_view_default_enabled": OptionInfo(False, "Show the Tree View by default").needs_reload_ui(), "div_tree": OptionDiv(), + "extra_networks_dir_btn_enable": OptionInfo(False, "Enable the Subdirectory Buttons").needs_reload_ui(), "extra_networks_hidden_models": OptionInfo("When searched", "Show the Extra Networks in hidden directories", gr.Radio, {"choices": ("Always", "When searched", "Never")}).info('"When searched" option will only show the item when the search string contains 4 characters or more'), "extra_networks_default_multiplier": OptionInfo(1.0, "Default Weight for Extra Networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.05}), "extra_networks_card_width": OptionInfo(0, "Card Width for Extra Networks").info("in pixels; 0 = auto"), @@ -338,6 +350,7 @@ options_templates.update( "sd_webui_modal_lightbox_icon_opacity": OptionInfo(1.0, "[Lightbox]: control icon unfocused opacity", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.05}, onchange=shared.reload_gradio_theme).info("for mouse only").needs_reload_ui(), "sd_webui_modal_lightbox_toolbar_opacity": OptionInfo(0.9, "[Lightbox]: tool bar opacity", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.05}, onchange=shared.reload_gradio_theme).info("for mouse only").needs_reload_ui(), "open_dir_button_choice": OptionInfo("Subdirectory", "What directory the [📂] button opens", gr.Radio, {"choices": ("Output Root", "Subdirectory", "Subdirectory (even temp dir)")}), + "hires_button_gallery_insert": OptionInfo(False, "When using the [✨] button, insert the upscaled image to the gallery").info("otherwise replace the selected image in the gallery"), }, ) ) @@ -377,6 +390,7 @@ options_templates.update( "gradio_themes_cache": OptionInfo(True, "Cache selected theme locally"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title"), "send_seed": OptionInfo(True, 'Send the Seed information when using the "Send to" buttons'), + "send_cfg": OptionInfo(True, 'Send the CFG information when using the "Send to" buttons'), "send_size": OptionInfo(True, 'Send the Resolution information when using the "Send to" buttons'), }, ) @@ -408,6 +422,8 @@ options_templates.update( """ ), + "div_exp_info": OptionDiv(), + "send_image_info_t2i_to_i2i": OptionInfo(False, 'When using the "Send to img2img" button (in txt2img tab), use the parameters in the infotext rather than the UI fields').info("e.g. 
to send the result of Wildcards instead of its syntax").needs_restart(), }, ) ) diff --git a/modules/sysinfo.py b/modules/sysinfo.py index 5b35c6c45a9b62cc91f43c3f7e3c9f191bbc05af..2dc8ae29f8d073a1c730617be358c045f14f1219 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -1,14 +1,14 @@ -from modules import paths_internal, timer, shared, extensions, errors - -import pkg_resources -import platform import hashlib -import psutil +import importlib.metadata import json -import sys -import re import os +import platform +import re +import sys +import psutil + +from modules import errors, extensions, paths_internal, shared, timer TOKEN = "EPIC_BRUH_MOMENT" @@ -102,7 +102,7 @@ def get_dict(): "Environment": get_environment(), "Config": get_config(), "Startup": timer.startup_record, - "Packages": sorted([f"{pkg.key}=={pkg.version}" for pkg in pkg_resources.working_set]), + "Packages": get_packages(), } return res @@ -165,3 +165,19 @@ def get_config(): return shared.opts.data except Exception as e: return str(e) + + +def get_packages(): + package_list = [] + + installed_packages = importlib.metadata.distributions() + for pkg in installed_packages: + try: + package_name = pkg.metadata.get("Name") + package_version = pkg.metadata.get("Version") + if package_name and package_version: + package_list.append(f"{package_name}=={package_version}") + except Exception: + continue + + return sorted(package_list) diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index b1485637edc0e20e82f4e3a828ce694f8f0e1ac5..16a4d09d8d4ef64cff419b28117c5814eef63658 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -5,7 +5,6 @@ import numpy as np import requests from modules import paths_internal from PIL import ImageDraw -from pkg_resources import parse_version GREEN = "#0F0" BLUE = "#00F" @@ -283,12 +282,8 @@ def is_square(w, h): model_dir_opencv = os.path.join(paths_internal.models_path, "opencv") -if parse_version(cv2.__version__) >= parse_version("4.8"): - model_file_path = os.path.join(model_dir_opencv, "face_detection_yunet_2023mar.onnx") - model_url = "https://github.com/opencv/opencv_zoo/blob/b6e370b10f641879a87890d44e42173077154a05/models/face_detection_yunet/face_detection_yunet_2023mar.onnx?raw=true" -else: - model_file_path = os.path.join(model_dir_opencv, "face_detection_yunet.onnx") - model_url = "https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true" +model_file_path = os.path.join(model_dir_opencv, "face_detection_yunet_2023mar.onnx") +model_url = "https://github.com/opencv/opencv_zoo/blob/b6e370b10f641879a87890d44e42173077154a05/models/face_detection_yunet/face_detection_yunet_2023mar.onnx?raw=true" def download_and_cache_models(): diff --git a/modules/txt2img.py b/modules/txt2img.py index 8800f761b850ae58f008eec525b3e6fe57b48642..378bb55cfc2707b54eb092b19928b6a491193fa2 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -7,7 +7,6 @@ from modules.infotext_utils import create_override_settings_dict, parse_generati from modules.shared import opts import modules.shared as shared from modules.ui import plaintext_to_html -from PIL import Image import gradio as gr from modules_forge import main_thread @@ -86,21 +85,25 @@ def txt2img_create_processing( return p -def txt2img_upscale_function(id_task: str, request: gr.Request, gallery, gallery_index, generation_info, *args): - assert len(gallery) > 0, "No image to 
upscale" - assert 0 <= gallery_index < len(gallery), f"Bad image index: {gallery_index}" +def txt2img_upscale_function(id_task: str, request: gr.Request, gallery: list[dict], gallery_index: int, generation_info: str, *args): + _gallery = [infotext_utils.image_from_url_text(info) for info in gallery] + + if len(gallery) == 0: + return _gallery, generation_info, "No image to upscale...", "" + if not (0 <= gallery_index < len(gallery)): + return _gallery, generation_info, f"Bad Index: {gallery_index}", "" + if len(gallery) > 1 and opts.return_grid and gallery_index == 0: + return _gallery, generation_info, "Cannot upscale the grid image...", "" p = txt2img_create_processing(id_task, request, *args, force_enable_hr=True) p.batch_size = 1 p.n_iter = 1 - # txt2img_upscale attribute that signifies this is called by txt2img_upscale p.txt2img_upscale = True - geninfo = json.loads(generation_info) - - image_info = gallery[gallery_index] if 0 <= gallery_index < len(gallery) else gallery[0] - p.firstpass_image = infotext_utils.image_from_url_text(image_info) + p.firstpass_image = _gallery[gallery_index] + p.width, p.height = p.firstpass_image.size + geninfo = json.loads(generation_info) parameters = parse_generation_parameters(geninfo.get("infotexts")[gallery_index], []) p.seed = parameters.get("Seed", -1) p.subseed = parameters.get("Variation seed", -1) @@ -116,16 +119,18 @@ def txt2img_upscale_function(id_task: str, request: gr.Request, gallery, gallery shared.total_tqdm.clear() new_gallery = [] - for i, image in enumerate(gallery): + for i, image in enumerate(_gallery): if i == gallery_index: - geninfo["infotexts"][gallery_index : gallery_index + 1] = processed.infotexts + if shared.opts.hires_button_gallery_insert: + new_gallery.append(image) new_gallery.extend(processed.images) else: - fake_image = Image.new(mode="RGB", size=(1, 1)) - fake_image.already_saved_as = image["name"].rsplit("?", 1)[0] - new_gallery.append(fake_image) + new_gallery.append(image) - geninfo["infotexts"][gallery_index] = processed.info + if shared.opts.hires_button_gallery_insert: + geninfo["infotexts"].insert(gallery_index + 1, processed.info) + else: + geninfo["infotexts"][gallery_index] = processed.info return new_gallery, json.dumps(geninfo), plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments") diff --git a/modules/ui.py b/modules/ui.py index f31eed0c1753a4cf3be63382cca8370fc157aa20..3ade1b5b8220c77173b827a6e57775b2997c2298 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -77,8 +77,8 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz def resize_from_to_html(width, height, scale_by): - target_width = int(width * scale_by) - target_height = int(height * scale_by) + target_width = round(width * scale_by / 64) * 64 + target_height = round(height * scale_by / 64) * 64 if not target_width or not target_height: return "no image selected" @@ -117,7 +117,7 @@ def update_token_counter(text, steps, styles, *, is_positive=True): cond_stage_model = sd_models.model_data.sd_model.cond_stage_model assert cond_stage_model is not None except Exception: - return f"?/?" + return "?/?" 
flat_prompts = reduce(lambda list1, list2: list1 + list2, prompt_schedules) prompts = [prompt_text for step, prompt_text in flat_prompts] @@ -201,6 +201,17 @@ def create_ui(): dummy_component = gr.Label(visible=False) + _t2i_gallery_index = gr.Number(value=0, visible=False, interactive=False, precision=0) + _t2i_gallery_index.change(fn=None, _js="(v) => { t2i_gallery_index = v; }", inputs=[_t2i_gallery_index]) + _i2i_gallery_index = gr.Number(value=0, visible=False, interactive=False, precision=0) + _i2i_gallery_index.change(fn=None, _js="(v) => { i2i_gallery_index = v; }", inputs=[_i2i_gallery_index]) + + def _reset() -> int: + return 0 + + def _update(evt: gr.SelectData) -> int: + return evt.index + extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs", elem_classes=["extra-networks"]) extra_tabs.__enter__() @@ -357,8 +368,9 @@ def create_ui(): show_progress=False, ) - toprow.prompt.submit(**txt2img_args) - toprow.submit.click(**txt2img_args) + toprow.prompt.submit(fn=_reset, outputs=[_t2i_gallery_index]).then(**txt2img_args) + toprow.submit.click(fn=_reset, outputs=[_t2i_gallery_index]).then(**txt2img_args) + output_panel.gallery.select(fn=_update, outputs=[_t2i_gallery_index], show_progress=False, queue=False) output_panel.button_upscale.click( fn=wrap_gradio_gpu_call(modules.txt2img.txt2img_upscale, extra_outputs=[None, "", ""]), @@ -416,22 +428,12 @@ def create_ui(): tabname="txt2img", source_text_component=toprow.prompt, source_image_component=None, + is_paste=True, ) ) steps = scripts.scripts_txt2img.script("Sampler").steps - txt2img_preview_params = [ - toprow.prompt, - toprow.negative_prompt, - steps, - scripts.scripts_txt2img.script("Sampler").sampler_name, - cfg_scale, - scripts.scripts_txt2img.script("Seed").seed, - width, - height, - ] - toprow.ui_styles.dropdown.change(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.token_counter]) toprow.ui_styles.dropdown.change(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.negative_token_counter]) toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.token_counter]) @@ -543,12 +545,15 @@ def create_ui(): fn=copy_image, inputs=[elem], outputs=[copy_image_destinations[name]], + show_progress=False, ) + + _tabname = name.replace(" ", "_") + button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], + fn=None, + _js=f"switch_to_{_tabname}", + show_progress=False, ) with FormRow(): @@ -585,7 +590,7 @@ def create_ui(): show_progress=False, ) - scale_by.release(**on_change_args) + scale_by.change(**on_change_args) button_update_resize_to.click(**on_change_args) tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) @@ -720,8 +725,9 @@ def create_ui(): show_progress=False, ) - toprow.prompt.submit(**img2img_args) - toprow.submit.click(**img2img_args) + toprow.prompt.submit(fn=_reset, outputs=[_i2i_gallery_index]).then(**img2img_args) + toprow.submit.click(fn=_reset, outputs=[_i2i_gallery_index]).then(**img2img_args) + output_panel.gallery.select(fn=_update, outputs=[_i2i_gallery_index], show_progress=False, queue=False) res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) @@ -778,6 +784,7 @@ def create_ui(): tabname="img2img", source_text_component=toprow.prompt, 
source_image_component=None, + is_paste=True, ) ) @@ -789,6 +796,9 @@ def create_ui(): if shared.opts.paste_safe_guard: toprow.hook_paste_guard() + shared.t2i_gallery_index = _t2i_gallery_index + shared.i2i_gallery_index = _i2i_gallery_index + scripts.scripts_current = None with gr.Blocks(analytics_enabled=False) as extras_interface: diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 59fed4ecfc967fc95855daef448039eb43412776..08d4486e125b63a1642915574fd2f4d4900c9eff 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -154,10 +154,8 @@ def add_pages_to_demo(app): app.add_api_route("/sd_extra_networks/get-single-card", get_single_card, methods=["GET"]) -def quote_js(s): - s = s.replace("\\", "\\\\") - s = s.replace('"', '\\"') - return f'"{s}"' +def quote_js(s: str): + return json.dumps(s, ensure_ascii=False) class ExtraNetworksPage: @@ -399,7 +397,7 @@ class ExtraNetworksPage: Returns: HTML string generated for this tree view """ - res = "" + res: list[str] = [] # Setup the tree dictionary roots = self.allowed_directories_for_previews() @@ -407,7 +405,7 @@ class ExtraNetworksPage: tree = get_tree([os.path.abspath(x) for x in roots], items=tree_items) if not tree: - return res + return "" def _build_tree(data: Optional[dict[str, ExtraNetworksItem]] = None) -> Optional[str]: """ @@ -442,9 +440,9 @@ class ExtraNetworksPage: item_html = self.create_tree_dir_item_html(tabname, k, _build_tree(v)) # Only add non-empty entries to the tree if item_html is not None: - res += item_html + res.append(item_html) - return f"" + return f"" def create_card_view_html(self, tabname: str, *, none_message) -> str: """ @@ -460,15 +458,52 @@ class ExtraNetworksPage: Returns: HTML formatted string """ - res = "" + res: list[str] = [] for item in self.items.values(): - res += self.create_item_html(tabname, item, self.card_tpl) + res.append(self.create_item_html(tabname, item, self.card_tpl)) - if res == "": + if not res: dirs = "".join([f"
<li>{x}</li>
  • " for x in self.allowed_directories_for_previews()]) - res = none_message or shared.html("extra-networks-no-cards.html").format(dirs=dirs) + res = [none_message or shared.html("extra-networks-no-cards.html").format(dirs=dirs)] - return res + return "".join(res) + + def create_dir_buttons_html(self, tabname: str) -> str: + """Generates HTML for the folder buttons""" + + subdirs: list[str] = [] + roots = [os.path.abspath(x) for x in self.allowed_directories_for_previews()] + + for parentdir in roots: + for root, dirs, _ in os.walk(parentdir, followlinks=True): + for dirname in dirs: + x = os.path.join(root, dirname) + + if not os.path.isdir(x): + continue + if len(os.listdir(x)) == 0: + continue + + subdir = os.path.abspath(x)[len(parentdir) :] + while subdir.startswith(os.path.sep): + subdir = subdir[1:] + + if subdir.startswith(".") and (not shared.opts.extra_networks_hidden_models == "Always"): + continue + + subdirs.append(subdir) + + if len(subdirs) > 0: + subdirs = ("all", *sorted(subdirs, key=shared.natural_sort_key)) + + return "".join( + f""" + + """ + for subdir in subdirs + ) def create_html(self, tabname, *, empty=False): """ @@ -522,6 +557,7 @@ class ExtraNetworksPage: "tree_view_btn_extra_class": tree_view_btn_extra_class, "tree_view_div_extra_class": tree_view_div_extra_class, "tree_html": self.create_tree_view_html(tabname) if shared.opts.extra_networks_tree_view_enable else "", + "dir_btns_html": self.create_dir_buttons_html(tabname) if shared.opts.extra_networks_dir_btn_enable else "", "items_html": self.create_card_view_html(tabname, none_message="Loading..." if empty else None), } ) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index ecd6bdec355a23684182f23d099de71a227e7eb9..57a6daed9b65088fba3a71f6de2cb30cadc2c3e2 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -13,29 +13,21 @@ from modules import shared Savedfile = namedtuple("Savedfile", ["name"]) -def register_tmp_file(gradio, filename): - if hasattr(gradio, 'temp_file_sets'): # gradio 3.15 - gradio.temp_file_sets[0] = gradio.temp_file_sets[0] | {os.path.abspath(filename)} +def register_tmp_file(gradio: gradio.Blocks, filename: str): + assert hasattr(gradio, "temp_file_sets") + gradio.temp_file_sets[0] = gradio.temp_file_sets[0] | {os.path.abspath(filename)} - if hasattr(gradio, 'temp_dirs'): # gradio 3.9 - gradio.temp_dirs = gradio.temp_dirs | {os.path.abspath(os.path.dirname(filename))} - -def check_tmp_file(gradio, filename): - if hasattr(gradio, 'temp_file_sets'): - return any(filename in fileset for fileset in gradio.temp_file_sets) - - if hasattr(gradio, 'temp_dirs'): - return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs) - - return False +def check_tmp_file(gradio: gradio.Blocks, filename: str): + assert hasattr(gradio, "temp_file_sets") + return any(filename in fileset for fileset in gradio.temp_file_sets) def save_pil_to_file(self, pil_image, dir=None, format="png"): - already_saved_as = getattr(pil_image, 'already_saved_as', None) + already_saved_as = getattr(pil_image, "already_saved_as", None) if already_saved_as and os.path.isfile(already_saved_as): register_tmp_file(shared.demo, already_saved_as) - filename_with_mtime = f'{already_saved_as}?{os.path.getmtime(already_saved_as)}' + filename_with_mtime = f"{already_saved_as}?{os.path.getmtime(already_saved_as)}" register_tmp_file(shared.demo, filename_with_mtime) return filename_with_mtime @@ -85,10 +77,8 @@ def cleanup_tmpdr(): os.remove(filename) -def 
is_gradio_temp_path(path): - """ - Check if the path is a temp dir used by gradio - """ +def is_gradio_temp_path(path: str): + """Check if the path is a temp dir used by gradio""" path = Path(path) if shared.opts.temp_dir and path.is_relative_to(shared.opts.temp_dir): return True diff --git a/modules/uni_pc/sampler.py b/modules/uni_pc/sampler.py index 0a9defa108b8e61adb8665b031c9770158654714..a52eaee78b69c3c809bed911040e7127b539834d 100644 --- a/modules/uni_pc/sampler.py +++ b/modules/uni_pc/sampler.py @@ -1,5 +1,3 @@ -"""SAMPLING ONLY.""" - import torch from .uni_pc import NoiseScheduleVP, model_wrapper, UniPC @@ -13,7 +11,7 @@ class UniPCSampler(object): to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.before_sample = None self.after_sample = None - self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) + self.register_buffer("alphas_cumprod", to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: @@ -27,30 +25,31 @@ class UniPCSampler(object): self.after_update = after_update @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): + def sample( + self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0.0, + mask=None, + x0=None, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs, + ): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] @@ -80,7 +79,7 @@ class UniPCSampler(object): else: img = x_T - ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) + ns = NoiseScheduleVP("discrete", alphas_cumprod=self.alphas_cumprod) # SD 1.X is "noise", SD 2.X is "v" model_type = "v" if self.model.parameterization == "v" else "noise" @@ -90,8 +89,8 @@ class UniPCSampler(object): ns, model_type=model_type, guidance_type="classifier-free", - #condition=conditioning, - #unconditional_condition=unconditional_conditioning, + # condition=conditioning, + # unconditional_condition=unconditional_conditioning, guidance_scale=unconditional_guidance_scale, ) diff --git a/modules/uni_pc/uni_pc.py b/modules/uni_pc/uni_pc.py index 4a365151344ecf80523c08df0795d69219088175..2cec0da818560ae03c216327d8cdf631a19de4da 100644 --- a/modules/uni_pc/uni_pc.py +++ b/modules/uni_pc/uni_pc.py @@ -5,14 +5,14 @@ import tqdm class NoiseScheduleVP: def __init__( - self, - schedule='discrete', - betas=None, - alphas_cumprod=None, - continuous_beta_0=0.1, - continuous_beta_1=20., - ): - """Create a wrapper class for the forward SDE (VP type). + self, + schedule="discrete", + betas=None, + alphas_cumprod=None, + continuous_beta_0=0.1, + continuous_beta_1=20.0, + ): + r"""Create a wrapper class for the forward SDE (VP type). 
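Since the docstring here is long, a compact numeric sketch may help while reviewing it: for a discrete schedule, the wrapper stores 0.5 * log(alphas_cumprod) and derives sigma_t and the half-logSNR lambda_t from it. The beta schedule below is a made-up stand-in for a real model's `alphas_cumprod`, used only to show the relationships.

```python
import torch

# Toy discrete schedule standing in for a real model's alphas_cumprod.
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
log_alphas = 0.5 * torch.log(alphas_cumprod)   # log(alpha_t), as stored by NoiseScheduleVP

def marginal_terms(i: int):
    """alpha_t, sigma_t and lambda_t = log(alpha_t) - log(sigma_t) at discrete index i."""
    log_alpha = log_alphas[i]
    alpha = log_alpha.exp()
    sigma = torch.sqrt(1.0 - torch.exp(2.0 * log_alpha))
    lam = log_alpha - torch.log(sigma)
    return alpha.item(), sigma.item(), lam.item()

print(marginal_terms(0))     # nearly noise-free: alpha ~ 1, sigma ~ 0, large positive lambda
print(marginal_terms(999))   # nearly pure noise: alpha ~ 0, sigma ~ 1, negative lambda
```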
*** Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. @@ -92,47 +92,52 @@ class NoiseScheduleVP: """ - if schedule not in ['discrete', 'linear', 'cosine']: + if schedule not in ["discrete", "linear", "cosine"]: raise ValueError(f"Unsupported noise schedule {schedule}. The schedule needs to be 'discrete' or 'linear' or 'cosine'") self.schedule = schedule - if schedule == 'discrete': + if schedule == "discrete": if betas is not None: log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) else: assert alphas_cumprod is not None log_alphas = 0.5 * torch.log(alphas_cumprod) self.total_N = len(log_alphas) - self.T = 1. - self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) - self.log_alpha_array = log_alphas.reshape((1, -1,)) + self.T = 1.0 + self.t_array = torch.linspace(0.0, 1.0, self.total_N + 1)[1:].reshape((1, -1)) + self.log_alpha_array = log_alphas.reshape( + ( + 1, + -1, + ) + ) else: self.total_N = 1000 self.beta_0 = continuous_beta_0 self.beta_1 = continuous_beta_1 self.cosine_s = 0.008 - self.cosine_beta_max = 999. - self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s - self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) + self.cosine_beta_max = 999.0 + self.cosine_t_max = math.atan(self.cosine_beta_max * (1.0 + self.cosine_s) / math.pi) * 2.0 * (1.0 + self.cosine_s) / math.pi - self.cosine_s + self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1.0 + self.cosine_s) * math.pi / 2.0)) self.schedule = schedule - if schedule == 'cosine': + if schedule == "cosine": # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. self.T = 0.9946 else: - self.T = 1. + self.T = 1.0 def marginal_log_mean_coeff(self, t): """ Compute log(alpha_t) of a given continuous-time label t in [0, T]. """ - if self.schedule == 'discrete': + if self.schedule == "discrete": return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1)) - elif self.schedule == 'linear': - return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 - elif self.schedule == 'cosine': - log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) - log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 + elif self.schedule == "linear": + return -0.25 * t**2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 + elif self.schedule == "cosine": + log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1.0 + self.cosine_s) * math.pi / 2.0)) + log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 return log_alpha_t def marginal_alpha(self, t): @@ -145,31 +150,31 @@ class NoiseScheduleVP: """ Compute sigma_t of a given continuous-time label t in [0, T]. """ - return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + return torch.sqrt(1.0 - torch.exp(2.0 * self.marginal_log_mean_coeff(t))) def marginal_lambda(self, t): """ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. """ log_mean_coeff = self.marginal_log_mean_coeff(t) - log_std = 0.5 * torch.log(1. - torch.exp(2. 
* log_mean_coeff)) + log_std = 0.5 * torch.log(1.0 - torch.exp(2.0 * log_mean_coeff)) return log_mean_coeff - log_std def inverse_lambda(self, lamb): """ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. """ - if self.schedule == 'linear': - tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + if self.schedule == "linear": + tmp = 2.0 * (self.beta_1 - self.beta_0) * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb)) Delta = self.beta_0**2 + tmp return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) - elif self.schedule == 'discrete': - log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) + elif self.schedule == "discrete": + log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2.0 * lamb) t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1])) return t.reshape((-1,)) else: - log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + log_alpha = -0.5 * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb)) + t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2.0 * (1.0 + self.cosine_s) / math.pi - self.cosine_s t = t_fn(log_alpha) return t @@ -180,9 +185,9 @@ def model_wrapper( model_type="noise", model_kwargs=None, guidance_type="uncond", - #condition=None, - #unconditional_condition=None, - guidance_scale=1., + # condition=None, + # unconditional_condition=None, + guidance_scale=1.0, classifier_fn=None, classifier_kwargs=None, ): @@ -284,8 +289,8 @@ def model_wrapper( For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. For continuous-time DPMs, we just use `t_continuous`. """ - if noise_schedule.schedule == 'discrete': - return (t_continuous - 1. / noise_schedule.total_N) * 1000. + if noise_schedule.schedule == "discrete": + return (t_continuous - 1.0 / noise_schedule.total_N) * 1000.0 else: return t_continuous @@ -323,7 +328,7 @@ def model_wrapper( def model_fn(x, t_continuous, condition, unconditional_condition): """ - The noise predicition model function that is used for DPM-Solver. + The noise prediction model function that is used for DPM-Solver. """ if t_continuous.reshape((-1,)).shape[0] == 1: t_continuous = t_continuous.expand((x.shape[0])) @@ -337,7 +342,7 @@ def model_wrapper( noise = noise_pred_fn(x, t_continuous) return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad elif guidance_type == "classifier-free": - if guidance_scale == 1. 
or unconditional_condition is None: + if guidance_scale == 1.0 or unconditional_condition is None: return noise_pred_fn(x, t_continuous, cond=condition) else: x_in = torch.cat([x] * 2) @@ -347,13 +352,9 @@ def model_wrapper( c_in = {} for k in condition: if isinstance(condition[k], list): - c_in[k] = [torch.cat([ - unconditional_condition[k][i], - condition[k][i]]) for i in range(len(condition[k]))] + c_in[k] = [torch.cat([unconditional_condition[k][i], condition[k][i]]) for i in range(len(condition[k]))] else: - c_in[k] = torch.cat([ - unconditional_condition[k], - condition[k]]) + c_in[k] = torch.cat([unconditional_condition[k], condition[k]]) elif isinstance(condition, list): c_in = [] assert isinstance(unconditional_condition, list) @@ -370,20 +371,7 @@ def model_wrapper( class UniPC: - def __init__( - self, - model_fn, - noise_schedule, - predict_x0=True, - thresholding=False, - max_val=1., - variant='bh1', - condition=None, - unconditional_condition=None, - before_sample=None, - after_sample=None, - after_update=None - ): + def __init__(self, model_fn, noise_schedule, predict_x0=True, thresholding=False, max_val=1.0, variant="bh1", condition=None, unconditional_condition=None, before_sample=None, after_sample=None, after_update=None): """Construct a UniPC. We support both data_prediction and noise_prediction. @@ -441,7 +429,7 @@ class UniPC: alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) if self.thresholding: - p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. + p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) x0 = torch.clamp(x0, -s, s) / s @@ -457,18 +445,17 @@ class UniPC: return self.noise_prediction_fn(x, t) def get_time_steps(self, skip_type, t_T, t_0, N, device): - """Compute the intermediate time steps for sampling. - """ - if skip_type == 'logSNR': + """Compute the intermediate time steps for sampling.""" + if skip_type == "logSNR": lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) return self.noise_schedule.inverse_lambda(logSNR_steps) - elif skip_type == 'time_uniform': + elif skip_type == "time_uniform": return torch.linspace(t_T, t_0, N + 1).to(device) - elif skip_type == 'time_quadratic': + elif skip_type == "time_quadratic": t_order = 2 - t = torch.linspace(t_T**(1. / t_order), t_0**(1. 
/ t_order), N + 1).pow(t_order).to(device) + t = torch.linspace(t_T ** (1.0 / t_order), t_0 ** (1.0 / t_order), N + 1).pow(t_order).to(device) return t else: raise ValueError(f"Unsupported skip_type {skip_type}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'") @@ -480,28 +467,58 @@ class UniPC: if order == 3: K = steps // 3 + 1 if steps % 3 == 0: - orders = [3,] * (K - 2) + [2, 1] + orders = [ + 3, + ] * ( + K - 2 + ) + [2, 1] elif steps % 3 == 1: - orders = [3,] * (K - 1) + [1] + orders = [ + 3, + ] * ( + K - 1 + ) + [1] else: - orders = [3,] * (K - 1) + [2] + orders = [ + 3, + ] * ( + K - 1 + ) + [2] elif order == 2: if steps % 2 == 0: K = steps // 2 - orders = [2,] * K + orders = [ + 2, + ] * K else: K = steps // 2 + 1 - orders = [2,] * (K - 1) + [1] + orders = [ + 2, + ] * ( + K - 1 + ) + [1] elif order == 1: K = steps - orders = [1,] * steps + orders = [ + 1, + ] * steps else: raise ValueError("'order' must be '1' or '2' or '3'.") - if skip_type == 'logSNR': + if skip_type == "logSNR": # To reproduce the results in DPM-Solver paper timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) else: - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)] + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ + torch.cumsum( + torch.tensor( + [ + 0, + ] + + orders + ), + 0, + ).to(device) + ] return timesteps_outer, orders def denoise_to_zero_fn(self, x, s): @@ -513,14 +530,14 @@ class UniPC: def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **kwargs): if len(t.shape) == 0: t = t.view(-1) - if 'bh' in self.variant: + if "bh" in self.variant: return self.multistep_uni_pc_bh_update(x, model_prev_list, t_prev_list, t, order, **kwargs) else: - assert self.variant == 'vary_coeff' + assert self.variant == "vary_coeff" return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs) def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True): - #print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)') + # print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)') ns = self.noise_schedule assert order <= len(model_prev_list) @@ -545,7 +562,7 @@ class UniPC: rks.append(rk) D1s.append((model_prev_i - model_prev_0) / rk) - rks.append(1.) 
+ rks.append(1.0) rks = torch.tensor(rks, device=x.device) K = len(rks) @@ -559,12 +576,12 @@ class UniPC: C = torch.stack(C, dim=1) if len(D1s) > 0: - D1s = torch.stack(D1s, dim=1) # (B, K) + D1s = torch.stack(D1s, dim=1) # (B, K) C_inv_p = torch.linalg.inv(C[:-1, :-1]) A_p = C_inv_p if use_corrector: - #print('using corrector') + # print('using corrector') C_inv = torch.linalg.inv(C) A_c = C_inv @@ -576,54 +593,48 @@ class UniPC: for k in range(1, K + 2): h_phi_ks.append(h_phi_k) h_phi_k = h_phi_k / hh - 1 / factorial_k - factorial_k *= (k + 1) + factorial_k *= k + 1 model_t = None if self.predict_x0: - x_t_ = ( - sigma_t / sigma_prev_0 * x - - alpha_t * h_phi_1 * model_prev_0 - ) + x_t_ = sigma_t / sigma_prev_0 * x - alpha_t * h_phi_1 * model_prev_0 # now predictor x_t = x_t_ if len(D1s) > 0: # compute the residuals for predictor for k in range(K - 1): - x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k]) + x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum("bkchw,k->bchw", D1s, A_p[k]) # now corrector if use_corrector: model_t = self.model_fn(x_t, t) - D1_t = (model_t - model_prev_0) + D1_t = model_t - model_prev_0 x_t = x_t_ k = 0 for k in range(K - 1): - x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1]) + x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum("bkchw,k->bchw", D1s, A_c[k][:-1]) x_t = x_t - alpha_t * h_phi_ks[K] * (D1_t * A_c[k][-1]) else: log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - x_t_ = ( - (torch.exp(log_alpha_t - log_alpha_prev_0)) * x - - (sigma_t * h_phi_1) * model_prev_0 - ) + x_t_ = (torch.exp(log_alpha_t - log_alpha_prev_0)) * x - (sigma_t * h_phi_1) * model_prev_0 # now predictor x_t = x_t_ if len(D1s) > 0: # compute the residuals for predictor for k in range(K - 1): - x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k]) + x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum("bkchw,k->bchw", D1s, A_p[k]) # now corrector if use_corrector: model_t = self.model_fn(x_t, t) - D1_t = (model_t - model_prev_0) + D1_t = model_t - model_prev_0 x_t = x_t_ k = 0 for k in range(K - 1): - x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1]) + x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum("bkchw,k->bchw", D1s, A_c[k][:-1]) x_t = x_t - sigma_t * h_phi_ks[K] * (D1_t * A_c[k][-1]) return x_t, model_t def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order, x_t=None, use_corrector=True): - #print(f'using unified predictor-corrector with order {order} (solver type: B(h))') + # print(f'using unified predictor-corrector with order {order} (solver type: B(h))') ns = self.noise_schedule assert order <= len(model_prev_list) dims = x.dim() @@ -649,21 +660,21 @@ class UniPC: rks.append(rk) D1s.append((model_prev_i - model_prev_0) / rk) - rks.append(1.) 
+ rks.append(1.0) rks = torch.tensor(rks, device=x.device) R = [] b = [] hh = -h[0] if self.predict_x0 else h[0] - h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 + h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 h_phi_k = h_phi_1 / hh - 1 factorial_i = 1 - if self.variant == 'bh1': + if self.variant == "bh1": B_h = hh - elif self.variant == 'bh2': + elif self.variant == "bh2": B_h = torch.expm1(hh) else: raise NotImplementedError() @@ -671,7 +682,7 @@ class UniPC: for i in range(1, order + 1): R.append(torch.pow(rks, i - 1)) b.append(h_phi_k * factorial_i / B_h) - factorial_i *= (i + 1) + factorial_i *= i + 1 h_phi_k = h_phi_k / hh - 1 / factorial_i R = torch.stack(R) @@ -680,7 +691,7 @@ class UniPC: # now predictor use_predictor = len(D1s) > 0 and x_t is None if len(D1s) > 0: - D1s = torch.stack(D1s, dim=1) # (B, K) + D1s = torch.stack(D1s, dim=1) # (B, K) if x_t is None: # for order 2, we use a simplified version if order == 2: @@ -691,7 +702,7 @@ class UniPC: D1s = None if use_corrector: - #print('using corrector') + # print('using corrector') # for order 1, we use a simplified version if order == 1: rhos_c = torch.tensor([0.5], device=b.device) @@ -700,14 +711,11 @@ class UniPC: model_t = None if self.predict_x0: - x_t_ = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * h_phi_1, dims)* model_prev_0 - ) + x_t_ = expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * h_phi_1, dims) * model_prev_0 if x_t is None: if use_predictor: - pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s) + pred_res = torch.einsum("k,bkchw->bchw", rhos_p, D1s) else: pred_res = 0 x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * pred_res @@ -715,19 +723,16 @@ class UniPC: if use_corrector: model_t = self.model_fn(x_t, t) if D1s is not None: - corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s) + corr_res = torch.einsum("k,bkchw->bchw", rhos_c[:-1], D1s) else: corr_res = 0 - D1_t = (model_t - model_prev_0) + D1_t = model_t - model_prev_0 x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t) else: - x_t_ = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * h_phi_1, dims) * model_prev_0 - ) + x_t_ = expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * h_phi_1, dims) * model_prev_0 if x_t is None: if use_predictor: - pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s) + pred_res = torch.einsum("k,bkchw->bchw", rhos_p, D1s) else: pred_res = 0 x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * pred_res @@ -735,25 +740,36 @@ class UniPC: if use_corrector: model_t = self.model_fn(x_t, t) if D1s is not None: - corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s) + corr_res = torch.einsum("k,bkchw->bchw", rhos_c[:-1], D1s) else: corr_res = 0 - D1_t = (model_t - model_prev_0) + D1_t = model_t - model_prev_0 x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t) return x_t, model_t - - def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', - method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', - atol=0.0078, rtol=0.05, corrector=False, + def sample( + self, + x, + steps=20, + t_start=None, + t_end=None, + order=3, + skip_type="time_uniform", + method="singlestep", + lower_order_final=True, + denoise_to_zero=False, + solver_type="dpm_solver", + atol=0.0078, + rtol=0.05, + corrector=False, ): - t_0 = 1. 
/ self.noise_schedule.total_N if t_end is None else t_end + t_0 = 1.0 / self.noise_schedule.total_N if t_end is None else t_end t_T = self.noise_schedule.T if t_start is None else t_start device = x.device - if method == 'multistep': + if method == "multistep": assert steps >= order, "UniPC order must be < sampling steps" timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) - #print(f"Running UniPC Sampling with {timesteps.shape[0]} timesteps, order {order}") + # print(f"Running UniPC Sampling with {timesteps.shape[0]} timesteps, order {order}") assert timesteps.shape[0] - 1 == steps with torch.no_grad(): vec_t = timesteps[0].expand((x.shape[0])) @@ -778,13 +794,13 @@ class UniPC: step_order = min(order, steps + 1 - step) else: step_order = order - #print('this step order:', step_order) + # print('this step order:', step_order) if step == steps: - #print('do not run corrector at the last step') + # print('do not run corrector at the last step') use_corrector = False else: use_corrector = True - x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector) + x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector) if self.after_update is not None: self.after_update(x, model_x) for i in range(order - 1): @@ -808,6 +824,7 @@ class UniPC: # other utility functions ############################################################# + def interpolate_fn(x, xp, yp): """ A piecewise linear function y = f(x), using xp and yp as keypoints. @@ -830,7 +847,9 @@ def interpolate_fn(x, xp, yp): torch.eq(x_idx, 0), torch.tensor(1, device=x.device), torch.where( - torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + torch.eq(x_idx, K), + torch.tensor(K - 2, device=x.device), + cand_start_idx, ), ) end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) @@ -840,7 +859,9 @@ def interpolate_fn(x, xp, yp): torch.eq(x_idx, 0), torch.tensor(0, device=x.device), torch.where( - torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + torch.eq(x_idx, K), + torch.tensor(K - 2, device=x.device), + cand_start_idx, ), ) y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) @@ -860,4 +881,4 @@ def expand_dims(v, dims): Returns: a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. 
""" - return v[(...,) + (None,)*(dims - 1)] + return v[(...,) + (None,) * (dims - 1)] diff --git a/modules/upscaler.py b/modules/upscaler.py index eec2eb6c480d9ed4c2aecf9bcb1c622ded8ce02c..f4225a0655d5663d1bb932319aaf10858224e18c 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -3,7 +3,7 @@ from abc import abstractmethod from PIL import Image -from modules import devices, modelloader +from modules import devices, modelloader, shared from modules.images import LANCZOS, NEAREST from modules.shared import cmd_opts, models_path, opts @@ -52,6 +52,8 @@ class Upscaler: dest_h: int = (img.height * scale) // 8 * 8 for _ in range(UPSCALE_ITERATIONS): + if shared.state.interrupted: + break img = self.do_upscale(img, selected_model) if ((img.width >= dest_w) and (img.height >= dest_h)) or (int(scale) == 1): break diff --git a/modules/upscaler_utils.py b/modules/upscaler_utils.py index 06f35bc89465e10234f2f85021de0a1e2299480b..0b97fd151d2ba27992bd5952ae7c0d7c43d07186 100644 --- a/modules/upscaler_utils.py +++ b/modules/upscaler_utils.py @@ -1,4 +1,5 @@ import logging +from functools import wraps from typing import Callable import numpy as np @@ -11,43 +12,214 @@ from modules import devices, images, shared, torch_utils logger = logging.getLogger(__name__) +def try_patch_spandrel(): + try: + from spandrel.architectures.__arch_helpers.block import RRDB, ResidualDenseBlock_5C + + _orig_init: Callable = ResidualDenseBlock_5C.__init__ + _orig_5c_forward: Callable = ResidualDenseBlock_5C.forward + _orig_forward: Callable = RRDB.forward + + @wraps(_orig_init) + def RDB5C_init(self, *args, **kwargs): + _orig_init(self, *args, **kwargs) + self.nf, self.gc = kwargs.get("nf", 64), kwargs.get("gc", 32) + + @wraps(_orig_5c_forward) + def RDB5C_forward(self, x: torch.Tensor): + B, _, H, W = x.shape + nf, gc = self.nf, self.gc + + buf = torch.empty((B, nf + 4 * gc, H, W), dtype=x.dtype, device=x.device) + buf[:, :nf].copy_(x) + + x1 = self.conv1(x) + buf[:, nf : nf + gc].copy_(x1) + + x2 = self.conv2(buf[:, : nf + gc]) + if self.conv1x1: + x2.add_(self.conv1x1(x)) + buf[:, nf + gc : nf + 2 * gc].copy_(x2) + + x3 = self.conv3(buf[:, : nf + 2 * gc]) + buf[:, nf + 2 * gc : nf + 3 * gc].copy_(x3) + + x4 = self.conv4(buf[:, : nf + 3 * gc]) + if self.conv1x1: + x4.add_(x2) + buf[:, nf + 3 * gc : nf + 4 * gc].copy_(x4) + + x5 = self.conv5(buf) + return x5.mul_(0.2).add_(x) + + @wraps(_orig_forward) + def RRDB_forward(self, x): + return self.RDB3(self.RDB2(self.RDB1(x))).mul_(0.2).add_(x) + + ResidualDenseBlock_5C.__init__ = RDB5C_init + ResidualDenseBlock_5C.forward = RDB5C_forward + RRDB.forward = RRDB_forward + + logger.info("Successfully patched Spandrel blocks") + except Exception as e: + logger.info(f"Failed to patch Spandrel blocks\n{type(e).__name__}: {e}") + + +try_patch_spandrel() + + +def _model(model: Callable, x: torch.Tensor) -> torch.Tensor: + if x.dtype == torch.float32 or model.architecture.name not in ("ATD", "DAT"): + return model(x) + + # Spandrel does not correctly handle non-FP32 for ATD and DAT models + try: + # Force the upscaler to use the dtype it should for new tensors + torch.set_default_dtype(x.dtype) + # Using torch.device incurs a small amount of overhead, but makes sure we don't + # get errors when unsupported dtype tensors would be made on the CPU. 
+ with torch.device(x.device): + return model(x) + finally: + torch.set_default_dtype(torch.float32) + + +def pil_rgb_to_tensor_bgr(img: Image.Image, param: torch.Tensor) -> torch.Tensor: + tensor = torch.from_numpy(np.asarray(img)).to(param.device) + tensor = tensor.to(param.dtype).mul_(1.0 / 255.0).permute(2, 0, 1) + return tensor[[2, 1, 0], ...].unsqueeze(0).contiguous() + + +def tensor_bgr_to_pil_rgb(tensor: torch.Tensor) -> Image.Image: + tensor = tensor[:, [2, 1, 0], ...] + tensor = tensor.squeeze(0).permute(1, 2, 0).mul_(255.0).round_().clamp_(0.0, 255.0) + return Image.fromarray(tensor.to(torch.uint8).cpu().numpy()) + + def pil_image_to_torch_bgr(img: Image.Image) -> torch.Tensor: img = np.array(img.convert("RGB")) - img = img[:, :, ::-1] # flip RGB to BGR - img = np.transpose(img, (2, 0, 1)) # HWC to CHW - img = np.ascontiguousarray(img) / 255 # Rescale to [0, 1] + img = img[:, :, ::-1] + img = np.transpose(img, (2, 0, 1)) + img = np.ascontiguousarray(img) / 255 return torch.from_numpy(img) def torch_bgr_to_pil_image(tensor: torch.Tensor) -> Image.Image: if tensor.ndim == 4: - # If we're given a tensor with a batch dimension, squeeze it out - # (but only if it's a batch of size 1). if tensor.shape[0] != 1: raise ValueError(f"{tensor.shape} does not describe a BCHW tensor") tensor = tensor.squeeze(0) assert tensor.ndim == 3, f"{tensor.shape} does not describe a CHW tensor" arr = tensor.detach().float().cpu().numpy() - arr = 255.0 * np.moveaxis(arr, 0, 2) # CHW to HWC, rescale - arr = np.clip(arr, 0, 255).astype(np.uint8) # clamp - arr = arr[:, :, ::-1] # flip BGR to RGB + arr = 255.0 * np.moveaxis(arr, 0, 2) + arr = np.clip(arr, 0, 255).astype(np.uint8) + arr = arr[:, :, ::-1] return Image.fromarray(arr, "RGB") +@torch.inference_mode() +def upscale_tensor_tiles(model: Callable, tensor: torch.Tensor, tile_size: int, overlap: int, desc: str) -> torch.Tensor: + _, _, H_in, W_in = tensor.shape + stride = tile_size - overlap + n_tiles_x, n_tiles_y = (W_in + stride - 1) // stride, (H_in + stride - 1) // stride + total_tiles = n_tiles_x * n_tiles_y + + if tile_size <= 0 or total_tiles <= 4: + return _model(model, tensor) + + device = tensor.device + dtype = tensor.dtype # Accumulate in native model dtype + + accum = None + model_scale = None + H_out = W_out = None + + last_mask = None + last_mask_key = None + + def get_weight_mask(h, w, y, x): + """Generate feathered mask for tile overlap""" + top, bottom, left, right = y > 0, y + h < H_out, x > 0, x + w < W_out + key = (h, w, top, bottom, left, right) + + if key == last_mask_key: + return key, last_mask + elif overlap == 0: + mask = torch.ones((1, 1, h, w), device=device, dtype=dtype) + else: + ov_h, ov_w = min(overlap, h), min(overlap, w) + + ramp_x, ramp_y = torch.ones(w, device=device, dtype=dtype), torch.ones(h, device=device, dtype=dtype) + fade_x, fade_y = torch.linspace(0, 1, ov_w, device=device, dtype=dtype), torch.linspace(0, 1, ov_h, device=device, dtype=dtype) + + ramp_x[:ov_w].lerp_(fade_x, float(left)) + ramp_x[-ov_w:].lerp_(fade_x.flip(0), float(right)) + ramp_y[:ov_h].lerp_(fade_y, float(top)) + ramp_y[-ov_h:].lerp_(fade_y.flip(0), float(bottom)) + + mask = (ramp_y[:, None] * ramp_x[None, :]).expand(1, 1, h, w) + return key, mask + + with tqdm.tqdm(desc=desc, total=total_tiles) as pbar: + for tile_idx in range(total_tiles): + if shared.state.interrupted: + return None + + # Loop in row-major or column-major, depending on aspect ratio to maximise hit-rate on cached mask + x_idx, y_idx = (tile_idx % n_tiles_x, tile_idx // 
n_tiles_x) if W_in >= H_in else (tile_idx // n_tiles_y, tile_idx % n_tiles_y) + x, y = x_idx * stride, y_idx * stride + + tile = tensor[:, :, y : y + tile_size, x : x + tile_size] + out = _model(model, tile) + + if model_scale is None: + model_scale = out.shape[-2] / tile.shape[-2] + H_out, W_out = int(H_in * model_scale), int(W_in * model_scale) + accum = torch.zeros((1, 4, H_out, W_out), dtype=dtype, device=device) + + h_out, w_out = out.shape[-2:] + y_out, x_out = int(y * model_scale), int(x * model_scale) + ys, ye = y_out, y_out + h_out + xs, xe = x_out, x_out + w_out + + last_mask_key, last_mask = get_weight_mask(h_out, w_out, y_out, x_out) + accum_slice = accum[:, :, ys:ye, xs:xe] + accum_slice[:, :3].addcmul_(out, last_mask) + accum_slice[:, 3:].add_(last_mask) + + del tile, out + pbar.update(1) + + del last_mask + return accum[:, :3].div_(accum[:, 3:].clamp_min_(1e-6)) + + +def upscale_with_model_gpu( + model: Callable[[torch.Tensor], torch.Tensor], + img: Image.Image, + *, + tile_size: int, + tile_overlap: int = 0, + desc="tiled upscale", +) -> Image.Image: + + tensor = pil_rgb_to_tensor_bgr(img, torch_utils.get_param(model)) + out = upscale_tensor_tiles(model, tensor, tile_size, tile_overlap, desc) + return img if out is None else tensor_bgr_to_pil_rgb(out) + + def upscale_pil_patch(model, img: Image.Image) -> Image.Image: - """ - Upscale a given PIL image using the given model. - """ + """Upscale a given PIL image using the given model""" param = torch_utils.get_param(model) with torch.inference_mode(): - tensor = pil_image_to_torch_bgr(img).unsqueeze(0) # add batch dimension + tensor = pil_image_to_torch_bgr(img).unsqueeze(0) tensor = tensor.to(device=param.device, dtype=param.dtype) with devices.without_autocast(): - return torch_bgr_to_pil_image(model(tensor)) + return torch_bgr_to_pil_image(_model(model, tensor)) -def upscale_with_model( +def upscale_with_model_cpu( model: Callable[[torch.Tensor], torch.Tensor], img: Image.Image, *, @@ -72,6 +244,8 @@ def upscale_with_model( for y, h, row in grid.tiles: newrow = [] for x, w, tile in row: + if shared.state.interrupted: + break logger.debug("Tile (%d, %d) %s...", x, y, tile) output = upscale_pil_patch(model, tile) scale_factor = output.width // tile.width @@ -91,109 +265,15 @@ def upscale_with_model( return images.combine_grid(newgrid) -def tiled_upscale_2( - img: torch.Tensor, - model, - *, - tile_size: int, - tile_overlap: int, - scale: int, - device: torch.device, - desc="Tiled upscale", -): - """ - Alternative implementation of `upscale_with_model` originally used by - SwinIR and ScuNET. It differs from `upscale_with_model` in that tiling and - weighting is done in PyTorch space, as opposed to `images.Grid` doing it in - Pillow space without weighting. 
- """ - - b, c, h, w = img.size() - tile_size = min(tile_size, h, w) - - if tile_size <= 0: - logger.debug("Upscaling %s without tiling", img.shape) - return model(img) - - stride = tile_size - tile_overlap - h_idx_list = list(range(0, h - tile_size, stride)) + [h - tile_size] - w_idx_list = list(range(0, w - tile_size, stride)) + [w - tile_size] - result = torch.zeros( - b, - c, - h * scale, - w * scale, - device=device, - dtype=img.dtype, - ) - weights = torch.zeros_like(result) - logger.debug("Upscaling %s to %s with tiles", img.shape, result.shape) - with tqdm.tqdm( - total=len(h_idx_list) * len(w_idx_list), - desc=desc, - disable=not shared.opts.enable_upscale_progressbar, - ) as pbar: - for h_idx in h_idx_list: - if shared.state.interrupted or shared.state.skipped: - break - - for w_idx in w_idx_list: - if shared.state.interrupted or shared.state.skipped: - break - - # Only move this patch to the device if it's not already there. - in_patch = img[ - ..., - h_idx : h_idx + tile_size, - w_idx : w_idx + tile_size, - ].to(device=device) - - out_patch = model(in_patch) - - result[ - ..., - h_idx * scale : (h_idx + tile_size) * scale, - w_idx * scale : (w_idx + tile_size) * scale, - ].add_(out_patch) - - out_patch_mask = torch.ones_like(out_patch) - - weights[ - ..., - h_idx * scale : (h_idx + tile_size) * scale, - w_idx * scale : (w_idx + tile_size) * scale, - ].add_(out_patch_mask) - - pbar.update(1) - - output = result.div_(weights) - - return output - - -def upscale_2( +def upscale_with_model( + model: Callable[[torch.Tensor], torch.Tensor], img: Image.Image, - model, *, tile_size: int, - tile_overlap: int, - scale: int, - desc: str, -): - """ - Convenience wrapper around `tiled_upscale_2` that handles PIL images. - """ - param = torch_utils.get_param(model) - tensor = pil_image_to_torch_bgr(img).to(dtype=param.dtype).unsqueeze(0) - - with torch.inference_mode(): - output = tiled_upscale_2( - tensor, - model, - tile_size=tile_size, - tile_overlap=tile_overlap, - scale=scale, - desc=desc, - device=param.device, - ) - return torch_bgr_to_pil_image(output) + tile_overlap: int = 0, + desc="tiled upscale", +) -> Image.Image: + if shared.opts.composite_tiles_on_gpu: + return upscale_with_model_gpu(model, img, tile_size=tile_size, tile_overlap=tile_overlap, desc=f"{desc} (GPU Composite)") + else: + return upscale_with_model_cpu(model, img, tile_size=tile_size, tile_overlap=tile_overlap, desc=f"{desc} (CPU Composite)") diff --git a/modules_forge/forge_alter_samplers.py b/modules_forge/forge_alter_samplers.py index c687e65ee9055c2a085194a582e9ab4e2223d424..dbe7a736d047fac80bd4cb1c86f4e86756089bf3 100644 --- a/modules_forge/forge_alter_samplers.py +++ b/modules_forge/forge_alter_samplers.py @@ -1,4 +1,5 @@ import logging +from typing import Callable from modules import sd_samplers_cfgpp, sd_samplers_common, sd_samplers_kdiffusion @@ -8,36 +9,46 @@ class AlterSampler(sd_samplers_kdiffusion.KDiffusionSampler): self.sampler_name = sampler_name self.scheduler_name = scheduler_name self.unet = sd_model.forge_objects.unet - sampler_function = getattr(sd_samplers_cfgpp, f"sample_{self.sampler_name}", None) + sampler_function: Callable = getattr(sd_samplers_cfgpp, f"sample_{self.sampler_name}", None) if sampler_function is None: raise ValueError(f"Unknown sampler: {sampler_name}") super().__init__(sampler_function, sd_model, None) def sample(self, p, *args, **kwargs): - # self.scheduler_name = p.scheduler if p.cfg_scale > 2.0: - logging.warning("Low CFG is recommended when using CFG++ samplers") + 
logging.warning("CFG between 1.0 ~ 2.0 is recommended when using CFG++ samplers") return super().sample(p, *args, **kwargs) def sample_img2img(self, p, *args, **kwargs): - # self.scheduler_name = p.scheduler if p.cfg_scale > 2.0: - logging.warning("Low CFG is recommended when using CFG++ samplers") + logging.warning("CFG between 1.0 ~ 2.0 is recommended when using CFG++ samplers") return super().sample_img2img(p, *args, **kwargs) -def build_constructor(sampler_name): +def build_constructor(sampler_key: str) -> Callable: def constructor(model): - return AlterSampler(model, sampler_name) + return AlterSampler(model, sampler_key) return constructor +def create_cfg_pp_sampler(sampler_name: str, sampler_key: str) -> "sd_samplers_common.SamplerData": + config = {} + base_name = sampler_name.removesuffix(" CFG++") + for name, _, _, params in sd_samplers_kdiffusion.samplers_k_diffusion: + if name == base_name: + config = params.copy() + break + + return sd_samplers_common.SamplerData(sampler_name, build_constructor(sampler_key=sampler_key), [sampler_key], config) + + samplers_data_alter = [ - sd_samplers_common.SamplerData("DPM++ 2M CFG++", build_constructor(sampler_name="dpmpp_2m_cfg_pp"), ["dpmpp_2m_cfg_pp"], {}), - sd_samplers_common.SamplerData("DPM++ SDE CFG++", build_constructor(sampler_name="dpmpp_sde_cfg_pp"), ["dpmpp_sde_cfg_pp"], {}), - sd_samplers_common.SamplerData("DPM++ 3M SDE CFG++", build_constructor(sampler_name="dpmpp_3m_sde_cfg_pp"), ["dpmpp_3m_sde_cfg_pp"], {}), - sd_samplers_common.SamplerData("Euler a CFG++", build_constructor(sampler_name="euler_ancestral_cfg_pp"), ["euler_ancestral_cfg_pp"], {}), - sd_samplers_common.SamplerData("Euler CFG++", build_constructor(sampler_name="euler_cfg_pp"), ["euler_cfg_pp"], {}), + create_cfg_pp_sampler("DPM++ 2M CFG++", "dpmpp_2m_cfg_pp"), + create_cfg_pp_sampler("DPM++ SDE CFG++", "dpmpp_sde_cfg_pp"), + create_cfg_pp_sampler("DPM++ 2M SDE CFG++", "dpmpp_2m_sde_cfg_pp"), + create_cfg_pp_sampler("DPM++ 3M SDE CFG++", "dpmpp_3m_sde_cfg_pp"), + create_cfg_pp_sampler("Euler a CFG++", "euler_ancestral_cfg_pp"), + create_cfg_pp_sampler("Euler CFG++", "euler_cfg_pp"), ] diff --git a/modules_forge/forge_loader.py b/modules_forge/forge_loader.py index 35e576a649dc7812e9b8e9232c3d708dc5a6bdac..2b1771e89cc4b2fa93fee67973da1df32d97c039 100644 --- a/modules_forge/forge_loader.py +++ b/modules_forge/forge_loader.py @@ -1,19 +1,20 @@ import contextlib +import torch +from omegaconf import OmegaConf + import ldm_patched.modules.clip_vision -import ldm_patched.modules.model_patcher import ldm_patched.modules.utils -import torch from ldm_patched.ldm.util import instantiate_from_config from ldm_patched.modules import model_detection, model_management from ldm_patched.modules.model_base import ModelType, model_sampling +from ldm_patched.modules.model_patcher import ModelPatcher from ldm_patched.modules.sd import CLIP, VAE, load_model_weights from modules import sd_hijack, shared from modules.sd_models_config import find_checkpoint_config from modules.sd_models_types import WebuiSdModel from modules_forge import forge_clip from modules_forge.unet_patcher import UnetPatcher -from omegaconf import OmegaConf class FakeObject: @@ -29,10 +30,10 @@ class FakeObject: class ForgeObjects: def __init__(self, unet, clip, vae, clipvision): - self.unet = unet - self.clip = clip - self.vae = vae - self.clipvision = clipvision + self.unet: UnetPatcher = unet + self.clip: CLIP = clip + self.vae: VAE = vae + self.clipvision: ModelPatcher = clipvision def shallow_copy(self): 
return ForgeObjects(self.unet, self.clip, self.vae, self.clipvision) diff --git a/modules_forge/unet_patcher.py b/modules_forge/unet_patcher.py index 0c46ccf61c1f5bfaf2cd0a4266da2e6035c67c8f..6b25b58bf19e2bd29a57bd10e16ba9d0ab99db9b 100644 --- a/modules_forge/unet_patcher.py +++ b/modules_forge/unet_patcher.py @@ -159,3 +159,20 @@ class UnetPatcher(ModelPatcher): for number in range(16): for transformer_index in range(16): self.set_model_patch_replace(patch, target, block_name, number, transformer_index) + + def load_frozen_patcher(self, state_dict, strength): + patch_dict = {} + for k, w in state_dict.items(): + model_key, patch_type, weight_index = k.split("::") + if model_key not in patch_dict: + patch_dict[model_key] = {} + if patch_type not in patch_dict[model_key]: + patch_dict[model_key][patch_type] = [None] * 16 + patch_dict[model_key][patch_type][int(weight_index)] = w + + patch_flat = {} + for model_key, v in patch_dict.items(): + for patch_type, weight_list in v.items(): + patch_flat[model_key] = (patch_type, weight_list) + + self.add_patches(patches=patch_flat, strength_patch=float(strength), strength_model=1.0) diff --git a/requirements.txt b/requirements.txt index 50a88661c06a24c3152c4d0965a6fdfaef807c41..41821a0783704078b9659c5dd9d292aaa47fd32c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,7 +22,7 @@ pillow-heif==0.22.0 protobuf==4.25.7 # controlnet psutil==5.9.8 pydantic==1.10.22 -pytorch_lightning==1.9.5 +lightning==2.5.1 resize-right==0.0.2 rich==13.9.4 safetensors==0.5.3 diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index c25aea3ccc874893ef8433f07425a13113c24da5..01a7d550f7f7eb6b5e6f1e01cb71d653a2129187 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -88,7 +88,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): ) with gr.Row(): extras_color_correction = gr.Checkbox( - value=True, + value=False, label="Color Correction", elem_id="extras_color_correction", ) @@ -125,10 +125,11 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): ) extras_upscaler_2_visibility.change( - fn=lambda vis: gr.update(interactive=(vis > 0.0)), + fn=lambda vis: gr.update(interactive=(vis > 0.04)), inputs=[extras_upscaler_2_visibility], outputs=[extras_upscaler_2], show_progress="hidden", + queue=False, ) return { @@ -218,7 +219,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): pp.shared.target_width = int(pp.image.width * upscale_by) pp.shared.target_height = int(pp.image.height * upscale_by) - if upscale_cc and "cc" not in upscale_cache: + if upscale_cc: upscale_cache["cc"] = setup_color_correction(pp.image) def process( @@ -265,9 +266,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): None, ) - assert ( - upscaler2 is not None - ), f'Could not find upscaler "{upscaler_2_name}"' + assert upscaler2 is not None, f'Could not find upscaler "{upscaler_2_name}"' second_upscale = self._upscale( pp.image, diff --git a/style.css b/style.css index 1d3d1428bd48caa0dff1d116bd7adf31a31c80ea..d8386364e24bd5ae15190513b40b87b9de8b41d0 100644 --- a/style.css +++ b/style.css @@ -914,16 +914,16 @@ footer { } .extra-network-pane .nocards { - margin: 1.25em 0.5em 0.5em 0.5em; + margin: 1em; + padding: 1em; } -.extra-network-pane .nocards h1 { - font-size: 1.5em; - margin-bottom: 1em; +.extra-network-pane .nocards h2 { + margin: 0em 0em 1em 0em; } .extra-network-pane .nocards li { - margin-left: 
0.5em; + margin-left: 1em; } .extra-network-pane .card .button-row { @@ -1198,6 +1198,26 @@ body.resizing .resize-handle { border: 1px solid var(--block-border-color); } +.extra-network-pane .extra-network-dirs { + display: flex; + flex-direction: row; + background-color: var(--background-fill-secondary); + border-radius: 0.5em; + margin: 0.25em; + padding: 0.25em 1em; + gap: 0.5em; +} + +.extra-network-pane .extra-network-dirs:not(:has(*)) { + display: none; +} + +.extra-network-pane .extra-network-dirs button { + margin: var(--spacing-sm) 0px; + padding: 0.5em 1em; + border: 1px solid var(--block-border-color); +} + .extra-network-pane .extra-network-tree .tree-list { flex: 1; display: flex; diff --git a/webui.py b/webui.py index 530434769fa1e5aedca9738f93370803b4c63c24..4128733f926aa235c161fcef99d5807a9bbc063a 100644 --- a/webui.py +++ b/webui.py @@ -15,8 +15,10 @@ from modules_forge.initialization import initialize_forge startup_timer = timer.startup_timer startup_timer.record("launcher") +initialize.shush() initialize_forge() +startup_timer.record("forge init") initialize.imports()
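
For reference, below is a minimal standalone sketch of the feathered blending mask used by the new GPU tile composition in `modules/upscaler_utils.py` (it mirrors the logic of `get_weight_mask`; the helper name `feather_mask` and the sizes in the usage line are illustrative, not part of the patch):

```python
import torch

# Sketch of the feathered tile mask used when compositing upscaled tiles on the GPU
# (mirrors get_weight_mask in modules/upscaler_utils.py; name and sizes are illustrative).
def feather_mask(h: int, w: int, overlap: int, top: bool, bottom: bool, left: bool, right: bool) -> torch.Tensor:
    if overlap == 0:
        return torch.ones((1, 1, h, w))
    ov_h, ov_w = min(overlap, h), min(overlap, w)
    ramp_x, ramp_y = torch.ones(w), torch.ones(h)
    fade_x, fade_y = torch.linspace(0, 1, ov_w), torch.linspace(0, 1, ov_h)
    # Fade only the edges that border another tile; a lerp weight of 0.0 leaves the ramp at 1.
    ramp_x[:ov_w].lerp_(fade_x, float(left))
    ramp_x[-ov_w:].lerp_(fade_x.flip(0), float(right))
    ramp_y[:ov_h].lerp_(fade_y, float(top))
    ramp_y[-ov_h:].lerp_(fade_y.flip(0), float(bottom))
    # Outer product gives a 2D weight surface; broadcast to NCHW so it can weight a tile tensor.
    return (ramp_y[:, None] * ramp_x[None, :]).expand(1, 1, h, w)

mask = feather_mask(64, 64, overlap=16, top=True, bottom=False, left=True, right=False)
print(mask.shape)        # torch.Size([1, 1, 64, 64])
print(mask[0, 0, 0, 0])  # 0.0 at the corner shared by the two faded edges
```

In `upscale_tensor_tiles`, each upscaled tile is accumulated as `tile * mask` into the first three channels of a scratch buffer while the mask itself is summed into a fourth weight channel; dividing the two at the end normalizes the overlapping regions, hiding tile seams without ever holding more than one tile's model output at a time.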