John6666 committed
Commit b4b0951 · verified · 1 Parent(s): ae7af4f

Upload 3 files
Files changed (3)
  1. README.md +12 -12
  2. app.py +424 -351
  3. requirements.txt +16 -14
README.md CHANGED
@@ -1,13 +1,13 @@
- ---
- title: Tile Upscaler V2
- emoji: 😻
- colorFrom: gray
- colorTo: purple
- sdk: gradio
- sdk_version: 4.38.1
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
+ ---
+ title: Tile Upscaler V2
+ emoji: 😻
+ colorFrom: gray
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 5.38.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ ---
+
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,351 +1,424 @@
- import spaces
- import os
- import requests
- import time
-
- import subprocess
- subprocess.run("pip install git+https://github.com/inference-sh/Real-ESRGAN.git --no-deps", shell=True)
-
- import torch
-
- from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, DDIMScheduler, DPMSolverMultistepScheduler
- from diffusers.models import AutoencoderKL
- from diffusers.models.attention_processor import AttnProcessor2_0
-
- from PIL import Image
- import cv2
- import numpy as np
-
- from RealESRGAN import RealESRGAN
-
- import random
- import math
-
- import gradio as gr
- from gradio_imageslider import ImageSlider
-
- from huggingface_hub import hf_hub_download
-
- USE_TORCH_COMPILE = False
- ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
-
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- def download_models():
-     models = {
-         "MODEL": ("dantea1118/juggernaut_reborn", "juggernaut_reborn.safetensors", "models/models/Stable-diffusion"),
-         "UPSCALER_X2": ("ai-forever/Real-ESRGAN", "RealESRGAN_x2.pth", "models/upscalers/"),
-         "UPSCALER_X4": ("ai-forever/Real-ESRGAN", "RealESRGAN_x4.pth", "models/upscalers/"),
-         "NEGATIVE_1": ("philz1337x/embeddings", "verybadimagenegative_v1.3.pt", "models/embeddings"),
-         "NEGATIVE_2": ("philz1337x/embeddings", "JuggernautNegative-neg.pt", "models/embeddings"),
-         "LORA_1": ("philz1337x/loras", "SDXLrender_v2.0.safetensors", "models/Lora"),
-         "LORA_2": ("philz1337x/loras", "more_details.safetensors", "models/Lora"),
-         "CONTROLNET": ("lllyasviel/ControlNet-v1-1", "control_v11f1e_sd15_tile.pth", "models/ControlNet"),
-         "VAE": ("stabilityai/sd-vae-ft-mse-original", "vae-ft-mse-840000-ema-pruned.safetensors", "models/VAE"),
-     }
-
-     for model, (repo_id, filename, local_dir) in models.items():
-         hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
-
- download_models()
-
- def timer_func(func):
-     def wrapper(*args, **kwargs):
-         start_time = time.time()
-         result = func(*args, **kwargs)
-         end_time = time.time()
-         print(f"{func.__name__} took {end_time - start_time:.2f} seconds")
-         return result
-     return wrapper
-
- def get_scheduler(scheduler_name, config):
-     if scheduler_name == "DDIM":
-         return DDIMScheduler.from_config(config)
-     elif scheduler_name == "DPM++ 3M SDE Karras":
-         return DPMSolverMultistepScheduler.from_config(config, algorithm_type="sde-dpmsolver++", use_karras_sigmas=True)
-     elif scheduler_name == "DPM++ 3M Karras":
-         return DPMSolverMultistepScheduler.from_config(config, algorithm_type="dpmsolver++", use_karras_sigmas=True)
-     else:
-         raise ValueError(f"Unknown scheduler: {scheduler_name}")
-
- class LazyLoadPipeline:
-     def __init__(self):
-         self.pipe = None
-
-     @timer_func
-     def load(self):
-         if self.pipe is None:
-             print("Starting to load the pipeline...")
-             self.pipe = self.setup_pipeline()
-             print(f"Moving pipeline to device: {device}")
-             self.pipe.to(device)
-             if USE_TORCH_COMPILE:
-                 print("Compiling the model...")
-                 self.pipe.unet = torch.compile(self.pipe.unet, mode="reduce-overhead", fullgraph=True)
-
-     @timer_func
-     def setup_pipeline(self):
-         print("Setting up the pipeline...")
-         controlnet = ControlNetModel.from_single_file(
-             "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
-         )
-         model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
-         pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
-             model_path,
-             controlnet=controlnet,
-             torch_dtype=torch.float16,
-             use_safetensors=True,
-             safety_checker=None
-         )
-         vae = AutoencoderKL.from_single_file(
-             "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
-             torch_dtype=torch.float16
-         )
-         pipe.vae = vae
-         pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
-         pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
-         pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
-         pipe.fuse_lora(lora_scale=0.5)
-         pipe.load_lora_weights("models/Lora/more_details.safetensors")
-         pipe.fuse_lora(lora_scale=1.)
-         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-         pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
-         return pipe
-
-     def set_scheduler(self, scheduler_name):
-         if self.pipe is not None:
-             self.pipe.scheduler = get_scheduler(scheduler_name, self.pipe.scheduler.config)
-
-     def __call__(self, *args, **kwargs):
-         return self.pipe(*args, **kwargs)
-
- class LazyRealESRGAN:
-     def __init__(self, device, scale):
-         self.device = device
-         self.scale = scale
-         self.model = None
-
-     def load_model(self):
-         if self.model is None:
-             self.model = RealESRGAN(self.device, scale=self.scale)
-             self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
-     def predict(self, img):
-         self.load_model()
-         return self.model.predict(img)
-
- lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
- lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
-
- @timer_func
- def resize_and_upscale(input_image, resolution):
-     scale = 2 if resolution <= 2048 else 4
-     input_image = input_image.convert("RGB")
-     W, H = input_image.size
-     k = float(resolution) / min(H, W)
-     H = int(round(H * k / 64.0)) * 64
-     W = int(round(W * k / 64.0)) * 64
-     img = input_image.resize((W, H), resample=Image.LANCZOS)
-     if scale == 2:
-         img = lazy_realesrgan_x2.predict(img)
-     else:
-         img = lazy_realesrgan_x4.predict(img)
-     return img
-
- @timer_func
- def create_hdr_effect(original_image, hdr):
-     if hdr == 0:
-         return original_image
-     cv_original = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
-     factors = [1.0 - 0.9 * hdr, 1.0 - 0.7 * hdr, 1.0 - 0.45 * hdr,
-                1.0 - 0.25 * hdr, 1.0, 1.0 + 0.2 * hdr,
-                1.0 + 0.4 * hdr, 1.0 + 0.6 * hdr, 1.0 + 0.8 * hdr]
-     images = [cv2.convertScaleAbs(cv_original, alpha=factor) for factor in factors]
-     merge_mertens = cv2.createMergeMertens()
-     hdr_image = merge_mertens.process(images)
-     hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
-     return Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
-
- lazy_pipe = LazyLoadPipeline()
- lazy_pipe.load()
-
- @timer_func
- def progressive_upscale(input_image, target_resolution, steps=3):
-     current_image = input_image.convert("RGB")
-     current_size = max(current_image.size)
-
-     for _ in range(steps):
-         if current_size >= target_resolution:
-             break
-
-         scale_factor = min(2, target_resolution / current_size)
-         new_size = (int(current_image.width * scale_factor), int(current_image.height * scale_factor))
-
-         if scale_factor <= 1.5:
-             current_image = current_image.resize(new_size, Image.LANCZOS)
-         else:
-             current_image = lazy_realesrgan_x2.predict(current_image)
-
-         current_size = max(current_image.size)
-
-     # Final resize to exact target resolution
-     if current_size != target_resolution:
-         aspect_ratio = current_image.width / current_image.height
-         if current_image.width > current_image.height:
-             new_size = (target_resolution, int(target_resolution / aspect_ratio))
-         else:
-             new_size = (int(target_resolution * aspect_ratio), target_resolution)
-         current_image = current_image.resize(new_size, Image.LANCZOS)
-
-     return current_image
-
- def prepare_image(input_image, resolution, hdr):
-     upscaled_image = progressive_upscale(input_image, resolution)
-     return create_hdr_effect(upscaled_image, hdr)
-
- def create_gaussian_weight(tile_size, sigma=0.3):
-     x = np.linspace(-1, 1, tile_size)
-     y = np.linspace(-1, 1, tile_size)
-     xx, yy = np.meshgrid(x, y)
-     gaussian_weight = np.exp(-(xx**2 + yy**2) / (2 * sigma**2))
-     return gaussian_weight
-
- def adaptive_tile_size(image_size, base_tile_size=512, max_tile_size=1024):
-     w, h = image_size
-     aspect_ratio = w / h
-     if aspect_ratio > 1:
-         tile_w = min(w, max_tile_size)
-         tile_h = min(int(tile_w / aspect_ratio), max_tile_size)
-     else:
-         tile_h = min(h, max_tile_size)
-         tile_w = min(int(tile_h * aspect_ratio), max_tile_size)
-     return max(tile_w, base_tile_size), max(tile_h, base_tile_size)
-
- def process_tile(tile, num_inference_steps, strength, guidance_scale, controlnet_strength):
-     prompt = "masterpiece, best quality, highres"
-     negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
-
-     options = {
-         "prompt": prompt,
-         "negative_prompt": negative_prompt,
-         "image": tile,
-         "control_image": tile,
-         "num_inference_steps": num_inference_steps,
-         "strength": strength,
-         "guidance_scale": guidance_scale,
-         "controlnet_conditioning_scale": float(controlnet_strength),
-         "generator": torch.Generator(device=device).manual_seed(random.randint(0, 2147483647)),
-     }
-
-     return np.array(lazy_pipe(**options).images[0])
-
- @spaces.GPU
- @timer_func
- def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale, controlnet_strength, scheduler_name):
-     print("Starting image processing...")
-     torch.cuda.empty_cache()
-     lazy_pipe.set_scheduler(scheduler_name)
-
-     # Convert input_image to numpy array
-     input_array = np.array(input_image)
-
-     # Prepare the condition image
-     condition_image = prepare_image(input_image, resolution, hdr)
-     W, H = condition_image.size
-
-     # Adaptive tiling
-     tile_width, tile_height = adaptive_tile_size((W, H))
-
-     # Calculate the number of tiles
-     overlap = min(64, tile_width // 8, tile_height // 8) # Adaptive overlap
-     num_tiles_x = math.ceil((W - overlap) / (tile_width - overlap))
-     num_tiles_y = math.ceil((H - overlap) / (tile_height - overlap))
-
-     # Create a blank canvas for the result
-     result = np.zeros((H, W, 3), dtype=np.float32)
-     weight_sum = np.zeros((H, W, 1), dtype=np.float32)
-
-     # Create gaussian weight
-     gaussian_weight = create_gaussian_weight(max(tile_width, tile_height))
-
-     for i in range(num_tiles_y):
-         for j in range(num_tiles_x):
-             # Calculate tile coordinates
-             left = j * (tile_width - overlap)
-             top = i * (tile_height - overlap)
-             right = min(left + tile_width, W)
-             bottom = min(top + tile_height, H)
-
-             # Adjust tile size if it's at the edge
-             current_tile_size = (bottom - top, right - left)
-
-             tile = condition_image.crop((left, top, right, bottom))
-             tile = tile.resize((tile_width, tile_height))
-
-             # Process the tile
-             result_tile = process_tile(tile, num_inference_steps, strength, guidance_scale, controlnet_strength)
-
-             # Apply gaussian weighting
-             if current_tile_size != (tile_width, tile_height):
-                 result_tile = cv2.resize(result_tile, current_tile_size[::-1])
-                 tile_weight = cv2.resize(gaussian_weight, current_tile_size[::-1])
-             else:
-                 tile_weight = gaussian_weight[:current_tile_size[0], :current_tile_size[1]]
-
-             # Add the tile to the result with gaussian weighting
-             result[top:bottom, left:right] += result_tile * tile_weight[:, :, np.newaxis]
-             weight_sum[top:bottom, left:right] += tile_weight[:, :, np.newaxis]
-
-     # Normalize the result
-     final_result = (result / weight_sum).astype(np.uint8)
-
-     print("Image processing completed successfully")
-
-     return [input_array, final_result]
-
- title = """<h1 align="center">Tile Upscaler V2</h1>
- <p align="center">Creative version of Tile Upscaler. The main ideas come from</p>
- <p><center>
- <a href="https://huggingface.co/spaces/gokaygokay/Tile-Upscaler" target="_blank">[Tile Upscaler]</a>
- <a href="https://github.com/philz1337x/clarity-upscaler" target="_blank">[philz1337x]</a>
- <a href="https://github.com/BatouResearch/controlnet-tile-upscale" target="_blank">[Pau-Lozano]</a>
- </center></p>
- """
-
- with gr.Blocks() as demo:
-     gr.HTML(title)
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(type="pil", label="Input Image")
-             run_button = gr.Button("Enhance Image")
-         with gr.Column():
-             output_slider = ImageSlider(label="Before / After", type="numpy")
-     with gr.Accordion("Advanced Options", open=False):
-         resolution = gr.Slider(minimum=128, maximum=2048, value=1024, step=128, label="Resolution")
-         num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
-         strength = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.01, label="Strength")
-         hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
-         guidance_scale = gr.Slider(minimum=0, maximum=20, value=6, step=0.5, label="Guidance Scale")
-         controlnet_strength = gr.Slider(minimum=0.0, maximum=2.0, value=0.75, step=0.05, label="ControlNet Strength")
-         scheduler_name = gr.Dropdown(
-             choices=["DDIM", "DPM++ 3M SDE Karras", "DPM++ 3M Karras"],
-             value="DDIM",
-             label="Scheduler"
-         )
-
-     run_button.click(fn=gradio_process_image,
-                      inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale, controlnet_strength, scheduler_name],
-                      outputs=output_slider)
-
-     gr.Examples(
-         examples=[
-             ["image1.jpg", 1536, 20, 0.4, 0, 6, 0.75, "DDIM"],
-             ["image2.png", 512, 20, 0.55, 0, 6, 0.6, "DDIM"],
-             ["image3.png", 1024, 20, 0.3, 0, 6, 0.65, "DDIM"]
-         ],
-         inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale, controlnet_strength, scheduler_name],
-         outputs=output_slider,
-         fn=gradio_process_image,
-         cache_examples=True,
-     )
-
- demo.launch(debug=True, share=True)
+ import spaces
+ import os
+ import requests
+ import time
+
+ import subprocess
+ subprocess.run("pip install git+https://github.com/inference-sh/Real-ESRGAN.git --no-deps", shell=True)
+
+ import torch
+
+ from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, DDIMScheduler, DPMSolverMultistepScheduler, StableDiffusionXLControlNetImg2ImgPipeline
+ from diffusers.models import AutoencoderKL
+ from diffusers.models.attention_processor import AttnProcessor2_0
+
+ from PIL import Image
+ import cv2
+ import numpy as np
+
+ from RealESRGAN import RealESRGAN
+
+ import random
+ import math
+ import gc
+ from typing import List
+
+ import gradio as gr
+ #from gradio_imageslider import ImageSlider
+ from gradio_huggingfacehub_search import HuggingfaceHubSearch
+
+ from huggingface_hub import hf_hub_download, HfApi
+
+ USE_TORCH_COMPILE = False
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ def download_models():
+     models = {
+         "MODEL": ("dantea1118/juggernaut_reborn", "juggernaut_reborn.safetensors", "models/models/Stable-diffusion"),
+         "UPSCALER_X2": ("ai-forever/Real-ESRGAN", "RealESRGAN_x2.pth", "models/upscalers/"),
+         "UPSCALER_X4": ("ai-forever/Real-ESRGAN", "RealESRGAN_x4.pth", "models/upscalers/"),
+         "NEGATIVE_1": ("philz1337x/embeddings", "verybadimagenegative_v1.3.pt", "models/embeddings"),
+         "NEGATIVE_2": ("philz1337x/embeddings", "JuggernautNegative-neg.pt", "models/embeddings"),
+         "LORA_1": ("philz1337x/loras", "SDXLrender_v2.0.safetensors", "models/Lora"),
+         "LORA_2": ("philz1337x/loras", "more_details.safetensors", "models/Lora"),
+         "CONTROLNET": ("lllyasviel/ControlNet-v1-1", "control_v11f1e_sd15_tile.pth", "models/ControlNet"),
+         "VAE": ("stabilityai/sd-vae-ft-mse-original", "vae-ft-mse-840000-ema-pruned.safetensors", "models/VAE"),
+     }
+
+     for model, (repo_id, filename, local_dir) in models.items():
+         hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
+
+ download_models()
+
+ def timer_func(func):
+     def wrapper(*args, **kwargs):
+         start_time = time.time()
+         result = func(*args, **kwargs)
+         end_time = time.time()
+         print(f"{func.__name__} took {end_time - start_time:.2f} seconds")
+         return result
+     return wrapper
+
+ def get_scheduler(scheduler_name, config):
+     if scheduler_name == "DDIM":
+         return DDIMScheduler.from_config(config)
+     elif scheduler_name == "DPM++ 3M SDE Karras":
+         return DPMSolverMultistepScheduler.from_config(config, algorithm_type="sde-dpmsolver++", use_karras_sigmas=True)
+     elif scheduler_name == "DPM++ 3M Karras":
+         return DPMSolverMultistepScheduler.from_config(config, algorithm_type="dpmsolver++", use_karras_sigmas=True)
+     else:
+         raise ValueError(f"Unknown scheduler: {scheduler_name}")
+
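+ # Map the Hub's diffusers pipeline tags to the model families this app distinguishes.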
+ MODEL_TYPE_CLASS = {
+     "diffusers:StableDiffusionPipeline": "SD 1.5",
+     "diffusers:StableDiffusionXLPipeline": "SDXL",
+     "diffusers:FluxPipeline": "FLUX",
+ }
+
+ def get_model_type_from_repo_id(repo_id: str) -> str:
+     api = HfApi()
+     default = "FLUX"
+     try:
+         model = api.model_info(repo_id=repo_id, timeout=5.0)
+         tags = model.tags
+         for tag in tags:
+             if tag in MODEL_TYPE_CLASS.keys(): return MODEL_TYPE_CLASS.get(tag, default)
+     except Exception:
+         return default
+     return default
+
+ DEFAULT_MODEL = "Default"
+ DEFAULT_MODELS = [DEFAULT_MODEL, "Yntec/epiCPhotoGasm"]
+ DEFAULT_SCHEDULER = "Default"
+
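+ # Cache one pipeline per model id; entries beyond max_pipes (excluding the default) are evicted in clean_pipes().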
+ class LazyLoadPipeline:
+     def __init__(self):
+         self.dtype = torch.float16
+         self.pipes = {}
+         self.max_pipes = 3
+
+     #@timer_func
+     def load(self, model_id=DEFAULT_MODEL, use_default_vae=False, progress=gr.Progress(track_tqdm=True)):
+         if model_id not in self.pipes.keys():
+             print("Starting to load the pipeline...")
+             self.setup_pipeline(model_id, use_default_vae)
+             print(f"Moving pipeline to device: {device}")
+             self.clean_pipes()
+             if self.pipes[model_id].device != device: self.pipes[model_id].to(device)
+             if USE_TORCH_COMPILE:
+                 print("Compiling the model...")
+                 self.pipes[model_id].unet = torch.compile(self.pipes[model_id].unet, mode="reduce-overhead", fullgraph=True)
+
+     #@timer_func
+     def setup_pipeline(self, model_id, use_default_vae, progress=gr.Progress(track_tqdm=True)):
+         print("Setting up the pipeline...")
+         if model_id == DEFAULT_MODEL:
+             controlnet = ControlNetModel.from_single_file(
+                 "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=self.dtype
+             )
+             model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
+             pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
+                 model_path,
+                 controlnet=controlnet,
+                 torch_dtype=self.dtype,
+                 use_safetensors=True,
+                 safety_checker=None
+             )
+             if use_default_vae:
+                 vae = AutoencoderKL.from_single_file(
+                     "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
+                     torch_dtype=self.dtype
+                 )
+                 pipe.vae = vae
+             pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
+             pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
+             pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
+             pipe.fuse_lora(lora_scale=0.5)
+             pipe.load_lora_weights("models/Lora/more_details.safetensors")
+             pipe.fuse_lora(lora_scale=1.)
+             pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+             pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
+             self.pipes[DEFAULT_MODEL] = pipe
+         else:
+             model_type = get_model_type_from_repo_id(model_id)
+             if model_type == "SD 1.5":
+                 controlnet = ControlNetModel.from_single_file("models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=self.dtype)
+                 pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(model_id, controlnet=controlnet, torch_dtype=self.dtype, use_safetensors=True, safety_checker=None)
+                 if use_default_vae: pipe.vae = AutoencoderKL.from_single_file("models/VAE/vae-ft-mse-840000-ema-pruned.safetensors", torch_dtype=self.dtype)
+             elif model_type == "SDXL": # https://huggingface.co/xinsir/controlnet-tile-sdxl-1.0
+                 controlnet = ControlNetModel.from_pretrained("xinsir/controlnet-tile-sdxl-1.0", torch_dtype=self.dtype)
+                 pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(model_id, controlnet=controlnet, torch_dtype=self.dtype, use_safetensors=True)
+                 if use_default_vae: pipe.vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=self.dtype)
+             if pipe.device != device: pipe.to(device)
+             self.pipes[model_id] = pipe
+         return pipe
+
+     def clean_pipes(self):
+         pipes = [x for x in self.pipes.keys() if x != DEFAULT_MODEL]
+         new_pipes = list(reversed(pipes))[:self.max_pipes]
+         for name in pipes:
+             if name not in new_pipes:
+                 print(f"Unloading the pipeline... {name}")
+                 self.pipes[name].to("cpu")
+                 self.pipes.pop(name)
+         gc.collect()
+         torch.cuda.empty_cache()
+
+     def set_scheduler(self, model_id, scheduler_name):
+         if model_id in self.pipes.keys() and scheduler_name != DEFAULT_SCHEDULER:
+             self.pipes[model_id].scheduler = get_scheduler(scheduler_name, self.pipes[model_id].scheduler.config)
+
+     def __call__(self, model_id, *args, **kwargs):
+         return self.pipes[model_id](*args, **kwargs)
+
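+ # Defer Real-ESRGAN weight loading until the first prediction.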
+ class LazyRealESRGAN:
+     def __init__(self, device, scale):
+         self.device = device
+         self.scale = scale
+         self.model = None
+
+     def load_model(self):
+         if self.model is None:
+             self.model = RealESRGAN(self.device, scale=self.scale)
+             self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
+
+     def predict(self, img):
+         self.load_model()
+         return self.model.predict(img)
+
+ lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
+ lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
+
+ @timer_func
+ def resize_and_upscale(input_image, resolution):
+     scale = 2 if resolution <= 2048 else 4
+     input_image = input_image.convert("RGB")
+     W, H = input_image.size
+     k = float(resolution) / min(H, W)
+     H = int(round(H * k / 64.0)) * 64
+     W = int(round(W * k / 64.0)) * 64
+     img = input_image.resize((W, H), resample=Image.LANCZOS)
+     if scale == 2:
+         img = lazy_realesrgan_x2.predict(img)
+     else:
+         img = lazy_realesrgan_x4.predict(img)
+     return img
+
+ @timer_func
+ def create_hdr_effect(original_image, hdr):
+     if hdr == 0:
+         return original_image
+     cv_original = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
+     factors = [1.0 - 0.9 * hdr, 1.0 - 0.7 * hdr, 1.0 - 0.45 * hdr,
+                1.0 - 0.25 * hdr, 1.0, 1.0 + 0.2 * hdr,
+                1.0 + 0.4 * hdr, 1.0 + 0.6 * hdr, 1.0 + 0.8 * hdr]
+     images = [cv2.convertScaleAbs(cv_original, alpha=factor) for factor in factors]
+     merge_mertens = cv2.createMergeMertens()
+     hdr_image = merge_mertens.process(images)
+     hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
+     return Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
+
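+ # Build the default pipeline at startup and pre-warm the other bundled models so first requests stay fast.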
+ lazy_pipe = LazyLoadPipeline()
+ lazy_pipe.load()
+ for model_id in DEFAULT_MODELS:
+     if model_id != DEFAULT_MODEL: lazy_pipe.load(model_id)
+
+ @timer_func
+ def progressive_upscale(input_image, target_resolution, steps=3):
+     current_image = input_image.convert("RGB")
+     current_size = max(current_image.size)
+
+     for _ in range(steps):
+         if current_size >= target_resolution:
+             break
+
+         scale_factor = min(2, target_resolution / current_size)
+         new_size = (int(current_image.width * scale_factor), int(current_image.height * scale_factor))
+
+         if scale_factor <= 1.5:
+             current_image = current_image.resize(new_size, Image.LANCZOS)
+         else:
+             current_image = lazy_realesrgan_x2.predict(current_image)
+
+         current_size = max(current_image.size)
+
+     # Final resize to exact target resolution
+     if current_size != target_resolution:
+         aspect_ratio = current_image.width / current_image.height
+         if current_image.width > current_image.height:
+             new_size = (target_resolution, int(target_resolution / aspect_ratio))
+         else:
+             new_size = (int(target_resolution * aspect_ratio), target_resolution)
+         current_image = current_image.resize(new_size, Image.LANCZOS)
+
+     return current_image
+
+ def prepare_image(input_image, resolution, hdr):
+     upscaled_image = progressive_upscale(input_image, resolution)
+     return create_hdr_effect(upscaled_image, hdr)
+
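+ # Each tile is blended with a Gaussian window so overlapping seams average out smoothly.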
+ def create_gaussian_weight(tile_size, sigma=0.3):
+     x = np.linspace(-1, 1, tile_size)
+     y = np.linspace(-1, 1, tile_size)
+     xx, yy = np.meshgrid(x, y)
+     gaussian_weight = np.exp(-(xx**2 + yy**2) / (2 * sigma**2))
+     return gaussian_weight
+
+ def adaptive_tile_size(image_size, base_tile_size=512, max_tile_size=1024):
+     w, h = image_size
+     aspect_ratio = w / h
+     if aspect_ratio > 1:
+         tile_w = min(w, max_tile_size)
+         tile_h = min(int(tile_w / aspect_ratio), max_tile_size)
+     else:
+         tile_h = min(h, max_tile_size)
+         tile_w = min(int(tile_h * aspect_ratio), max_tile_size)
+     return max(tile_w, base_tile_size), max(tile_h, base_tile_size)
+
+ def process_tile(tile, num_inference_steps, strength, guidance_scale, controlnet_strength, model_id):
+     prompt = "masterpiece, best quality, highres"
+     negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
+
+     options = {
+         "prompt": prompt,
+         "negative_prompt": negative_prompt,
+         "image": tile,
+         "control_image": tile,
+         "num_inference_steps": num_inference_steps,
+         "strength": strength,
+         "guidance_scale": guidance_scale,
+         "controlnet_conditioning_scale": float(controlnet_strength),
+         "generator": torch.Generator(device=device).manual_seed(random.randint(0, 2147483647)),
+     }
+
+     return np.array(lazy_pipe(model_id, **options).images[0])
+
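+ # ZeroGPU: reserve a GPU slot for at most 59 seconds per call.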
+ @spaces.GPU(duration=59)
+ #@timer_func
+ def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale, controlnet_strength, scheduler_name,
+                          model_id, is_default_vae, progress=gr.Progress(track_tqdm=True)):
+     print("Starting image processing...")
+     torch.cuda.empty_cache()
+     lazy_pipe.load(model_id, is_default_vae)
+     lazy_pipe.set_scheduler(model_id, scheduler_name)
+
+     # Convert input_image to numpy array
+     input_array = np.array(input_image)
+
+     # Prepare the condition image
+     condition_image = prepare_image(input_image, resolution, hdr)
+     W, H = condition_image.size
+
+     # Adaptive tiling
+     tile_width, tile_height = adaptive_tile_size((W, H))
+
+     # Calculate the number of tiles
+     overlap = min(64, tile_width // 8, tile_height // 8) # Adaptive overlap
+     num_tiles_x = math.ceil((W - overlap) / (tile_width - overlap))
+     num_tiles_y = math.ceil((H - overlap) / (tile_height - overlap))
+
+     # Create a blank canvas for the result
+     result = np.zeros((H, W, 3), dtype=np.float32)
+     weight_sum = np.zeros((H, W, 1), dtype=np.float32)
+
+     # Create gaussian weight
+     gaussian_weight = create_gaussian_weight(max(tile_width, tile_height))
+
+     for i in range(num_tiles_y):
+         for j in range(num_tiles_x):
+             # Calculate tile coordinates
+             left = j * (tile_width - overlap)
+             top = i * (tile_height - overlap)
+             right = min(left + tile_width, W)
+             bottom = min(top + tile_height, H)
+
+             # Adjust tile size if it's at the edge
+             current_tile_size = (bottom - top, right - left)
+
+             tile = condition_image.crop((left, top, right, bottom))
+             tile = tile.resize((tile_width, tile_height))
+
+             # Process the tile
+             result_tile = process_tile(tile, num_inference_steps, strength, guidance_scale, controlnet_strength, model_id)
+
+             # Apply gaussian weighting
+             if current_tile_size != (tile_width, tile_height):
+                 result_tile = cv2.resize(result_tile, current_tile_size[::-1])
+                 tile_weight = cv2.resize(gaussian_weight, current_tile_size[::-1])
+             else:
+                 tile_weight = gaussian_weight[:current_tile_size[0], :current_tile_size[1]]
+
+             # Add the tile to the result with gaussian weighting
+             result[top:bottom, left:right] += result_tile * tile_weight[:, :, np.newaxis]
+             weight_sum[top:bottom, left:right] += tile_weight[:, :, np.newaxis]
+
+     # Normalize the result
+     final_result = (result / weight_sum).astype(np.uint8)
+
+     print("Image processing completed successfully")
+
+     return [input_array, final_result]
+
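+ # Add a checkpoint to the model dropdown only when a matching tile ControlNet exists (SD 1.5 or SDXL).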
+ def update_models(model_id: str, models: List[str]):
+     model_type = "SD 1.5" if model_id == DEFAULT_MODEL else get_model_type_from_repo_id(model_id)
+     if model_type in ["SD 1.5", "SDXL"]:
+         if model_id not in models: models.append(model_id)
+     else: gr.Info(f"{model_id} cannot be used for Upscaling.")
+     return gr.update(choices=models), models
+
+ title = """<h1 align="center">Tile Upscaler V2</h1>
+ <p align="center">Creative version of Tile Upscaler. The main ideas come from</p>
+ <p><center>
+ <a href="https://huggingface.co/spaces/gokaygokay/Tile-Upscaler" target="_blank">[Tile Upscaler]</a>
+ <a href="https://github.com/philz1337x/clarity-upscaler" target="_blank">[philz1337x]</a>
+ <a href="https://github.com/BatouResearch/controlnet-tile-upscale" target="_blank">[Pau-Lozano]</a>
+ </center></p>
+ """
+
+ with gr.Blocks() as demo:
+     gr.HTML(title)
+     with gr.Row():
+         with gr.Column():
+             input_image = gr.Image(type="pil", label="Input Image")
+             run_button = gr.Button("Enhance Image")
+         with gr.Column():
+             output_slider = gr.ImageSlider(label="Before / After", type="numpy")
+     with gr.Accordion("Advanced Options", open=False):
+         resolution = gr.Slider(minimum=128, maximum=2048, value=1024, step=128, label="Resolution")
+         num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
+         strength = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.01, label="Strength")
+         hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
+         guidance_scale = gr.Slider(minimum=0, maximum=20, value=6, step=0.5, label="Guidance Scale")
+         controlnet_strength = gr.Slider(minimum=0.0, maximum=2.0, value=0.75, step=0.05, label="ControlNet Strength")
+         scheduler_name = gr.Dropdown(
+             choices=[DEFAULT_SCHEDULER, "DDIM", "DPM++ 3M SDE Karras", "DPM++ 3M Karras"],
+             value=DEFAULT_SCHEDULER,
+             label="Scheduler"
+         )
+         with gr.Row():
+             model_id = gr.Dropdown(choices=DEFAULT_MODELS, value=DEFAULT_MODEL, allow_custom_value=True, label="Model")
+             models = gr.State(DEFAULT_MODELS)
+             search_hub = HuggingfaceHubSearch(label="Add from Huggingface Hub", placeholder="Search for models on Huggingface", search_type="model", show_label=True, sumbit_on_select=True)
+             is_default_vae = gr.Checkbox(value=True, label="Use Default VAE")
+
+     run_button.click(fn=gradio_process_image,
+                      inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale, controlnet_strength, scheduler_name, model_id, is_default_vae],
+                      outputs=output_slider).then(update_models, [model_id, models], [model_id, models])
+     search_hub.submit(update_models, [search_hub, models], [model_id, models])
+
+     gr.Examples(
+         examples=[
+             ["image1.jpg", 1536, 20, 0.4, 0, 6, 0.75, DEFAULT_SCHEDULER, DEFAULT_MODEL, True],
+             ["image2.png", 512, 20, 0.55, 0, 6, 0.6, DEFAULT_SCHEDULER, DEFAULT_MODEL, True],
+             ["image3.png", 1024, 20, 0.3, 0, 6, 0.65, DEFAULT_SCHEDULER, DEFAULT_MODEL, True]
+         ],
+         inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale, controlnet_strength, scheduler_name, model_id, is_default_vae],
+         outputs=output_slider,
+         fn=gradio_process_image,
+         cache_examples=True,
+         cache_mode="lazy",
+     )
+
+ demo.launch(debug=True, share=True, ssr_mode=False)
requirements.txt CHANGED
@@ -1,15 +1,17 @@
- opencv-python
- spaces
- diffusers
- torch==2.4.0
- torchvision
- pipeline
- transformers<=4.49.0
- accelerate
- safetensors
- spaces
- peft
- gradio
- pillow
- gradio-imageslider
+ opencv-python
+ spaces
+ diffusers
+ torch==2.4.0
+ torchvision
+ pipeline
+ transformers<=4.49.0
+ accelerate
+ safetensors
+ peft
+ #gradio
+ pillow
+ #gradio-imageslider
+ gradio_huggingfacehub_search
+ huggingface_hub
+ hf_xet
  pydantic==2.10.6