From 433b3ab7017556a19173a86d1215ed0a0b5b1396 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 28 Mar 2023 20:36:57 +0300
Subject: [PATCH] Revert "Merge pull request #7931 from
space-nuko/img2img-enhance"

This reverts commit 426875937048e21305ac24bea53df06523bdaa81, reversing
changes made to 1b63afbedc7789c0eb9a4742b780ab304d7a9caf.
---
javascript/ui.js | 22 +------
modules/generation_parameters_copypaste.py | 3 -
modules/img2img.py | 4 +-
modules/processing.py | 37 ++---------
modules/ui.py | 73 ++--------------------
scripts/xyz_grid.py | 1 -
style.css | 4 +-
7 files changed, 13 insertions(+), 131 deletions(-)

diff --git a/javascript/ui.js b/javascript/ui.js
index a73eeaa2..4a440193 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -132,14 +132,7 @@ function create_tab_index_args(tabId, args){
function get_img2img_tab_index() {
let res = args_to_array(arguments)
- res.splice(-2) // gradio also sends outputs to the arguments, pop them off
- res[0] = get_tab_index('mode_img2img')
- return res
-}
-
-function get_img2img_tab_index_for_res_preview() {
- let res = args_to_array(arguments)
- res.splice(-1) // gradio also sends outputs to the arguments, pop them off
+ res.splice(-2)
res[0] = get_tab_index('mode_img2img')
return res
}
@@ -368,16 +361,3 @@ function selectCheckpoint(name){
desiredCheckpointName = name;
gradioApp().getElementById('change_checkpoint').click()
}
-
-
-function onCalcResolutionImg2Img(mode, scale, width, height, resize_mode, init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint){
- i2iScale = gradioApp().getElementById('img2img_scale')
- i2iWidth = gradioApp().getElementById('img2img_width')
- i2iHeight = gradioApp().getElementById('img2img_height')
-
- setInactive(i2iScale, scale == 1)
- setInactive(i2iWidth, scale > 1)
- setInactive(i2iHeight, scale > 1)
-
- return [];
-}
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 0ad2ad4f..6df76858 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -282,9 +282,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
res["Hires resize-1"] = 0
res["Hires resize-2"] = 0
- if "Img2Img upscale" not in res:
- res["Img2Img upscale"] = 1
-
restore_old_hires_fix_params(res)
return res
diff --git a/modules/img2img.py b/modules/img2img.py
index 959dd96e..c973b770 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
@@ -149,8 +149,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
override_settings=override_settings,
- scale=scale,
- upscaler=upscaler,
)
p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/processing.py b/modules/processing.py
index 509b80b9..6d9c6a8d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -946,7 +946,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None
- def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs):
+ def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
super().__init__(**kwargs)
self.init_images = init_images
@@ -966,37 +966,11 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.mask = None
self.nmask = None
self.image_conditioning = None
- self.scale = scale
- self.upscaler = upscaler
-
- def get_final_size(self):
- if self.scale > 1:
- img = self.init_images[0]
- width = int(img.width * self.scale)
- height = int(img.height * self.scale)
- return width, height
- else:
- return self.width, self.height
-
def init(self, all_prompts, all_seeds, all_subseeds):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
crop_region = None
- if self.scale > 1:
- self.extra_generation_params["Img2Img upscale"] = self.scale
-
- # Non-latent upscalers are run before sampling
- # Latent upscalers are run during sampling
- init_upscaler = None
- if self.upscaler is not None:
- self.extra_generation_params["Img2Img upscaler"] = self.upscaler
- if self.upscaler not in shared.latent_upscale_modes:
- assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}"
- init_upscaler = self.upscaler
-
- self.width, self.height = self.get_final_size()
-
image_mask = self.image_mask
if image_mask is not None:
@@ -1019,7 +993,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image_mask = images.resize_image(2, mask, self.width, self.height)
self.paste_to = (x1, y1, x2-x1, y2-y1)
else:
- image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler)
+ image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
np_mask = np.array(image_mask)
np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
self.mask_for_overlay = Image.fromarray(np_mask)
@@ -1036,7 +1010,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image = images.flatten(img, opts.img2img_background_color)
if crop_region is None and self.resize_mode != 3:
- image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler)
+ image = images.resize_image(self.resize_mode, image, self.width, self.height)
if image_mask is not None:
image_masked = Image.new('RGBa', (image.width, image.height))
@@ -1081,9 +1055,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
- latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
- if latent_scale_mode is not None:
- self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
+ if self.resize_mode == 3:
+ self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
if image_mask is not None:
init_mask = latent_mask
diff --git a/modules/ui.py b/modules/ui.py
index f22da16a..eb5fcd3f 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -15,7 +15,6 @@ import warnings
import gradio as gr
import gradio.routes
import gradio.utils
-from gradio.events import Releaseable
import numpy as np
from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
@@ -128,26 +127,6 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz
return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}"
-def calc_resolution_img2img(mode, scale, resize_x, resize_y, resize_mode, *i2i_images):
- init_img = None
- if mode in {0, 1, 3, 4}:
- init_img = i2i_images[mode]
- elif mode == 2:
- init_img = i2i_images[mode]["image"]
-
- if not init_img:
- return ""
-
- if scale > 1:
- width = int(init_img.width * scale)
- height = int(init_img.height * scale)
- else:
- width = resize_x
- height = resize_y
-
- return f"resize: from {init_img.width}x{init_img.height} to {width}x{height}"
-
-
def apply_styles(prompt, prompt_neg, styles):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles)
@@ -756,7 +735,7 @@ def create_ui():
)
with FormRow():
- resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
+ resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
for category in ordered_ui_categories():
if category == "sampler":
@@ -765,13 +744,8 @@ def create_ui():
elif category == "dimensions":
with FormRow():
with gr.Column(elem_id="img2img_column_size", scale=4):
- with FormRow(variant="compact"):
- final_resolution = FormHTML(value="", elem_id="img2img_finalres", label="Upscaled resolution", interactive=False)
- with FormRow(variant="compact"):
- scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=1.0, elem_id="img2img_scale")
- with FormRow(variant="compact"):
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
@@ -786,9 +760,7 @@ def create_ui():
with FormRow():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
- with FormRow():
- upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
elif category == "seed":
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
@@ -841,39 +813,6 @@ def create_ui():
outputs=[inpaint_controls, mask_alpha],
)
- img2img_resolution_preview_inputs = [dummy_component, # filled in by selected img2img tab index in _js
- scale, width, height, resize_mode,
- init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint]
- for input in img2img_resolution_preview_inputs[1:]:
- if isinstance(input, Releaseable):
- input.release(
- fn=calc_resolution_img2img,
- _js="get_img2img_tab_index_for_res_preview",
- inputs=img2img_resolution_preview_inputs,
- outputs=[final_resolution],
- show_progress=False,
- ).success(
- None,
- _js="onCalcResolutionImg2Img",
- inputs=img2img_resolution_preview_inputs,
- outputs=[],
- show_progress=False,
- )
- else:
- input.change(
- fn=calc_resolution_img2img,
- _js="get_img2img_tab_index_for_res_preview",
- inputs=img2img_resolution_preview_inputs,
- outputs=[final_resolution],
- show_progress=False,
- ).success(
- None,
- _js="onCalcResolutionImg2Img",
- inputs=img2img_resolution_preview_inputs,
- outputs=[],
- show_progress=False,
- )
-
img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
@@ -922,8 +861,6 @@ def create_ui():
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
- scale,
- upscaler,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
@@ -1009,8 +946,6 @@ def create_ui():
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
- (scale, "Img2Img upscale"),
- (upscaler, "Img2Img upscaler"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 3f6c1997..3895a795 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,7 +220,6 @@ axis_options = [
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
- AxisOptionImg2Img("Upscaler", str, apply_field("upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)),
AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),
diff --git a/style.css b/style.css
index 379a89dc..de16a7f2 100644
--- a/style.css
+++ b/style.css
@@ -287,13 +287,13 @@ button.custom-button{
border-radius: 0 0.5rem 0.5rem 0;
}
-#txtimg_hr_finalres, #img2img_finalres {
+#txtimg_hr_finalres{
min-height: 0 !important;
padding: .625rem .75rem;
margin-left: -0.75em
}
-#txtimg_hr_finalres .resolution, #img2img_finalres .resolution{
+#txtimg_hr_finalres .resolution{
font-weight: bold;
}
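
The sketch below is an editorial illustration, not part of the patch: it contrasts the latent scaling that the reverted img2img-enhance branch performed in StableDiffusionProcessingImg2Img.init() (any selected latent upscaler rescaled init_latent) with the behaviour this revert restores (only resize_mode == 3, "Just resize (latent upscale)", rescales it, always bilinearly). opt_f and the {"mode", "antialias"} shape of latent_upscale_modes mirror the hunks above; the concrete mode entries, function names, and tensor sizes are placeholder assumptions.

# Illustrative sketch only -- not part of the patch. Mode entries, function
# names, and tensor sizes are placeholders, not the webui's actual values.
import torch
import torch.nn.functional as F

opt_f = 8  # latent-space downscaling factor used by the webui

# Placeholder stand-in for shared.latent_upscale_modes.
latent_upscale_modes = {
    "Latent": {"mode": "bilinear", "antialias": False},
    "Latent (nearest)": {"mode": "nearest", "antialias": False},
}


def scale_latent_reverted_branch(init_latent, width, height, upscaler):
    # Removed behaviour: any selected latent upscaler rescales the init latent.
    mode = latent_upscale_modes.get(upscaler)
    if mode is None:
        return init_latent
    return F.interpolate(init_latent, size=(height // opt_f, width // opt_f),
                         mode=mode["mode"], antialias=mode["antialias"])


def scale_latent_restored(init_latent, width, height, resize_mode):
    # Restored behaviour: only "Just resize (latent upscale)" (resize_mode == 3)
    # rescales the init latent, always with bilinear interpolation.
    if resize_mode != 3:
        return init_latent
    return F.interpolate(init_latent, size=(height // opt_f, width // opt_f),
                         mode="bilinear")


latent = torch.randn(1, 4, 64, 64)  # latent for a 512x512 init image
print(scale_latent_restored(latent, 1024, 1024, resize_mode=3).shape)  # torch.Size([1, 4, 128, 128])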