From dc25a31d1a3816a7fb0cd5ef186559b3c085db43 Mon Sep 17 00:00:00 2001 From: Ju1-js <40339350+Ju1-js@users.noreply.github.com> Date: Fri, 27 Jan 2023 22:43:10 -0800 Subject: [PATCH 01/53] Gradio Auth Read from External File Usage: `--gradio-auth-path {PATH}` It adds the credentials to the already existing `--gradio-auth` credentials. It can also handle line breaks. The file should look like: `{u1}:{p1},{u2}:{p2}` or ``` {u1}:{p1}, {u2}:{p2} ``` Will gradio handle duplicate credentials if it happens? --- modules/shared.py | 1 + webui.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 474fcc42..36e9762f 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -81,6 +81,7 @@ parser.add_argument("--freeze-settings", action='store_true', help="disable edit parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json')) parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option") parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) +parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. 
"/path/to/auth/file" same auth format as --gradio-auth', default=None) parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything') parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything") parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") diff --git a/webui.py b/webui.py index 41f32f5c..0e2b28b9 100644 --- a/webui.py +++ b/webui.py @@ -205,7 +205,7 @@ def webui(): ssl_keyfile=cmd_opts.tls_keyfile, ssl_certfile=cmd_opts.tls_certfile, debug=cmd_opts.gradio_debug, - auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None, + auth=[tuple(cred.split(':')) for cred in ((cmd_opts.gradio_auth.strip('"').replace('\n','').split(',') if cmd_opts.gradio_auth else []) + (open(cmd_opts.gradio_auth_path, 'r').read().strip().replace('\n','').split(',') if cmd_opts.gradio_auth_path and os.path.exists(cmd_opts.gradio_auth_path) else []))] if cmd_opts.gradio_auth or (cmd_opts.gradio_auth_path and os.path.exists(cmd_opts.gradio_auth_path)) else None, inbrowser=cmd_opts.autolaunch, prevent_thread_lock=True ) From 17b24e45e8839d889af35ee0b2fb0825306ddafe Mon Sep 17 00:00:00 2001 From: Francesco Manzali Date: Tue, 31 Jan 2023 18:58:36 +0100 Subject: [PATCH 02/53] Fix prompt matrix #rows/#cols when using hires - images.draw_prompt_matrix() should be called with the final width/height of the generated images, after upscaling. Otherwise, the number of rows/cols computed in images.draw_grid_annotations will increase by the upscaling factor. 
- Round the number of cols/rows in images.draw_grid_annotations, since the final images width may be a bit less than the required hr_upscale_to_x/y --- modules/images.py | 4 ++-- scripts/prompt_matrix.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/images.py b/modules/images.py index ae3cdaf4..4be0e74d 100644 --- a/modules/images.py +++ b/modules/images.py @@ -171,8 +171,8 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts): pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4 - cols = im.width // width - rows = im.height // height + cols = round(im.width / width) + rows = round(im.height / height) assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}' assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}' diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index dd95e588..f6575b6b 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -79,7 +79,7 @@ class Script(scripts.Script): processed = process_images(p) grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) - grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts) + grid = images.draw_prompt_matrix(grid, max(p.width, p.hr_upscale_to_x), max(p.height, p.hr_upscale_to_y), prompt_matrix_parts) processed.images.insert(0, grid) processed.index_of_first_image = 1 processed.infotexts.insert(0, processed.infotexts[0]) From 5afd9e82c3829348c58803cd85b02c87308fffae Mon Sep 17 00:00:00 2001 From: Francesco Manzali Date: Wed, 1 Feb 2023 21:16:52 +0100 Subject: [PATCH 03/53] Use the real images size, not the process - Use the width/height of the first image in processed.images - No more need for rounding in prompt_matrix --- modules/images.py | 4 ++-- scripts/prompt_matrix.py | 4 ++-- 2 files changed, 4 insertions(+), 4 
deletions(-) diff --git a/modules/images.py b/modules/images.py index 4be0e74d..ae3cdaf4 100644 --- a/modules/images.py +++ b/modules/images.py @@ -171,8 +171,8 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts): pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4 - cols = round(im.width / width) - rows = round(im.height / height) + cols = im.width // width + rows = im.height // height assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}' assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}' diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index f6575b6b..50c7f3cb 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -78,8 +78,8 @@ class Script(scripts.Script): p.prompt_for_display = original_prompt processed = process_images(p) - grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) - grid = images.draw_prompt_matrix(grid, max(p.width, p.hr_upscale_to_x), max(p.height, p.hr_upscale_to_y), prompt_matrix_parts) + grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) + grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[0].height, prompt_matrix_parts) processed.images.insert(0, grid) processed.index_of_first_image = 1 processed.infotexts.insert(0, processed.infotexts[0]) From dd20fc0fda5ac7cbb93af0b93222ee5a17f7705e Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 4 Feb 2023 23:23:20 +0900 Subject: [PATCH 04/53] fix --help show correct help message --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 9fd766d1..a68bb3a9 100644 --- a/launch.py +++ b/launch.py @@ -242,7 +242,7 @@ def prepare_environment(): sys.argv += shlex.split(commandline_args) 
- parser = argparse.ArgumentParser() + parser = argparse.ArgumentParser(add_help=False) parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default='config.json') args, _ = parser.parse_known_args(sys.argv) From 6d11cda4188633ba19f4d8948139e510d5678059 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 5 Feb 2023 23:12:42 +0900 Subject: [PATCH 05/53] configurable image downscale allowing the user to configure the image downscale parameters in setting --- modules/images.py | 4 ++-- modules/shared.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/images.py b/modules/images.py index c2ca8849..cf4aea22 100644 --- a/modules/images.py +++ b/modules/images.py @@ -575,9 +575,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image.already_saved_as = fullfn - target_side_length = 4000 + target_side_length = int(opts.target_side_length) oversize = image.width > target_side_length or image.height > target_side_length - if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024): + if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024): ratio = image.width / image.height if oversize and ratio > 1: diff --git a/modules/shared.py b/modules/shared.py index 79fbf724..a93a0299 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -325,7 +325,9 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or 
any dimension is larger than 4000, downscale and save copy as JPG"), + "export_for_4chan": OptionInfo(True, "If PNG image is larger than Downscale threshold or any dimension is larger than Target length, downscale the image to dimensions and save a copy as JPG"), + "img_downscale_threshold": OptionInfo(4, "Downscale threshold (MB)"), + "target_side_length": OptionInfo(4000, "Target length"), "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), From fe33be6cac140ff83b481029106968f39209cd90 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 5 Feb 2023 23:33:05 +0900 Subject: [PATCH 06/53] use Default if ValueError --- modules/images.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/modules/images.py b/modules/images.py index cf4aea22..34d08b73 100644 --- a/modules/images.py +++ b/modules/images.py @@ -575,9 +575,17 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image.already_saved_as = fullfn - target_side_length = int(opts.target_side_length) + try: + target_side_length = int(opts.target_side_length) + except ValueError: + target_side_length = 4000 + try: + img_downscale_threshold = float(opts.img_downscale_threshold) + except ValueError: + img_downscale_threshold = 4 + oversize = image.width > target_side_length or image.height > target_side_length - if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024): + if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > img_downscale_threshold * 1024 * 1024): ratio = image.width / image.height if oversize and ratio > 1: From c8109f0dea0af10336597fecc200ff1e53b701d0 Mon Sep 17 00:00:00 2001 From: Kyle Date: Sun, 5 Feb 2023 15:18:18 -0500 Subject: [PATCH 07/53] Add Image CFG 
Scale to XYZ Grid --- scripts/xyz_grid.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 5982cfba..db4396b4 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -186,6 +186,7 @@ axis_options = [ AxisOption("Steps", int, apply_field("steps")), AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")), AxisOption("CFG Scale", float, apply_field("cfg_scale")), + AxisOption("Image CFG Scale", float, apply_field("image_cfg_scale")), AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), From 67303fd5fc7b1970d509e4afa576a905ed664955 Mon Sep 17 00:00:00 2001 From: Kyle Date: Sun, 5 Feb 2023 15:34:26 -0500 Subject: [PATCH 08/53] Img2Img Only Will still show up as an option with regular img2img models, but outputs no changes. 
--- scripts/xyz_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index db4396b4..1f29bf69 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -186,7 +186,7 @@ axis_options = [ AxisOption("Steps", int, apply_field("steps")), AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")), AxisOption("CFG Scale", float, apply_field("cfg_scale")), - AxisOption("Image CFG Scale", float, apply_field("image_cfg_scale")), + AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")), AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), From 7dd23973f7e7e3b116ce1a2ba427d409914bd921 Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Mon, 6 Feb 2023 00:28:31 +0300 Subject: [PATCH 09/53] Optionally append interrogated prompt in loopback script --- scripts/loopback.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/loopback.py b/scripts/loopback.py index 1dab9476..ec1f85e5 100644 --- a/scripts/loopback.py +++ b/scripts/loopback.py @@ -8,6 +8,7 @@ from modules import processing, shared, sd_samplers, images from modules.processing import Processed from modules.sd_samplers import samplers from modules.shared import opts, cmd_opts, state +from modules import deepbooru class Script(scripts.Script): @@ -20,10 +21,11 @@ class Script(scripts.Script): def ui(self, is_img2img): loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops")) denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, 
elem_id=self.elem_id("denoising_strength_change_factor")) + append_interrogation = gr.Dropdown(label="Append interrogated prompt at each iteration", choices=["None", "CLIP", "DeepBooru"], value="None") - return [loops, denoising_strength_change_factor] + return [loops, denoising_strength_change_factor, append_interrogation] - def run(self, p, loops, denoising_strength_change_factor): + def run(self, p, loops, denoising_strength_change_factor, append_interrogation): processing.fix_seed(p) batch_count = p.n_iter p.extra_generation_params = { @@ -40,6 +42,7 @@ class Script(scripts.Script): grids = [] all_images = [] original_init_image = p.init_images + original_prompt = p.prompt state.job_count = loops * batch_count initial_color_corrections = [processing.setup_color_correction(p.init_images[0])] @@ -58,6 +61,13 @@ class Script(scripts.Script): if opts.img2img_color_correction: p.color_corrections = initial_color_corrections + if append_interrogation != "None": + p.prompt = original_prompt + ", " if original_prompt != "" else "" + if append_interrogation == "CLIP": + p.prompt += shared.interrogator.interrogate(p.init_images[0]) + elif append_interrogation == "DeepBooru": + p.prompt += deepbooru.model.tag(p.init_images[0]) + state.job = f"Iteration {i + 1}/{loops}, batch {n + 1}/{batch_count}" processed = processing.process_images(p) From 584f782391b42c182385a56ae46d1674649713e6 Mon Sep 17 00:00:00 2001 From: CurtisDS <20732674+CurtisDS@users.noreply.github.com> Date: Sun, 5 Feb 2023 16:42:45 -0500 Subject: [PATCH 10/53] Update ui_extra_networks.py update the string used to build the ID handle to replace spaces with underscore --- modules/ui_extra_networks.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 90abec0a..fb7f2d6c 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -94,11 +94,13 @@ class ExtraNetworksPage: dirs = "".join([f"
  • {x}
  • " for x in self.allowed_directories_for_previews()]) items_html = shared.html("extra-networks-no-cards.html").format(dirs=dirs) + self_name_id = self.name.replace(" ", "_") + res = f""" -
    +
    {subdirs_html}
    -
    +
    {items_html}
    """ From df8ee5f6b059b81bce35db05c27209df9009646e Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Mon, 6 Feb 2023 00:52:57 +0300 Subject: [PATCH 11/53] Update batch count/size hints --- javascript/hints.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 9aa82f24..f1199009 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -8,8 +8,8 @@ titles = { "DDIM": "Denoising Diffusion Implicit Models - best at inpainting", "DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution", - "Batch count": "How many batches of images to create", - "Batch size": "How many image to create in a single batch", + "Batch count": "How many batches of images to create (has no impact on generation performance or VRAM usage)", + "Batch size": "How many image to create in a single batch (increases generation performance at cost of higher VRAM usage)", "CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results", "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result", "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time", From 9a22c63f47e895cfa17704ae18dd62fd3a831e9f Mon Sep 17 00:00:00 2001 From: EllangoK Date: Mon, 6 Feb 2023 00:52:31 -0500 Subject: [PATCH 12/53] call modules.sd_vae.refresh_vae_list() --- modules/shared_items.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_items.py b/modules/shared_items.py index 8b5ec96d..b72b2bae 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -20,4 +20,4 @@ def sd_vae_items(): def refresh_vae_list(): import modules.sd_vae - return modules.sd_vae.refresh_vae_list + return 
modules.sd_vae.refresh_vae_list() From 5d483bf307c766aee97caec857d414946fad47db Mon Sep 17 00:00:00 2001 From: Gerschel Date: Mon, 6 Feb 2023 08:18:04 -0800 Subject: [PATCH 13/53] aspect ratio for dim's; sliders adjust by ratio MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Default choices added to settings in user interface section Choices are editable by user User selects from dropdown. When you move one slider, the other adjusts according to the ratio chosen. Vice versa for the other slider. Number fields for changes work as well. For disabling ratio, an unlock pad "🔓" is available as a default. This string can be changed to anything to serve as a disable, as long as there is no colon ":". Ratios are entered in this format, floats or ints with a colon "1:1". The string is split at the colon, parses left and right as floats to perform the math. --- javascript/ComponentControllers.js | 257 +++++++++++++++++++++++++++++ javascript/aspectRatioSliders.js | 41 +++++ modules/shared.py | 14 ++ modules/ui.py | 6 + 4 files changed, 318 insertions(+) create mode 100644 javascript/ComponentControllers.js create mode 100644 javascript/aspectRatioSliders.js diff --git a/javascript/ComponentControllers.js b/javascript/ComponentControllers.js new file mode 100644 index 00000000..194589c7 --- /dev/null +++ b/javascript/ComponentControllers.js @@ -0,0 +1,257 @@ +/* This is a basic library that allows controlling elements that take some form of user input. + +This was previously written in typescript, where all controllers implemented an interface. Not +all methods were needed in all the controllers, but it was done to keep a common interface, so +your main app can serve as a controller of controllers. + +These controllers were built to work on the shapes of html elements that gradio components use. + +There may be some notes in it that only applied to my use case, but I left them to help others +along. 
+ +You will need the parent element for these to work. +The parent element can be defined as the element (div) that gets the element id when assigning +an element id to a gradio component. + +Example: + gr.TextBox(value="...", elem_id="THISID") + +Basic usage, grab an element that is the parent container for the component. + +Send it in to the class, like a function, don't forget the "new" keyword so it calls the constructor +and sends back a new object. + +Example: + +let txt2imgPrompt = new TextComponentController(gradioApp().querySelector("#txt2img_prompt")) + +Then use the getVal() method to get the value, or use the setVal(myValue) method to set the value. + +Input types that are groups, like Checkbox groups (not individual checkboxes), take in an array of values. + +Checkbox group has to reset all values to False (unchecked), then set the values in your array to true (checked). +If you don't hold a reference to the values (the labels in string format), you can acquire them using the getVal() method. 
+*/ +class DropdownComponentController { + constructor(element) { + this.element = element; + this.childSelector = this.element.querySelector('select'); + this.children = new Map(); + Array.from(this.childSelector.querySelectorAll('option')).forEach(opt => this.children.set(opt.value, opt)); + } + getVal() { + return this.childSelector.value; + } + updateVal(optionElement) { + optionElement.selected = true; + } + setVal(name) { + this.updateVal(this.children.get(name)); + this.eventHandler(); + } + eventHandler() { + this.childSelector.dispatchEvent(new Event("change")); + } +} +class CheckboxComponentController { + constructor(element) { + this.element = element; + this.child = this.element.querySelector('input'); + } + getVal() { + return this.child.checked; + } + updateVal(checked) { + this.child.checked = checked; + } + setVal(checked) { + this.updateVal(checked); + this.eventHandler(); + } + eventHandler() { + this.child.dispatchEvent(new Event("change")); + } +} +class CheckboxGroupComponentController { + constructor(element) { + this.element = element; + //this.checkBoxes = new Object; + this.children = new Map(); + Array.from(this.element.querySelectorAll('input')).forEach(input => this.children.set(input.nextElementSibling.innerText, input)); + /* element id gets use fieldset, grab all inputs (the bool val) get the userfriendly label, use as key, put bool value in mapping */ + //Array.from(this.component.querySelectorAll("input")).forEach( _input => this.checkBoxes[_input.nextElementSibling.innerText] = _input) + /*Checkboxgroup structure +
    +
    css makes translucent + + serves as label for component + +
    container for checkboxes + + ... +
    +
    + */ + } + updateVal(label) { + /********* + calls updates using a throttle or else the backend does not get updated properly + * ********/ + setTimeout(() => this.conditionalToggle(true, this.children.get(label)), 2); + } + setVal(labels) { + /* Handles reset and updates all in array to true */ + this.reupdateVals(); + labels.forEach(l => this.updateVal(l)); + } + getVal() { + //return the list of values that are true + return [...this.children].filter(([k, v]) => v.checked).map(arr => arr[0]); + } + reupdateVals() { + /************** + * for reupdating all vals, first set to false + **************/ + this.children.forEach(inputChild => this.conditionalToggle(false, inputChild)); + } + conditionalToggle(desiredVal, inputChild) { + //This method behaves like 'set this value to this' + //Using element.checked = true/false, does not register the change, even if you called change afterwards, + // it only sets what it looks like in our case, because there is no form submit, a person then has to click on it twice. 
+ //Options are to use .click() or dispatch an event + if (desiredVal != inputChild.checked) { + inputChild.dispatchEvent(new Event("change")); //using change event instead of click, in case browser ad-blockers blocks the click method + } + } + eventHandler(checkbox) { + checkbox.dispatchEvent(new Event("change")); + } +} +class RadioComponentController { + constructor(element) { + this.element = element; + this.children = new Map(); + Array.from(this.element.querySelectorAll("input")).forEach(input => this.children.set(input.value, input)); + } + getVal() { + //radio groups have a single element that's checked is true + // as array arr k,v pair element.checked ) -> array of len(1) with [k,v] so either [0] [1].value + return [...this.children].filter(([l, e]) => e.checked)[0][0]; + //return Array.from(this.children).filter( ([label, input]) => input.checked)[0][1].value + } + updateVal(child) { + this.eventHandler(child); + } + setVal(name) { + //radio will trigger all false except the one that get the event change + //to keep the api similar, other methods are still called + this.updateVal(this.children.get(name)); + } + eventHandler(child) { + child.dispatchEvent(new Event("change")); + } +} +class NumberComponentController { + constructor(element) { + this.element = element; + this.childNumField = element.querySelector('input[type=number]'); + } + getVal() { + return this.childNumField.value; + } + updateVal(text) { + this.childNumField.value = text; + } + eventHandler() { + this.element.dispatchEvent(new Event("input")); + } + setVal(text) { + this.updateVal(text); + this.eventHandler(); + } +} +class SliderComponentController { + constructor(element) { + this.element = element; + this.childNumField = this.element.querySelector('input[type=number]'); + this.childRangeField = this.element.querySelector('input[type=range]'); + } + getVal() { + return this.childNumField.value; + } + updateVal(text) { + //both are not needed, either works, both are left in so one 
is a fallback in case of gradio changes + this.childNumField.value = text; + this.childRangeField.value = text; + } + eventHandler() { + this.element.dispatchEvent(new Event("input")); + } + setVal(text) { + this.updateVal(text); + this.eventHandler(); + } +} +class TextComponentController { + constructor(element) { + this.element = element; + this.child = element.querySelector('textarea'); + } + getVal() { + return this.child.value; + } + eventHandler() { + this.element.dispatchEvent(new Event("input")); + this.child.dispatchEvent(new Event("change")); + //Workaround to solve no target with v(o) on eventhandler, define my own target + let ne = new Event("input"); + Object.defineProperty(ne, "target", { value: this.child }); + this.child.dispatchEvent(ne); + } + updateVal(text) { + this.child.value = text; + } + appendValue(text) { + //might add delimiter option + this.child.value += ` ${text}`; + } + setVal(text, append = false) { + if (append) { + this.appendValue(text); + } + else { + this.updateVal(text); + } + this.eventHandler(); + } +} +class JsonComponentController extends TextComponentController { + constructor(element) { + super(element); + } + getVal() { + return JSON.parse(this.child.value); + } +} +class ColorComponentController { + constructor(element) { + this.element = element; + this.child = this.element.querySelector('input[type=color]'); + } + updateVal(text) { + this.child.value = text; + } + getVal() { + return this.child.value; + } + setVal(text) { + this.updateVal(text); + this.eventHandler(); + } + eventHandler() { + this.child.dispatchEvent(new Event("input")); + } +} diff --git a/javascript/aspectRatioSliders.js b/javascript/aspectRatioSliders.js new file mode 100644 index 00000000..f577750a --- /dev/null +++ b/javascript/aspectRatioSliders.js @@ -0,0 +1,41 @@ +class AspectRatioSliderController { + constructor(widthSlider, heightSlider, ratioSource) { + this.widthSlider = new SliderComponentController(widthSlider); + this.heightSlider = 
new SliderComponentController(heightSlider); + this.ratioSource = new DropdownComponentController(ratioSource); + this.widthSlider.childRangeField.addEventListener("change", () => this.resize("width")); + this.widthSlider.childNumField.addEventListener("change", () => this.resize("width")); + this.heightSlider.childRangeField.addEventListener("change", () => this.resize("height")); + this.heightSlider.childNumField.addEventListener("change", () => this.resize("height")); + } + resize(dimension) { + let val = this.ratioSource.getVal(); + if (!val.includes(":")) { + return; + } + let [width, height] = val.split(":").map(Number); + let ratio = width / height; + if (dimension == 'width') { + this.heightSlider.setVal(Math.round(parseFloat(this.widthSlider.getVal()) / ratio).toString()); + } + else if (dimension == "height") { + this.widthSlider.setVal(Math.round(parseFloat(this.heightSlider.getVal()) * ratio).toString()); + } + } + static observeStartup(widthSliderId, heightSliderId, ratioSourceId) { + let observer = new MutationObserver(() => { + let widthSlider = document.querySelector("gradio-app").shadowRoot.getElementById(widthSliderId); + let heightSlider = document.querySelector("gradio-app").shadowRoot.getElementById(heightSliderId); + let ratioSource = document.querySelector("gradio-app").shadowRoot.getElementById(ratioSourceId); + if (widthSlider && heightSlider && ratioSource) { + observer.disconnect(); + new AspectRatioSliderController(widthSlider, heightSlider, ratioSource); + } + }); + observer.observe(gradioApp(), { childList: true, subtree: true }); + } +} +document.addEventListener("DOMContentLoaded", () => { + AspectRatioSliderController.observeStartup("txt2img_width", "txt2img_height", "txt2img_ratio"); + AspectRatioSliderController.observeStartup("img2img_width", "img2img_height", "img2img_ratio"); +}); diff --git a/modules/shared.py b/modules/shared.py index 79fbf724..ead7be36 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -139,6 +139,19 
@@ ui_reorder_categories = [ "scripts", ] +aspect_ratio_defaults = [ + "🔓" + "1:1", + "1:2", + "2:1", + "2:3", + "3:2", + "4:3", + "5:4", + "9:16", + "16:9", +] + cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -456,6 +469,7 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), + "aspect_ratios": OptionInfo(", ".join(aspect_ratio_defaults), "txt2img/img2img aspect ratios"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) diff --git a/modules/ui.py b/modules/ui.py index f5df1ffe..6853485c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -424,6 +424,10 @@ def ordered_ui_categories(): yield category +def aspect_ratio_list(): + return [ratio.strip() for ratio in shared.opts.aspect_ratios.split(",")] + + def get_value_for_setting(key): value = getattr(opts, key) @@ -480,6 +484,7 @@ def create_ui(): height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") + aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="txt2img_ratio", show_label=False, label="Aspect Ratio") if 
opts.dimensions_and_batch_together: with gr.Column(elem_id="txt2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") @@ -758,6 +763,7 @@ def create_ui(): height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="img2img_ratio", show_label=False, label="Aspect Ratio") if opts.dimensions_and_batch_together: with gr.Column(elem_id="img2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") From 4738486d8f528a98a525970ac06a109431fd7344 Mon Sep 17 00:00:00 2001 From: brkirch Date: Mon, 6 Feb 2023 18:10:55 -0500 Subject: [PATCH 14/53] Support for hypernetworks with --upcast-sampling --- modules/hypernetworks/hypernetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 825a93b2..a15bae18 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -380,8 +380,8 @@ def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None): layer.hyper_k = hypernetwork_layers[0] layer.hyper_v = hypernetwork_layers[1] - context_k = hypernetwork_layers[0](context_k) - context_v = hypernetwork_layers[1](context_v) + context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k))) + context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v))) return context_k, context_v From 2016733814433ca2b69d10764bfa0ab4c7088782 Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 7 Feb 2023 00:05:54 -0500 Subject: [PATCH 15/53] Apply hijacks in ddpm_edit for upcast sampling To avoid import 
errors, ddpm_edit hijacks are done after an instruct pix2pix model is loaded. --- modules/sd_hijack.py | 3 +++ modules/sd_hijack_unet.py | 11 +++++++++++ 2 files changed, 14 insertions(+) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 8fdc5990..fca418cd 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -104,6 +104,9 @@ class StableDiffusionModelHijack: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + if m.cond_stage_key == "edit": + sd_hijack_unet.hijack_ddpm_edit() + self.optimization_method = apply_optimizations() self.clip = m.cond_stage_model diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 45cf2b18..843ab66c 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -44,6 +44,7 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs): with devices.autocast(): return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float() + class GELUHijack(torch.nn.GELU, torch.nn.Module): def __init__(self, *args, **kwargs): torch.nn.GELU.__init__(self, *args, **kwargs) @@ -53,6 +54,16 @@ class GELUHijack(torch.nn.GELU, torch.nn.Module): else: return torch.nn.GELU.forward(self, x) + +ddpm_edit_hijack = None +def hijack_ddpm_edit(): + global ddpm_edit_hijack + if not ddpm_edit_hijack: + CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond) + CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond) + ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) + + unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, 
unet_needs_upcast) CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) From 4c562a9832343f21ce86410aea6b26904cb13b2c Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 8 Feb 2023 07:03:36 -0500 Subject: [PATCH 16/53] convert rgba to rgb some image format (e.g. jpg) do not support rgba --- modules/img2img.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/img2img.py b/modules/img2img.py index bcc158dc..c973b770 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -73,6 +73,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): if not save_normally: os.makedirs(output_dir, exist_ok=True) + if processed_image.mode == 'RGBA': + processed_image = processed_image.convert("RGB") processed_image.save(os.path.join(output_dir, filename)) From 3ee9ca5cb06bcb36a633aea0dbbc6fd2478921df Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 8 Feb 2023 07:08:09 -0500 Subject: [PATCH 17/53] add missing import used later in line 418 --- modules/esrgan_model_arch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index bc9ceb2a..1b52b0f5 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -1,5 +1,6 @@ # this file is adapted from https://github.com/victorca25/iNNfer +from collections import OrderedDict import math import functools import torch From 3ca41dbded119509bb66d7952d403091a5543257 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 8 Feb 2023 07:10:13 -0500 Subject: [PATCH 18/53] add missing import used later in line 70 --- modules/sd_hijack_inpainting.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 478cd499..55a2ce4d 100644 --- a/modules/sd_hijack_inpainting.py 
+++ b/modules/sd_hijack_inpainting.py @@ -11,6 +11,7 @@ import ldm.models.diffusion.plms from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like +from ldm.models.diffusion.sampling_util import norm_thresholding @torch.no_grad() From 374fe636b80169c78b4b5f92013681d75fa2fad6 Mon Sep 17 00:00:00 2001 From: Gerschel Date: Wed, 8 Feb 2023 18:57:32 -0800 Subject: [PATCH 19/53] Squashed commit of the following: commit b030b67ad005bfe29bcda692238a00042dcae816 Author: Gerschel Date: Wed Feb 8 16:38:56 2023 -0800 styling adjustements commit 80a2acb0230dd77489b0eb466f2efe827a053f6d Author: Gerschel Date: Wed Feb 8 10:49:47 2023 -0800 badge indicator toggles visibility by selection commit 898922e025a6422ac947fb45c1fa4f1109882f0a Merge: 745382a0 31bbfa72 Author: Gerschel <9631031+Gerschel@users.noreply.github.com> Date: Wed Feb 8 08:35:26 2023 -0800 Merge pull request #1 from w-e-w/Rounding-Method Rounding Method commit 31bbfa729a15ef35fa1f905345d3ba2b17b26ab9 Author: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed Feb 8 19:41:45 2023 +0900 use switch commit 85dbe511c33521d3ac62224bf0e0f3a48194ce63 Author: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed Feb 8 16:47:52 2023 +0900 Rounding Method commit 745382a0f4b8d16241545a3460d5206915959255 Author: Gerschel Date: Tue Feb 7 21:19:20 2023 -0800 default set to round commit 728579c618af30ec98a5af0991bd3f28bdaca399 Author: Gerschel Date: Tue Feb 7 21:17:03 2023 -0800 cleaned some commented code out; added indicator commit 5b288c24a1edd8a5c2f35214b9634316d05b8dae Author: Gerschel Date: Tue Feb 7 18:19:00 2023 -0800 needs cleaning; attempt at rounding commit d9f18ae92b929576b0b8c5f1ef8b3b38e441e381 Author: Gerschel Date: Tue Feb 7 15:46:25 2023 -0800 add rounding option in setting for aspect ratio commit af22106802c9e42205649e4c71c23fcf5b8c62f6 Author: Gerschel Date: Tue Feb 7 13:18:45 2023 -0800 added 
some ratios, sorted ratios by commonality commit 11e2fba73cffe8cdbf4cd0860641b94428ca0e74 Author: Gerschel Date: Tue Feb 7 10:46:53 2023 -0800 snaps to mulitples of 8 and along ratio commit fa00387e07460b10ee82671a1bfa8687e00ee60b Author: Gerschel Date: Mon Feb 6 14:54:59 2023 -0800 updated slidercomponentcontroller commit 8059bc111c3e2d1edb3314e05ab21b65120fa1dd Author: Gerschel Date: Mon Feb 6 14:29:11 2023 -0800 added step size adjustment on number field commit 641157b9f27a874a24ee7b0a854a092e9eac3eec Author: Gerschel Date: Mon Feb 6 14:12:03 2023 -0800 added return step size to default when ratio is disabled commit 5fb75ad28f2476f36100ec93922a8199adbd2a68 Author: Gerschel Date: Mon Feb 6 14:09:34 2023 -0800 added step size adjustment commit e33532883bc4709cd41c3775cbb646d1d5ab0584 Author: Gerschel Date: Mon Feb 6 11:56:15 2023 -0800 adjusted dropdown size, padding, text-align commit 81937329cee77f466c5a5b23c268d0c810128f84 Author: Gerschel Date: Mon Feb 6 11:39:57 2023 -0800 added positioning and styling commit 86eb4583782d92880a9a113a54ffbac9d92f3753 Author: Gerschel Date: Mon Feb 6 08:54:45 2023 -0800 fix typo in defaults; added preventDefault in event --- javascript/ComponentControllers.js | 2 + javascript/aspectRatioSliders.js | 164 ++++++++++++++++++++++++++--- modules/shared.py | 17 ++- modules/ui.py | 10 +- style.css | 46 ++++++++ 5 files changed, 218 insertions(+), 21 deletions(-) diff --git a/javascript/ComponentControllers.js b/javascript/ComponentControllers.js index 194589c7..2888679b 100644 --- a/javascript/ComponentControllers.js +++ b/javascript/ComponentControllers.js @@ -189,6 +189,8 @@ class SliderComponentController { } eventHandler() { this.element.dispatchEvent(new Event("input")); + this.childNumField.dispatchEvent(new Event("input")); + this.childRangeField.dispatchEvent(new Event("input")); } setVal(text) { this.updateVal(text); diff --git a/javascript/aspectRatioSliders.js b/javascript/aspectRatioSliders.js index f577750a..d9c4f675 
100644 --- a/javascript/aspectRatioSliders.js +++ b/javascript/aspectRatioSliders.js @@ -1,14 +1,61 @@ class AspectRatioSliderController { - constructor(widthSlider, heightSlider, ratioSource) { + constructor(widthSlider, heightSlider, ratioSource, roundingSource, roundingMethod) { + //References this.widthSlider = new SliderComponentController(widthSlider); this.heightSlider = new SliderComponentController(heightSlider); this.ratioSource = new DropdownComponentController(ratioSource); - this.widthSlider.childRangeField.addEventListener("change", () => this.resize("width")); - this.widthSlider.childNumField.addEventListener("change", () => this.resize("width")); - this.heightSlider.childRangeField.addEventListener("change", () => this.resize("height")); - this.heightSlider.childNumField.addEventListener("change", () => this.resize("height")); + this.roundingSource = new CheckboxComponentController(roundingSource); + this.roundingMethod = new RadioComponentController(roundingMethod); + this.roundingIndicatorBadge = document.createElement("div"); + // Badge implementation + this.roundingIndicatorBadge.innerText = "📐"; + this.roundingIndicatorBadge.classList.add("rounding-badge"); + this.ratioSource.element.appendChild(this.roundingIndicatorBadge); + // Check initial value of ratioSource to set badge visbility + let initialRatio = this.ratioSource.getVal(); + if (!initialRatio.includes(":")) { + this.roundingIndicatorBadge.style.display = "none"; + } + //Adjust badge icon if rounding is on + if (this.roundingSource.getVal()) { + this.roundingIndicatorBadge.classList.add("active"); + this.roundingIndicatorBadge.innerText = "⚠️"; + } + //Make badge clickable to toggle setting + this.roundingIndicatorBadge.addEventListener("click", () => { + this.roundingSource.setVal(!this.roundingSource.getVal()); + }); + //Make rounding setting toggle badge text and style if setting changes + this.roundingSource.child.addEventListener("change", () => { + if 
(this.roundingSource.getVal()) { + this.roundingIndicatorBadge.classList.add("active"); + this.roundingIndicatorBadge.innerText = "⚠️"; + } + else { + this.roundingIndicatorBadge.classList.remove("active"); + this.roundingIndicatorBadge.innerText = "📐"; + } + this.adjustStepSize(); + }); + //Other event listeners + this.widthSlider.childRangeField.addEventListener("change", (e) => { e.preventDefault(); this.resize("width"); }); + this.widthSlider.childNumField.addEventListener("change", (e) => { e.preventDefault(); this.resize("width"); }); + this.heightSlider.childRangeField.addEventListener("change", (e) => { e.preventDefault(); this.resize("height"); }); + this.heightSlider.childNumField.addEventListener("change", (e) => { e.preventDefault(); this.resize("height"); }); + this.ratioSource.childSelector.addEventListener("change", (e) => { + e.preventDefault(); + //Check and toggle display of badge conditionally on dropdown selection + if (!this.ratioSource.getVal().includes(":")) { + this.roundingIndicatorBadge.style.display = 'none'; + } + else { + this.roundingIndicatorBadge.style.display = 'block'; + } + this.adjustStepSize(); + }); } resize(dimension) { + //For moving slider or number field let val = this.ratioSource.getVal(); if (!val.includes(":")) { return; @@ -16,26 +63,119 @@ class AspectRatioSliderController { let [width, height] = val.split(":").map(Number); let ratio = width / height; if (dimension == 'width') { - this.heightSlider.setVal(Math.round(parseFloat(this.widthSlider.getVal()) / ratio).toString()); + let newHeight = parseInt(this.widthSlider.getVal()) / ratio; + if (this.roundingSource.getVal()) { + switch (this.roundingMethod.getVal()) { + case 'Round': + newHeight = Math.round(newHeight / 8) * 8; + break; + case 'Ceiling': + newHeight = Math.ceil(newHeight / 8) * 8; + break; + case 'Floor': + newHeight = Math.floor(newHeight / 8) * 8; + break; + } + } + this.heightSlider.setVal(newHeight.toString()); } else if (dimension == "height") { - 
this.widthSlider.setVal(Math.round(parseFloat(this.heightSlider.getVal()) * ratio).toString()); + let newWidth = parseInt(this.heightSlider.getVal()) * ratio; + if (this.roundingSource.getVal()) { + switch (this.roundingMethod.getVal()) { + case 'Round': + newWidth = Math.round(newWidth / 8) * 8; + break; + case 'Ceiling': + newWidth = Math.ceil(newWidth / 8) * 8; + break; + case 'Floor': + newWidth = Math.floor(newWidth / 8) * 8; + break; + } + } + this.widthSlider.setVal(newWidth.toString()); } } - static observeStartup(widthSliderId, heightSliderId, ratioSourceId) { + adjustStepSize() { + /* Sets scales/precision/rounding steps;*/ + let val = this.ratioSource.getVal(); + if (!val.includes(":")) { + //If ratio unlocked + this.widthSlider.childRangeField.step = "8"; + this.widthSlider.childRangeField.min = "64"; + this.widthSlider.childNumField.step = "8"; + this.widthSlider.childNumField.min = "64"; + this.heightSlider.childRangeField.step = "8"; + this.heightSlider.childRangeField.min = "64"; + this.heightSlider.childNumField.step = "8"; + this.heightSlider.childNumField.min = "64"; + return; + } + //Format string and calculate step sizes + let [width, height] = val.split(":").map(Number); + let decimalPlaces = (width.toString().split(".")[1] || []).length; + //keep upto 6 decimal points of precision of ratio + //euclidean gcd does not support floats, so we scale it up + decimalPlaces = decimalPlaces > 6 ? 
6 : decimalPlaces; + let gcd = this.gcd(width * 10 ** decimalPlaces, height * 10 ** decimalPlaces) / 10 ** decimalPlaces; + let stepSize = 8 * height / gcd; + let stepSizeOther = 8 * width / gcd; + if (this.roundingSource.getVal()) { + //If rounding is on set/keep default stepsizes + this.widthSlider.childRangeField.step = "8"; + this.widthSlider.childRangeField.min = "64"; + this.widthSlider.childNumField.step = "8"; + this.widthSlider.childNumField.min = "64"; + this.heightSlider.childRangeField.step = "8"; + this.heightSlider.childRangeField.min = "64"; + this.heightSlider.childNumField.step = "8"; + this.heightSlider.childNumField.min = "64"; + } + else { + //if rounding is off, set step sizes so they enforce snapping + //min is changed, because it offsets snap positions + this.widthSlider.childRangeField.step = stepSizeOther.toString(); + this.widthSlider.childRangeField.min = stepSizeOther.toString(); + this.widthSlider.childNumField.step = stepSizeOther.toString(); + this.widthSlider.childNumField.min = stepSizeOther.toString(); + this.heightSlider.childRangeField.step = stepSize.toString(); + this.heightSlider.childRangeField.min = stepSize.toString(); + this.heightSlider.childNumField.step = stepSize.toString(); + this.heightSlider.childNumField.min = stepSize.toString(); + } + let currentWidth = parseInt(this.widthSlider.getVal()); + //Rounding treated kinda like pythons divmod + let stepsTaken = Math.round(currentWidth / stepSizeOther); + //this snaps it to closest rule matches (rules being html step points, and ratio) + let newWidth = stepsTaken * stepSizeOther; + this.widthSlider.setVal(newWidth.toString()); + this.heightSlider.setVal(Math.round(newWidth / (width / height)).toString()); + } + gcd(a, b) { + //euclidean gcd + if (b === 0) { + return a; + } + return this.gcd(b, a % b); + } + static observeStartup(widthSliderId, heightSliderId, ratioSourceId, roundingSourceId, roundingMethodId) { let observer = new MutationObserver(() => { let widthSlider 
= document.querySelector("gradio-app").shadowRoot.getElementById(widthSliderId); let heightSlider = document.querySelector("gradio-app").shadowRoot.getElementById(heightSliderId); let ratioSource = document.querySelector("gradio-app").shadowRoot.getElementById(ratioSourceId); - if (widthSlider && heightSlider && ratioSource) { + let roundingSource = document.querySelector("gradio-app").shadowRoot.getElementById(roundingSourceId); + let roundingMethod = document.querySelector("gradio-app").shadowRoot.getElementById(roundingMethodId); + if (widthSlider && heightSlider && ratioSource && roundingSource && roundingMethod) { observer.disconnect(); - new AspectRatioSliderController(widthSlider, heightSlider, ratioSource); + new AspectRatioSliderController(widthSlider, heightSlider, ratioSource, roundingSource, roundingMethod); } }); observer.observe(gradioApp(), { childList: true, subtree: true }); } } document.addEventListener("DOMContentLoaded", () => { - AspectRatioSliderController.observeStartup("txt2img_width", "txt2img_height", "txt2img_ratio"); - AspectRatioSliderController.observeStartup("img2img_width", "img2img_height", "img2img_ratio"); + //Register mutation observer for self start-up; + AspectRatioSliderController.observeStartup("txt2img_width", "txt2img_height", "txt2img_ratio", "setting_aspect_ratios_rounding", "setting_aspect_ratios_rounding_method"); + AspectRatioSliderController.observeStartup("img2img_width", "img2img_height", "img2img_ratio", "setting_aspect_ratios_rounding", "setting_aspect_ratios_rounding_method"); }); diff --git a/modules/shared.py b/modules/shared.py index ead7be36..fcd6eadf 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -140,16 +140,21 @@ ui_reorder_categories = [ ] aspect_ratio_defaults = [ - "🔓" + "🔓", "1:1", - "1:2", - "2:1", - "2:3", "3:2", "4:3", "5:4", - "9:16", "16:9", + "9:16", + "1.85:1", + "2.35:1", + "2.39:1", + "2.40:1", + "21:9", + "1.375:1", + "1.66:1", + "1.75:1" ] cmd_opts.disable_extension_access = 
(cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access @@ -469,6 +474,8 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), + "aspect_ratios_rounding": OptionInfo(True, "Round aspect ratios for more flexibility?", gr.Checkbox), + "aspect_ratios_rounding_method": OptionInfo("Ceiling", "Aspect ratios rounding method", gr.Radio,{"choices": ["Round", "Ceiling", "Floor"]}), "aspect_ratios": OptionInfo(", ".join(aspect_ratio_defaults), "txt2img/img2img aspect ratios"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), diff --git a/modules/ui.py b/modules/ui.py index 6853485c..873c857a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -483,8 +483,9 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") - aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="txt2img_ratio", show_label=False, label="Aspect Ratio") + with gr.Column(elem_id="txt2img_size_toolbox", scale=0): + aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="txt2img_ratio", 
show_label=False, label="Aspect Ratio") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") if opts.dimensions_and_batch_together: with gr.Column(elem_id="txt2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") @@ -762,8 +763,9 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="img2img_ratio", show_label=False, label="Aspect Ratio") + with gr.Column(elem_id="img2img_size_toolbox", scale=0): + aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="img2img_ratio", show_label=False, label="Aspect Ratio") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") if opts.dimensions_and_batch_together: with gr.Column(elem_id="img2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") diff --git a/style.css b/style.css index 05572f66..5b979841 100644 --- a/style.css +++ b/style.css @@ -747,6 +747,52 @@ footer { margin-left: 0em; } +#txt2img_size_toolbox, #img2img_size_toolbox{ + min-width: unset !important; + gap: 0; +} + +#txt2img_ratio, #img2img_ratio { + padding: 0px; + min-width: unset; + max-width: fit-content; +} +#txt2img_ratio select, #img2img_ratio select{ + -o-appearance: none; + -ms-appearance: none; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + background-image: unset; + padding-right: unset; + min-width: 40px; + max-width: 40px; + min-height: 40px; + max-height: 40px; + 
line-height: 40px; + padding: 0; + text-align: center; +} +.rounding-badge { + display: inline-block; + border-radius: 0px; + background-color: #ccc; + cursor: pointer; + position: absolute; + top: -10px; + right: -10px; + width: 20px; + height: 20px; + padding: 1px; + line-height: 16px; + font-size: 14px; +} + +.rounding-badge.active { + background-color: #007bff; + border-radius: 50%; +} + .inactive{ opacity: 0.5; } From 463ab841803b45ea421ad7f9769b836f3000ef8c Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 9 Feb 2023 02:13:49 -0700 Subject: [PATCH 20/53] Convert 16-bit greyscale to 8-bit when saving as JPEG --- modules/images.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/images.py b/modules/images.py index c2ca8849..c0c68913 100644 --- a/modules/images.py +++ b/modules/images.py @@ -553,6 +553,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i elif extension.lower() in (".jpg", ".jpeg", ".webp"): if image_to_save.mode == 'RGBA': image_to_save = image_to_save.convert("RGB") + elif image_to_save.mode == 'I;16': + image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("L") image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality) From b313221ca6d12e441f2f5041490e2fc665ff5f60 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 10 Feb 2023 08:34:21 +0900 Subject: [PATCH 21/53] =?UTF-8?q?remove=20Badge=20color=20and=20=E2=9A=A0?= =?UTF-8?q?=EF=B8=8F=20->=F0=9F=93=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- javascript/aspectRatioSliders.js | 10 +++++----- style.css | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/javascript/aspectRatioSliders.js b/javascript/aspectRatioSliders.js index d9c4f675..3def5158 100644 --- a/javascript/aspectRatioSliders.js +++ b/javascript/aspectRatioSliders.js @@ -18,8 +18,8 @@ class AspectRatioSliderController { } //Adjust 
badge icon if rounding is on if (this.roundingSource.getVal()) { - this.roundingIndicatorBadge.classList.add("active"); - this.roundingIndicatorBadge.innerText = "⚠️"; + //this.roundingIndicatorBadge.classList.add("active"); + this.roundingIndicatorBadge.innerText = "📏"; } //Make badge clickable to toggle setting this.roundingIndicatorBadge.addEventListener("click", () => { @@ -28,11 +28,11 @@ class AspectRatioSliderController { //Make rounding setting toggle badge text and style if setting changes this.roundingSource.child.addEventListener("change", () => { if (this.roundingSource.getVal()) { - this.roundingIndicatorBadge.classList.add("active"); - this.roundingIndicatorBadge.innerText = "⚠️"; + //this.roundingIndicatorBadge.classList.add("active"); + this.roundingIndicatorBadge.innerText = "📏"; } else { - this.roundingIndicatorBadge.classList.remove("active"); + //this.roundingIndicatorBadge.classList.remove("active"); this.roundingIndicatorBadge.innerText = "📐"; } this.adjustStepSize(); diff --git a/style.css b/style.css index 5b979841..55baefb7 100644 --- a/style.css +++ b/style.css @@ -776,7 +776,7 @@ footer { .rounding-badge { display: inline-block; border-radius: 0px; - background-color: #ccc; + /*background-color: #ccc;*/ cursor: pointer; position: absolute; top: -10px; From 73a97cac11456adcda05872364c605ebcd3982ad Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 9 Feb 2023 17:04:55 -0700 Subject: [PATCH 22/53] Use RGB for webp Doesn't support greyscale (L) --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index c0c68913..b335502b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -554,7 +554,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i if image_to_save.mode == 'RGBA': image_to_save = image_to_save.convert("RGB") elif image_to_save.mode == 'I;16': - image_to_save = image_to_save.point(lambda p: p * 
0.0038910505836576).convert("L") + image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L") image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality) From 33947a3c6631179bba02285f09b5853b9ecf5782 Mon Sep 17 00:00:00 2001 From: minux302 Date: Fri, 10 Feb 2023 17:58:35 +0900 Subject: [PATCH 23/53] fix arg for hypernetwork train api --- modules/api/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index eb7b1da5..5a9ac5f1 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -498,7 +498,7 @@ class Api: if not apply_optimizations: sd_hijack.undo_optimizations() try: - hypernetwork, filename = train_hypernetwork(*args) + hypernetwork, filename = train_hypernetwork(**args) except Exception as e: error = e finally: From bf9b1d64a3101b592713f584d5ef0533b6df1e0f Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 10 Feb 2023 15:27:08 -0700 Subject: [PATCH 24/53] Fix face restorers setting --- modules/shared.py | 2 +- webui.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 79fbf724..8bc6923a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -364,7 +364,7 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { })) options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), + "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM 
into RAM after processing"), })) diff --git a/webui.py b/webui.py index 5b5c2139..077c10be 100644 --- a/webui.py +++ b/webui.py @@ -97,7 +97,6 @@ def initialize(): modules.sd_models.setup_model() codeformer.setup_model(cmd_opts.codeformer_models_path) gfpgan.setup_model(cmd_opts.gfpgan_models_path) - shared.face_restorers.append(modules.face_restoration.FaceRestoration()) modelloader.list_builtin_upscalers() modules.scripts.load_scripts() From 9e27af76d14dc6d8a5062ab9c0db128a917ada17 Mon Sep 17 00:00:00 2001 From: RcINS Date: Sat, 11 Feb 2023 10:12:16 +0800 Subject: [PATCH 25/53] Fix DPM++ SDE not deterministic across different batch sizes (#5210) --- modules/sd_samplers_kdiffusion.py | 37 ++++++++++++++++++++++++------- modules/shared.py | 1 + 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index f076fc55..d143d41e 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -269,6 +269,15 @@ class KDiffusionSampler: return sigmas + def create_noise_sampler(self, x, sigmas, seeds): + """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" + if shared.opts.no_dpmpp_sde_batch_determinism: + return None + + from k_diffusion.sampling import BrownianTreeNoiseSampler + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seeds) + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) @@ -278,18 +287,24 @@ class KDiffusionSampler: xi = x + noise * sigma_sched[0] extra_params_kwargs = self.initialize(p) - if 'sigma_min' in inspect.signature(self.func).parameters: + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: ## last sigma is zero which isn't allowed by DPM Fast & 
Adaptive so taking value before last extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in inspect.signature(self.func).parameters: + if 'sigma_max' in parameters: extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in inspect.signature(self.func).parameters: + if 'n' in parameters: extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in inspect.signature(self.func).parameters: + if 'sigma_sched' in parameters: extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in inspect.signature(self.func).parameters: + if 'sigmas' in parameters: extra_params_kwargs['sigmas'] = sigma_sched + if self.funcname == 'sample_dpmpp_sde': + noise_sampler = self.create_noise_sampler(x, sigmas, p.all_seeds) + extra_params_kwargs['noise_sampler'] = noise_sampler + self.model_wrap_cfg.init_latent = x self.last_latent = x extra_args={ @@ -303,7 +318,7 @@ class KDiffusionSampler: return samples - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None): + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps = steps or p.steps sigmas = self.get_sigmas(p, steps) @@ -311,14 +326,20 @@ class KDiffusionSampler: x = x * sigmas[0] extra_params_kwargs = self.initialize(p) - if 'sigma_min' in inspect.signature(self.func).parameters: + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in inspect.signature(self.func).parameters: + if 'n' in parameters: extra_params_kwargs['n'] = steps else: extra_params_kwargs['sigmas'] = sigmas + if self.funcname == 'sample_dpmpp_sde': + noise_sampler = self.create_noise_sampler(x, sigmas, p.all_seeds) + extra_params_kwargs['noise_sampler'] = noise_sampler + self.last_latent = x samples = self.launch_sampling(steps, lambda: 
self.func(self.model_wrap_cfg, x, extra_args={ 'cond': conditioning, diff --git a/modules/shared.py b/modules/shared.py index 79fbf724..22344431 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -414,6 +414,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { options_templates.update(options_section(('compatibility', "Compatibility"), { "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), + "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), })) From b78c5e87baaf8c88d039bf60082c3b5ae35ec4ff Mon Sep 17 00:00:00 2001 From: opparco Date: Sat, 11 Feb 2023 11:18:38 +0900 Subject: [PATCH 26/53] Add cfg_denoised_callback --- modules/script_callbacks.py | 29 +++++++++++++++++++++++++++++ modules/sd_samplers_kdiffusion.py | 4 ++++ 2 files changed, 33 insertions(+) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 4bb45ec7..edd0e2a7 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -46,6 +46,18 @@ class CFGDenoiserParams: """Total number of sampling steps planned""" +class CFGDenoisedParams: + def __init__(self, x, sampling_step, total_sampling_steps): + self.x = x + """Latent image representation in the process of being denoised""" + + self.sampling_step = sampling_step + """Current Sampling step number""" + + self.total_sampling_steps = total_sampling_steps + """Total number of sampling steps planned""" + + class UiTrainTabParams: def __init__(self, txt2img_preview_params): self.txt2img_preview_params = txt2img_preview_params @@ -68,6 +80,7 @@ callback_map = 
dict( callbacks_before_image_saved=[], callbacks_image_saved=[], callbacks_cfg_denoiser=[], + callbacks_cfg_denoised=[], callbacks_before_component=[], callbacks_after_component=[], callbacks_image_grid=[], @@ -150,6 +163,14 @@ def cfg_denoiser_callback(params: CFGDenoiserParams): report_exception(c, 'cfg_denoiser_callback') +def cfg_denoised_callback(params: CFGDenoisedParams): + for c in callback_map['callbacks_cfg_denoised']: + try: + c.callback(params) + except Exception: + report_exception(c, 'cfg_denoised_callback') + + def before_component_callback(component, **kwargs): for c in callback_map['callbacks_before_component']: try: @@ -283,6 +304,14 @@ def on_cfg_denoiser(callback): add_callback(callback_map['callbacks_cfg_denoiser'], callback) +def on_cfg_denoised(callback): + """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. + The callback is called with one argument: + - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details. + """ + add_callback(callback_map['callbacks_cfg_denoised'], callback) + + def on_before_component(callback): """register a function to be called before a component is created. 
The callback is called with arguments: diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index f076fc55..28847397 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -8,6 +8,7 @@ from modules import prompt_parser, devices, sd_samplers_common from modules.shared import opts, state import modules.shared as shared from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback +from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback samplers_k_diffusion = [ ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}), @@ -136,6 +137,9 @@ class CFGDenoiser(torch.nn.Module): x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]}) + denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) + cfg_denoised_callback(denoised_params) + devices.test_for_nans(x_out, "unet") if opts.live_preview_content == "Prompt": From 0a4917ac4021eb6cf0da27c060c13bdd5b5d2a9f Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Mon, 13 Feb 2023 03:33:28 -0800 Subject: [PATCH 27/53] Apply extra networks per-batch instead of per-session (fixes wildcards) --- modules/processing.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index e1b53ac0..e4b989d4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -543,8 +543,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings: model_hijack.embedding_db.load_textual_inversion_embeddings() - _, extra_network_data = extra_networks.parse_prompts(p.all_prompts[0:1]) - if p.scripts is not None: p.scripts.process(p) @@ -582,9 +580,6 @@ def process_images_inner(p: 
StableDiffusionProcessing) -> Processed: if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": sd_vae_approx.model() - if not p.disable_extra_networks: - extra_networks.activate(p, extra_network_data) - with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) @@ -609,7 +604,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if len(prompts) == 0: break - prompts, _ = extra_networks.parse_prompts(prompts) + prompts, extra_network_data = extra_networks.parse_prompts(prompts) + + if not p.disable_extra_networks: + with devices.autocast(): + extra_networks.activate(p, extra_network_data) if p.scripts is not None: p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) From 7893533674e37de258d647f22b06430e0f924bd7 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Mon, 13 Feb 2023 11:04:34 -0500 Subject: [PATCH 28/53] add version to extensions table --- modules/extensions.py | 6 ++++++ modules/ui_extensions.py | 8 +++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 5e12b1aa..1975fca1 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -2,6 +2,7 @@ import os import sys import traceback +import time import git from modules import paths, shared @@ -25,6 +26,7 @@ class Extension: self.status = '' self.can_update = False self.is_builtin = is_builtin + self.version = '' repo = None try: @@ -40,6 +42,10 @@ class Extension: try: self.remote = next(repo.remote().urls, None) self.status = 'unknown' + head = repo.head.commit + ts = time.asctime(time.gmtime(repo.head.commit.committed_date)) + self.version = f'{head.hexsha[:7]} ({ts})' + except Exception: self.remote = None diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 37d30e1f..bd4308ef 100644 --- a/modules/ui_extensions.py +++ 
b/modules/ui_extensions.py @@ -80,6 +80,7 @@ def extension_table(): Extension URL + Version Update @@ -87,11 +88,7 @@ def extension_table(): """ for ext in extensions.extensions: - remote = "" - if ext.is_builtin: - remote = "built-in" - elif ext.remote: - remote = f"""{html.escape("built-in" if ext.is_builtin else ext.remote or '')}""" + remote = f"""{html.escape("built-in" if ext.is_builtin else ext.remote or '')}""" if ext.can_update: ext_status = f"""""" @@ -102,6 +99,7 @@ def extension_table(): {remote} + {ext.version} {ext_status} """ From 1615f786eeb5e407f7414835fbb73e7b6f8337de Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 14 Feb 2023 20:54:02 -0700 Subject: [PATCH 29/53] Download model if none are found --- README.md | 3 +-- modules/sd_models.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2149dcc5..f0dcb104 100644 --- a/README.md +++ b/README.md @@ -104,8 +104,7 @@ Alternatively, use online services (like Google Colab): 1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH" 2. Install [git](https://git-scm.com/download/win). 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. -4. Place stable diffusion checkpoint (`model.ckpt`) in the `models/Stable-diffusion` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it). -5. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. +4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. ### Automatic Installation on Linux 1. 
Install the dependencies: diff --git a/modules/sd_models.py b/modules/sd_models.py index d847d358..07072e5c 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -105,7 +105,7 @@ def checkpoint_tiles(): def list_models(): checkpoints_list.clear() checkpoint_alisases.clear() - model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], ext_blacklist=[".vae.safetensors"]) + model_list = modelloader.load_models(model_path=model_path, model_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors", command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.safetensors"]) cmd_ckpt = shared.cmd_opts.ckpt if os.path.exists(cmd_ckpt): From f55a7e04d812e8cb07d622efb321abbad54d2d4a Mon Sep 17 00:00:00 2001 From: RcINS Date: Wed, 15 Feb 2023 16:57:18 +0800 Subject: [PATCH 30/53] Fix error when batch count > 1 --- modules/sd_samplers_kdiffusion.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index d143d41e..86d657e2 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -269,14 +269,15 @@ class KDiffusionSampler: return sigmas - def create_noise_sampler(self, x, sigmas, seeds): + def create_noise_sampler(self, x, sigmas, p): """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" if shared.opts.no_dpmpp_sde_batch_determinism: return None from k_diffusion.sampling import BrownianTreeNoiseSampler sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seeds) + current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] + return BrownianTreeNoiseSampler(x, sigma_min, 
sigma_max, seed=current_iter_seeds) def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) @@ -302,7 +303,7 @@ class KDiffusionSampler: extra_params_kwargs['sigmas'] = sigma_sched if self.funcname == 'sample_dpmpp_sde': - noise_sampler = self.create_noise_sampler(x, sigmas, p.all_seeds) + noise_sampler = self.create_noise_sampler(x, sigmas, p) extra_params_kwargs['noise_sampler'] = noise_sampler self.model_wrap_cfg.init_latent = x @@ -337,7 +338,7 @@ class KDiffusionSampler: extra_params_kwargs['sigmas'] = sigmas if self.funcname == 'sample_dpmpp_sde': - noise_sampler = self.create_noise_sampler(x, sigmas, p.all_seeds) + noise_sampler = self.create_noise_sampler(x, sigmas, p) extra_params_kwargs['noise_sampler'] = noise_sampler self.last_latent = x From c4bfd20f317243d7ceac6e2fbf30b18bbebd3e6d Mon Sep 17 00:00:00 2001 From: Shondoit Date: Thu, 12 Jan 2023 15:03:46 +0100 Subject: [PATCH 31/53] Hijack to add weighted_forward to model: return loss * weight map --- modules/sd_hijack.py | 52 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 8fdc5990..57ed5635 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -1,5 +1,6 @@ import torch from torch.nn.functional import silu +from types import MethodType import modules.textual_inversion.textual_inversion from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint @@ -76,6 +77,54 @@ def fix_checkpoint(): pass +def weighted_loss(sd_model, pred, target, mean=True): + #Calculate the weight normally, but ignore the mean + loss = sd_model._old_get_loss(pred, target, mean=False) + + #Check if we have weights available + weight = getattr(sd_model, '_custom_loss_weight', None) + if weight is not None: + loss *= weight + + #Return the loss, as mean if specified + return loss.mean() if 
mean else loss + +def weighted_forward(sd_model, x, c, w, *args, **kwargs): + try: + #Temporarily append weights to a place accessible during loss calc + sd_model._custom_loss_weight = w + + #Replace 'get_loss' with a weight-aware one. Otherwise we need to reimplement 'forward' completely + #Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set + if not hasattr(sd_model, '_old_get_loss'): + sd_model._old_get_loss = sd_model.get_loss + sd_model.get_loss = MethodType(weighted_loss, sd_model) + + #Run the standard forward function, but with the patched 'get_loss' + return sd_model.forward(x, c, *args, **kwargs) + finally: + try: + #Delete temporary weights if appended + del sd_model._custom_loss_weight + except AttributeError as e: + pass + + #If we have an old loss function, reset the loss function to the original one + if hasattr(sd_model, '_old_get_loss'): + sd_model.get_loss = sd_model._old_get_loss + del sd_model._old_get_loss + +def apply_weighted_forward(sd_model): + #Add new function 'weighted_forward' that can be called to calc weighted loss + sd_model.weighted_forward = MethodType(weighted_forward, sd_model) + +def undo_weighted_forward(sd_model): + try: + del sd_model.weighted_forward + except AttributeError as e: + pass + + class StableDiffusionModelHijack: fixes = None comments = [] @@ -104,6 +153,8 @@ class StableDiffusionModelHijack: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + apply_weighted_forward(m) + self.optimization_method = apply_optimizations() self.clip = m.cond_stage_model @@ -132,6 +183,7 @@ class StableDiffusionModelHijack: m.cond_stage_model = m.cond_stage_model.wrapped undo_optimizations() + undo_weighted_forward(m) self.apply_circular(False) self.layers = None From 21642000b33a3069e3408ea1a50239006176badb Mon Sep 17 00:00:00 2001 From: 
Shondoit Date: Thu, 12 Jan 2023 15:29:19 +0100 Subject: [PATCH 32/53] Add PNG alpha channel as weight maps to data entries --- modules/textual_inversion/dataset.py | 51 +++++++++++++++++++++------- 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index d31963d4..f4ce4552 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -19,9 +19,10 @@ re_numbers_at_start = re.compile(r"^[-\d]+\s*") class DatasetEntry: - def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None): + def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None, weight=None): self.filename = filename self.filename_text = filename_text + self.weight = weight self.latent_dist = latent_dist self.latent_sample = latent_sample self.cond = cond @@ -56,10 +57,16 @@ class PersonalizedBase(Dataset): print("Preparing dataset...") for path in tqdm.tqdm(self.image_paths): + alpha_channel = None if shared.state.interrupted: raise Exception("interrupted") try: - image = Image.open(path).convert('RGB') + image = Image.open(path) + #Currently does not work for single color transparency + #We would need to read image.info['transparency'] for that + if 'A' in image.getbands(): + alpha_channel = image.getchannel('A') + image = image.convert('RGB') if not varsize: image = image.resize((width, height), PIL.Image.BICUBIC) except Exception: @@ -87,17 +94,33 @@ class PersonalizedBase(Dataset): with devices.autocast(): latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0)) - if latent_sampling_method == "once" or (latent_sampling_method == "deterministic" and not isinstance(latent_dist, DiagonalGaussianDistribution)): - latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) - latent_sampling_method 
= "once" - entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample) - elif latent_sampling_method == "deterministic": - # Works only for DiagonalGaussianDistribution - latent_dist.std = 0 - latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) - entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample) - elif latent_sampling_method == "random": - entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist) + #Perform latent sampling, even for random sampling. + #We need the sample dimensions for the weights + if latent_sampling_method == "deterministic": + if isinstance(latent_dist, DiagonalGaussianDistribution): + # Works only for DiagonalGaussianDistribution + latent_dist.std = 0 + else: + latent_sampling_method = "once" + latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) + + if alpha_channel is not None: + channels, *latent_size = latent_sample.shape + weight_img = alpha_channel.resize(latent_size) + npweight = np.array(weight_img).astype(np.float32) + #Repeat for every channel in the latent sample + weight = torch.tensor([npweight] * channels).reshape([channels] + latent_size) + #Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default. 
+ weight -= weight.min() + weight /= weight.mean() + else: + #If an image does not have a alpha channel, add a ones weight map anyway so we can stack it later + weight = torch.ones([channels] + latent_size) + + if latent_sampling_method == "random": + entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight) + else: + entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample, weight=weight) if not (self.tag_drop_out != 0 or self.shuffle_tags): entry.cond_text = self.create_text(filename_text) @@ -110,6 +133,7 @@ class PersonalizedBase(Dataset): del torchdata del latent_dist del latent_sample + del weight self.length = len(self.dataset) self.groups = list(groups.values()) @@ -195,6 +219,7 @@ class BatchLoader: self.cond_text = [entry.cond_text for entry in data] self.cond = [entry.cond for entry in data] self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1) + self.weight = torch.stack([entry.weight for entry in data]).squeeze(1) #self.emb_index = [entry.emb_index for entry in data] #print(self.latent_sample.device) From bc50936745e1a349afdc28cf1540109ba20bc71a Mon Sep 17 00:00:00 2001 From: Shondoit Date: Thu, 12 Jan 2023 15:34:11 +0100 Subject: [PATCH 33/53] Call weighted_forward during training --- modules/hypernetworks/hypernetwork.py | 3 ++- modules/textual_inversion/textual_inversion.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 825a93b2..9c79b7d0 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -640,13 +640,14 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) + w = batch.weight.to(devices.device, non_blocking=pin_memory) if tag_drop_out != 0 or shuffle_tags: 
shared.sd_model.cond_stage_model.to(devices.device) c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory) shared.sd_model.cond_stage_model.to(devices.cpu) else: c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory) - loss = shared.sd_model(x, c)[0] / gradient_step + loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step del x del c diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index a1a406c2..8853c868 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -480,6 +480,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) + w = batch.weight.to(devices.device, non_blocking=pin_memory) c = shared.sd_model.cond_stage_model(batch.cond_text) if is_training_inpainting_model: @@ -490,7 +491,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st else: cond = c - loss = shared.sd_model(x, cond)[0] / gradient_step + loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step del x _loss_step += loss.item() From edb10092de516dda5271130ed53628387780a859 Mon Sep 17 00:00:00 2001 From: Shondoit Date: Thu, 12 Jan 2023 16:29:00 +0100 Subject: [PATCH 34/53] Add ability to choose using weighted loss or not --- modules/hypernetworks/hypernetwork.py | 13 +++++++++---- modules/textual_inversion/dataset.py | 15 ++++++++++----- modules/textual_inversion/textual_inversion.py | 13 +++++++++---- modules/ui.py | 4 ++++ 4 files changed, 32 insertions(+), 13 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 9c79b7d0..f4fb69e0 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -496,7 +496,7 @@ def create_hypernetwork(name, 
enable_sizes, overwrite_old, layer_structure=None, shared.reload_hypernetworks() -def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
from modules import images @@ -554,7 +554,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi pin_memory = shared.opts.pin_memory - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize) + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight) if shared.opts.save_training_settings_to_txt: saved_params = dict( @@ -640,14 +640,19 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) - w = batch.weight.to(devices.device, non_blocking=pin_memory) + if use_weight: + w = batch.weight.to(devices.device, non_blocking=pin_memory) if tag_drop_out != 0 or shuffle_tags: shared.sd_model.cond_stage_model.to(devices.device) c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory) shared.sd_model.cond_stage_model.to(devices.cpu) else: c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory) - loss = 
shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step + if use_weight: + loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step + del w + else: + loss = shared.sd_model.forward(x, c)[0] / gradient_step del x del c diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index f4ce4552..1568b2b8 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -31,7 +31,7 @@ class DatasetEntry: class PersonalizedBase(Dataset): - def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False): + def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False, use_weight=False): re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None self.placeholder_token = placeholder_token @@ -64,7 +64,7 @@ class PersonalizedBase(Dataset): image = Image.open(path) #Currently does not work for single color transparency #We would need to read image.info['transparency'] for that - if 'A' in image.getbands(): + if use_weight and 'A' in image.getbands(): alpha_channel = image.getchannel('A') image = image.convert('RGB') if not varsize: @@ -104,7 +104,7 @@ class PersonalizedBase(Dataset): latent_sampling_method = "once" latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) - if alpha_channel is not None: + if use_weight and alpha_channel is not None: channels, *latent_size = latent_sample.shape weight_img = alpha_channel.resize(latent_size) npweight = 
np.array(weight_img).astype(np.float32) @@ -113,9 +113,11 @@ class PersonalizedBase(Dataset): #Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default. weight -= weight.min() weight /= weight.mean() - else: + elif use_weight: #If an image does not have a alpha channel, add a ones weight map anyway so we can stack it later weight = torch.ones([channels] + latent_size) + else: + weight = None if latent_sampling_method == "random": entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight) @@ -219,7 +221,10 @@ class BatchLoader: self.cond_text = [entry.cond_text for entry in data] self.cond = [entry.cond for entry in data] self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1) - self.weight = torch.stack([entry.weight for entry in data]).squeeze(1) + if all(entry.weight is not None for entry in data): + self.weight = torch.stack([entry.weight for entry in data]).squeeze(1) + else: + self.weight = None #self.emb_index = [entry.emb_index for entry in data] #print(self.latent_sample.device) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 8853c868..c63c7d1d 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -351,7 +351,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat assert log_directory, "Log directory is empty" -def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, 
preview_width, preview_height): +def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): save_embedding_every = save_embedding_every or 0 create_image_every = create_image_every or 0 template_file = textual_inversion_templates.get(template_filename, None) @@ -410,7 +410,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st pin_memory = shared.opts.pin_memory - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize) + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight) if shared.opts.save_training_settings_to_txt: save_settings_to_file(log_directory, 
{**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()}) @@ -480,7 +480,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) - w = batch.weight.to(devices.device, non_blocking=pin_memory) + if use_weight: + w = batch.weight.to(devices.device, non_blocking=pin_memory) c = shared.sd_model.cond_stage_model(batch.cond_text) if is_training_inpainting_model: @@ -491,7 +492,11 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st else: cond = c - loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step + if use_weight: + loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step + del w + else: + loss = shared.sd_model.forward(x, cond)[0] / gradient_step del x _loss_step += loss.item() diff --git a/modules/ui.py b/modules/ui.py index f5df1ffe..efb87c23 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1191,6 +1191,8 @@ def create_ui(): create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") @@ -1304,6 +1306,7 @@ def create_ui(): shuffle_tags, tag_drop_out, latent_sampling_method, + use_weight, create_image_every, save_embedding_every, template_file, @@ -1337,6 +1340,7 @@ def create_ui(): shuffle_tags, tag_drop_out, latent_sampling_method, + use_weight, create_image_every, save_embedding_every, template_file, From c4ea16a03f8f9c9a9add4049ce5be1a8060464f5 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Wed, 15 Feb 2023 19:47:30 -0700 Subject: [PATCH 35/53] Add ".vae.ckpt" to ext_blacklist --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 07072e5c..127e9663 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -105,7 +105,7 @@ def checkpoint_tiles(): def list_models(): checkpoints_list.clear() checkpoint_alisases.clear() - model_list = modelloader.load_models(model_path=model_path, model_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors", command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.safetensors"]) + model_list = modelloader.load_models(model_path=model_path, model_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors", command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) cmd_ckpt = shared.cmd_opts.ckpt if os.path.exists(cmd_ckpt): From 9691ca5f59d8a126dc6595b9a217b1c2e4e36776 Mon Sep 17 00:00:00 2001 From: asdfire1 <45483619+asdfire1@users.noreply.github.com> Date: Thu, 16 Feb 2023 11:59:14 +0100 Subject: [PATCH 36/53] Fixed the Linux installation instructions --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/README.md b/README.md index 2149dcc5..a0371a21 100644 --- a/README.md +++ b/README.md @@ -121,7 +121,8 @@ sudo pacman -S wget git python3 ```bash bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) ``` - +3. Place stable diffusion checkpoint (`model.ckpt`) in the `models/Stable-diffusion` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it). +4. Run `webui.sh`. ### Installation on Apple Silicon Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). From b20737815a55cd90cfab2a1a3d60d682a67b127a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Thu, 16 Feb 2023 21:44:46 -0800 Subject: [PATCH 37/53] Fix params.txt saving for infotexts modified by process_batch --- modules/processing.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index e1b53ac0..73894822 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -585,10 +585,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if not p.disable_extra_networks: extra_networks.activate(p, extra_network_data) - with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: - processed = Processed(p, [], p.seed, "") - file.write(processed.infotext(p, 0)) - if state.job_count == -1: state.job_count = p.n_iter @@ -614,6 +610,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.scripts is not None: p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) + # params.txt should be saved after scripts.process_batch, since the + # infotext could be modified by that callback + # Example: a wildcard processed by process_batch sets an extra model + # strength, which is saved as "Model Strength: 1.0" in the 
infotext + if n == 0: + with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: + processed = Processed(p, [], p.seed, "") + file.write(processed.infotext(p, 0)) + uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) From 9c7e6d5bbaa55205d0678369588c019108fb30a7 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 18 Feb 2023 11:31:02 -0500 Subject: [PATCH 38/53] store and print real torch version --- modules/ui.py | 7 ++++++- webui.py | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index f5df1ffe..d9df3781 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1779,10 +1779,15 @@ def versions_html(): else: xformers_version = "N/A" + try: + torch_version = torch.__long_version__ + except: + torch_version = torch.__version__ + return f""" python: {python_version}  •  -torch: {torch.__version__} +torch: {torch_version}  •  xformers: {xformers_version}  •  diff --git a/webui.py b/webui.py index 5b5c2139..2363ea4e 100644 --- a/webui.py +++ b/webui.py @@ -20,6 +20,7 @@ import torch # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors if ".dev" in torch.__version__ or "+git" in torch.__version__: + torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks From b5f69ad6afcdb8ba718da636b4a3f8aad5bd7cbf Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 08:38:38 +0300 Subject: [PATCH 39/53] simply long version display for torch in UI --- modules/ui.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 
d9df3781..54efb6a4 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1779,15 +1779,10 @@ def versions_html(): else: xformers_version = "N/A" - try: - torch_version = torch.__long_version__ - except: - torch_version = torch.__version__ - return f""" python: {python_version}  •  -torch: {torch_version} +torch: {getattr(torch, '__long_version__',torch.__version__)}  •  xformers: {xformers_version}  •  From 75e03785fe1fb47c3b288105e2638ef06d81aef2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 09:12:01 +0300 Subject: [PATCH 40/53] remove download instruction --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 5a726872..2ceb4d2d 100644 --- a/README.md +++ b/README.md @@ -120,8 +120,7 @@ sudo pacman -S wget git python3 ```bash bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) ``` -3. Place stable diffusion checkpoint (`model.ckpt`) in the `models/Stable-diffusion` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it). -4. Run `webui.sh`. +3. Run `webui.sh`. ### Installation on Apple Silicon Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). 
From a742facd95189eb078087bce9cafbfad0723cff4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 09:30:49 +0300 Subject: [PATCH 41/53] make PNG info tab work properly with parameter overrides --- modules/generation_parameters_copypaste.py | 7 ++++--- modules/ui.py | 10 +++++----- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fc9e17aa..89dc23bf 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -74,8 +74,8 @@ def image_from_url_text(filedata): return image -def add_paste_fields(tabname, init_img, fields): - paste_fields[tabname] = {"init_img": init_img, "fields": fields} +def add_paste_fields(tabname, init_img, fields, override_settings_component=None): + paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component} # backwards compatibility for existing extensions import modules.ui @@ -110,6 +110,7 @@ def connect_paste_params_buttons(): for binding in registered_param_bindings: destination_image_component = paste_fields[binding.tabname]["init_img"] fields = paste_fields[binding.tabname]["fields"] + override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"] destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None) destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) @@ -130,7 +131,7 @@ def connect_paste_params_buttons(): ) if binding.source_text_component is not None and fields is not None: - connect_paste(binding.paste_button, fields, binding.source_text_component, binding.override_settings_component, binding.tabname) + connect_paste(binding.paste_button, fields, binding.source_text_component, 
override_settings_component, binding.tabname) if binding.source_tabname is not None and fields is not None: paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) diff --git a/modules/ui.py b/modules/ui.py index 54efb6a4..2fdbda42 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -631,9 +631,9 @@ def create_ui(): (hr_resize_y, "Hires resize-2"), *modules.scripts.scripts_txt2img.infotext_fields ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields) + parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, override_settings_component=override_settings, + paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, )) txt2img_preview_params = [ @@ -963,10 +963,10 @@ def create_ui(): (mask_blur, "Mask blur"), *modules.scripts.scripts_img2img.infotext_fields ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields) + parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) + parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, override_settings_component=override_settings, + paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, )) modules.scripts.scripts_current = None From 15f4b217b10448449ae211df24c86a7cb0e187f4 
Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 09:50:14 +0300 Subject: [PATCH 42/53] fix the a merge conflict resolve i did that entirely breaks image generation --- modules/processing.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 269a1a9f..2009d3bf 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -580,9 +580,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": sd_vae_approx.model() - if not p.disable_extra_networks: - extra_networks.activate(p, extra_network_data) - if state.job_count == -1: state.job_count = p.n_iter From 164699163718a73a273b86f67a16d3807bccda0e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 09:54:04 +0300 Subject: [PATCH 43/53] display 8 (rather than 7) characters of the extension commit hash in the installed extensions table --- modules/extensions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/extensions.py b/modules/extensions.py index 1975fca1..3eef9eaf 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -44,7 +44,7 @@ class Extension: self.status = 'unknown' head = repo.head.commit ts = time.asctime(time.gmtime(repo.head.commit.committed_date)) - self.version = f'{head.hexsha[:7]} ({ts})' + self.version = f'{head.hexsha[:8]} ({ts})' except Exception: self.remote = None From fb2354cb2ae47f9e9b70f0e04f34925bbb31b1ac Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 10:12:45 +0300 Subject: [PATCH 44/53] reword settings for 4chan export, remove unneded try/excepts, add try/except for actually saving JPG --- modules/images.py | 24 +++++++++--------------- modules/shared.py | 6 +++--- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/modules/images.py b/modules/images.py index 34d08b73..dcf5d90c 100644 
--- a/modules/images.py +++ b/modules/images.py @@ -18,7 +18,7 @@ import string import json import hashlib -from modules import sd_samplers, shared, script_callbacks +from modules import sd_samplers, shared, script_callbacks, errors from modules.shared import opts, cmd_opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) @@ -575,25 +575,19 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image.already_saved_as = fullfn - try: - target_side_length = int(opts.target_side_length) - except ValueError: - target_side_length = 4000 - try: - img_downscale_threshold = float(opts.img_downscale_threshold) - except ValueError: - img_downscale_threshold = 4 - - oversize = image.width > target_side_length or image.height > target_side_length - if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > img_downscale_threshold * 1024 * 1024): + oversize = image.width > opts.target_side_length or image.height > opts.target_side_length + if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024): ratio = image.width / image.height if oversize and ratio > 1: - image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS) + image = image.resize((opts.target_side_length, image.height * opts.target_side_length // image.width), LANCZOS) elif oversize: - image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS) + image = image.resize((image.width * opts.target_side_length // image.height, opts.target_side_length), LANCZOS) - _atomically_save_image(image, fullfn_without_extension, ".jpg") + try: + _atomically_save_image(image, fullfn_without_extension, ".jpg") + except Exception as e: + errors.display(e, "saving image as downscaled JPG") if opts.save_txt and info is not None: txt_fullfn = f"{fullfn_without_extension}.txt" diff --git a/modules/shared.py b/modules/shared.py 
index d68c366c..4f496533 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -325,9 +325,9 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "export_for_4chan": OptionInfo(True, "If PNG image is larger than Downscale threshold or any dimension is larger than Target length, downscale the image to dimensions and save a copy as JPG"), - "img_downscale_threshold": OptionInfo(4, "Downscale threshold (MB)"), - "target_side_length": OptionInfo(4000, "Target length"), + "export_for_4chan": OptionInfo(True, "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG"), + "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), + "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), From fd4ac5187a1ae42be3f131770ea21e2158f75dcd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 19 Feb 2023 10:55:39 +0300 Subject: [PATCH 45/53] Revert "Aspect ratio sliders" --- javascript/ComponentControllers.js | 259 ----------------------------- javascript/aspectRatioSliders.js | 181 -------------------- modules/shared.py | 21 --- modules/ui.py | 12 +- style.css | 46 ----- 5 files changed, 2 insertions(+), 517 deletions(-) delete mode 100644 javascript/ComponentControllers.js delete 
mode 100644 javascript/aspectRatioSliders.js diff --git a/javascript/ComponentControllers.js b/javascript/ComponentControllers.js deleted file mode 100644 index 2888679b..00000000 --- a/javascript/ComponentControllers.js +++ /dev/null @@ -1,259 +0,0 @@ -/* This is a basic library that allows controlling elements that take some form of user input. - -This was previously written in typescript, where all controllers implemented an interface. Not -all methods were needed in all the controllers, but it was done to keep a common interface, so -your main app can serve as a controller of controllers. - -These controllers were built to work on the shapes of html elements that gradio components use. - -There may be some notes in it that only applied to my use case, but I left them to help others -along. - -You will need the parent element for these to work. -The parent element can be defined as the element (div) that gets the element id when assigning -an element id to a gradio component. - -Example: - gr.TextBox(value="...", elem_id="THISID") - -Basic usage, grab an element that is the parent container for the component. - -Send it in to the class, like a function, don't forget the "new" keyword so it calls the constructor -and sends back a new object. - -Example: - -let txt2imgPrompt = new TextComponentController(gradioApp().querySelector("#txt2img_prompt")) - -Then use the getVal() method to get the value, or use the setVal(myValue) method to set the value. - -Input types that are groups, like Checkbox groups (not individual checkboxes), take in an array of values. - -Checkbox group has to reset all values to False (unchecked), then set the values in your array to true (checked). -If you don't hold a reference to the values (the labels in string format), you can acquire them using the getVal() method. 
-*/ -class DropdownComponentController { - constructor(element) { - this.element = element; - this.childSelector = this.element.querySelector('select'); - this.children = new Map(); - Array.from(this.childSelector.querySelectorAll('option')).forEach(opt => this.children.set(opt.value, opt)); - } - getVal() { - return this.childSelector.value; - } - updateVal(optionElement) { - optionElement.selected = true; - } - setVal(name) { - this.updateVal(this.children.get(name)); - this.eventHandler(); - } - eventHandler() { - this.childSelector.dispatchEvent(new Event("change")); - } -} -class CheckboxComponentController { - constructor(element) { - this.element = element; - this.child = this.element.querySelector('input'); - } - getVal() { - return this.child.checked; - } - updateVal(checked) { - this.child.checked = checked; - } - setVal(checked) { - this.updateVal(checked); - this.eventHandler(); - } - eventHandler() { - this.child.dispatchEvent(new Event("change")); - } -} -class CheckboxGroupComponentController { - constructor(element) { - this.element = element; - //this.checkBoxes = new Object; - this.children = new Map(); - Array.from(this.element.querySelectorAll('input')).forEach(input => this.children.set(input.nextElementSibling.innerText, input)); - /* element id gets use fieldset, grab all inputs (the bool val) get the userfriendly label, use as key, put bool value in mapping */ - //Array.from(this.component.querySelectorAll("input")).forEach( _input => this.checkBoxes[_input.nextElementSibling.innerText] = _input) - /*Checkboxgroup structure -
    -
    css makes translucent - - serves as label for component - -
    container for checkboxes - - ... -
    -
    - */ - } - updateVal(label) { - /********* - calls updates using a throttle or else the backend does not get updated properly - * ********/ - setTimeout(() => this.conditionalToggle(true, this.children.get(label)), 2); - } - setVal(labels) { - /* Handles reset and updates all in array to true */ - this.reupdateVals(); - labels.forEach(l => this.updateVal(l)); - } - getVal() { - //return the list of values that are true - return [...this.children].filter(([k, v]) => v.checked).map(arr => arr[0]); - } - reupdateVals() { - /************** - * for reupdating all vals, first set to false - **************/ - this.children.forEach(inputChild => this.conditionalToggle(false, inputChild)); - } - conditionalToggle(desiredVal, inputChild) { - //This method behaves like 'set this value to this' - //Using element.checked = true/false, does not register the change, even if you called change afterwards, - // it only sets what it looks like in our case, because there is no form submit, a person then has to click on it twice. 
- //Options are to use .click() or dispatch an event - if (desiredVal != inputChild.checked) { - inputChild.dispatchEvent(new Event("change")); //using change event instead of click, in case browser ad-blockers blocks the click method - } - } - eventHandler(checkbox) { - checkbox.dispatchEvent(new Event("change")); - } -} -class RadioComponentController { - constructor(element) { - this.element = element; - this.children = new Map(); - Array.from(this.element.querySelectorAll("input")).forEach(input => this.children.set(input.value, input)); - } - getVal() { - //radio groups have a single element that's checked is true - // as array arr k,v pair element.checked ) -> array of len(1) with [k,v] so either [0] [1].value - return [...this.children].filter(([l, e]) => e.checked)[0][0]; - //return Array.from(this.children).filter( ([label, input]) => input.checked)[0][1].value - } - updateVal(child) { - this.eventHandler(child); - } - setVal(name) { - //radio will trigger all false except the one that get the event change - //to keep the api similar, other methods are still called - this.updateVal(this.children.get(name)); - } - eventHandler(child) { - child.dispatchEvent(new Event("change")); - } -} -class NumberComponentController { - constructor(element) { - this.element = element; - this.childNumField = element.querySelector('input[type=number]'); - } - getVal() { - return this.childNumField.value; - } - updateVal(text) { - this.childNumField.value = text; - } - eventHandler() { - this.element.dispatchEvent(new Event("input")); - } - setVal(text) { - this.updateVal(text); - this.eventHandler(); - } -} -class SliderComponentController { - constructor(element) { - this.element = element; - this.childNumField = this.element.querySelector('input[type=number]'); - this.childRangeField = this.element.querySelector('input[type=range]'); - } - getVal() { - return this.childNumField.value; - } - updateVal(text) { - //both are not needed, either works, both are left in so one 
is a fallback in case of gradio changes - this.childNumField.value = text; - this.childRangeField.value = text; - } - eventHandler() { - this.element.dispatchEvent(new Event("input")); - this.childNumField.dispatchEvent(new Event("input")); - this.childRangeField.dispatchEvent(new Event("input")); - } - setVal(text) { - this.updateVal(text); - this.eventHandler(); - } -} -class TextComponentController { - constructor(element) { - this.element = element; - this.child = element.querySelector('textarea'); - } - getVal() { - return this.child.value; - } - eventHandler() { - this.element.dispatchEvent(new Event("input")); - this.child.dispatchEvent(new Event("change")); - //Workaround to solve no target with v(o) on eventhandler, define my own target - let ne = new Event("input"); - Object.defineProperty(ne, "target", { value: this.child }); - this.child.dispatchEvent(ne); - } - updateVal(text) { - this.child.value = text; - } - appendValue(text) { - //might add delimiter option - this.child.value += ` ${text}`; - } - setVal(text, append = false) { - if (append) { - this.appendValue(text); - } - else { - this.updateVal(text); - } - this.eventHandler(); - } -} -class JsonComponentController extends TextComponentController { - constructor(element) { - super(element); - } - getVal() { - return JSON.parse(this.child.value); - } -} -class ColorComponentController { - constructor(element) { - this.element = element; - this.child = this.element.querySelector('input[type=color]'); - } - updateVal(text) { - this.child.value = text; - } - getVal() { - return this.child.value; - } - setVal(text) { - this.updateVal(text); - this.eventHandler(); - } - eventHandler() { - this.child.dispatchEvent(new Event("input")); - } -} diff --git a/javascript/aspectRatioSliders.js b/javascript/aspectRatioSliders.js deleted file mode 100644 index 3def5158..00000000 --- a/javascript/aspectRatioSliders.js +++ /dev/null @@ -1,181 +0,0 @@ -class AspectRatioSliderController { - constructor(widthSlider, 
heightSlider, ratioSource, roundingSource, roundingMethod) { - //References - this.widthSlider = new SliderComponentController(widthSlider); - this.heightSlider = new SliderComponentController(heightSlider); - this.ratioSource = new DropdownComponentController(ratioSource); - this.roundingSource = new CheckboxComponentController(roundingSource); - this.roundingMethod = new RadioComponentController(roundingMethod); - this.roundingIndicatorBadge = document.createElement("div"); - // Badge implementation - this.roundingIndicatorBadge.innerText = "📐"; - this.roundingIndicatorBadge.classList.add("rounding-badge"); - this.ratioSource.element.appendChild(this.roundingIndicatorBadge); - // Check initial value of ratioSource to set badge visbility - let initialRatio = this.ratioSource.getVal(); - if (!initialRatio.includes(":")) { - this.roundingIndicatorBadge.style.display = "none"; - } - //Adjust badge icon if rounding is on - if (this.roundingSource.getVal()) { - //this.roundingIndicatorBadge.classList.add("active"); - this.roundingIndicatorBadge.innerText = "📏"; - } - //Make badge clickable to toggle setting - this.roundingIndicatorBadge.addEventListener("click", () => { - this.roundingSource.setVal(!this.roundingSource.getVal()); - }); - //Make rounding setting toggle badge text and style if setting changes - this.roundingSource.child.addEventListener("change", () => { - if (this.roundingSource.getVal()) { - //this.roundingIndicatorBadge.classList.add("active"); - this.roundingIndicatorBadge.innerText = "📏"; - } - else { - //this.roundingIndicatorBadge.classList.remove("active"); - this.roundingIndicatorBadge.innerText = "📐"; - } - this.adjustStepSize(); - }); - //Other event listeners - this.widthSlider.childRangeField.addEventListener("change", (e) => { e.preventDefault(); this.resize("width"); }); - this.widthSlider.childNumField.addEventListener("change", (e) => { e.preventDefault(); this.resize("width"); }); - 
this.heightSlider.childRangeField.addEventListener("change", (e) => { e.preventDefault(); this.resize("height"); }); - this.heightSlider.childNumField.addEventListener("change", (e) => { e.preventDefault(); this.resize("height"); }); - this.ratioSource.childSelector.addEventListener("change", (e) => { - e.preventDefault(); - //Check and toggle display of badge conditionally on dropdown selection - if (!this.ratioSource.getVal().includes(":")) { - this.roundingIndicatorBadge.style.display = 'none'; - } - else { - this.roundingIndicatorBadge.style.display = 'block'; - } - this.adjustStepSize(); - }); - } - resize(dimension) { - //For moving slider or number field - let val = this.ratioSource.getVal(); - if (!val.includes(":")) { - return; - } - let [width, height] = val.split(":").map(Number); - let ratio = width / height; - if (dimension == 'width') { - let newHeight = parseInt(this.widthSlider.getVal()) / ratio; - if (this.roundingSource.getVal()) { - switch (this.roundingMethod.getVal()) { - case 'Round': - newHeight = Math.round(newHeight / 8) * 8; - break; - case 'Ceiling': - newHeight = Math.ceil(newHeight / 8) * 8; - break; - case 'Floor': - newHeight = Math.floor(newHeight / 8) * 8; - break; - } - } - this.heightSlider.setVal(newHeight.toString()); - } - else if (dimension == "height") { - let newWidth = parseInt(this.heightSlider.getVal()) * ratio; - if (this.roundingSource.getVal()) { - switch (this.roundingMethod.getVal()) { - case 'Round': - newWidth = Math.round(newWidth / 8) * 8; - break; - case 'Ceiling': - newWidth = Math.ceil(newWidth / 8) * 8; - break; - case 'Floor': - newWidth = Math.floor(newWidth / 8) * 8; - break; - } - } - this.widthSlider.setVal(newWidth.toString()); - } - } - adjustStepSize() { - /* Sets scales/precision/rounding steps;*/ - let val = this.ratioSource.getVal(); - if (!val.includes(":")) { - //If ratio unlocked - this.widthSlider.childRangeField.step = "8"; - this.widthSlider.childRangeField.min = "64"; - 
this.widthSlider.childNumField.step = "8"; - this.widthSlider.childNumField.min = "64"; - this.heightSlider.childRangeField.step = "8"; - this.heightSlider.childRangeField.min = "64"; - this.heightSlider.childNumField.step = "8"; - this.heightSlider.childNumField.min = "64"; - return; - } - //Format string and calculate step sizes - let [width, height] = val.split(":").map(Number); - let decimalPlaces = (width.toString().split(".")[1] || []).length; - //keep upto 6 decimal points of precision of ratio - //euclidean gcd does not support floats, so we scale it up - decimalPlaces = decimalPlaces > 6 ? 6 : decimalPlaces; - let gcd = this.gcd(width * 10 ** decimalPlaces, height * 10 ** decimalPlaces) / 10 ** decimalPlaces; - let stepSize = 8 * height / gcd; - let stepSizeOther = 8 * width / gcd; - if (this.roundingSource.getVal()) { - //If rounding is on set/keep default stepsizes - this.widthSlider.childRangeField.step = "8"; - this.widthSlider.childRangeField.min = "64"; - this.widthSlider.childNumField.step = "8"; - this.widthSlider.childNumField.min = "64"; - this.heightSlider.childRangeField.step = "8"; - this.heightSlider.childRangeField.min = "64"; - this.heightSlider.childNumField.step = "8"; - this.heightSlider.childNumField.min = "64"; - } - else { - //if rounding is off, set step sizes so they enforce snapping - //min is changed, because it offsets snap positions - this.widthSlider.childRangeField.step = stepSizeOther.toString(); - this.widthSlider.childRangeField.min = stepSizeOther.toString(); - this.widthSlider.childNumField.step = stepSizeOther.toString(); - this.widthSlider.childNumField.min = stepSizeOther.toString(); - this.heightSlider.childRangeField.step = stepSize.toString(); - this.heightSlider.childRangeField.min = stepSize.toString(); - this.heightSlider.childNumField.step = stepSize.toString(); - this.heightSlider.childNumField.min = stepSize.toString(); - } - let currentWidth = parseInt(this.widthSlider.getVal()); - //Rounding treated kinda 
like pythons divmod - let stepsTaken = Math.round(currentWidth / stepSizeOther); - //this snaps it to closest rule matches (rules being html step points, and ratio) - let newWidth = stepsTaken * stepSizeOther; - this.widthSlider.setVal(newWidth.toString()); - this.heightSlider.setVal(Math.round(newWidth / (width / height)).toString()); - } - gcd(a, b) { - //euclidean gcd - if (b === 0) { - return a; - } - return this.gcd(b, a % b); - } - static observeStartup(widthSliderId, heightSliderId, ratioSourceId, roundingSourceId, roundingMethodId) { - let observer = new MutationObserver(() => { - let widthSlider = document.querySelector("gradio-app").shadowRoot.getElementById(widthSliderId); - let heightSlider = document.querySelector("gradio-app").shadowRoot.getElementById(heightSliderId); - let ratioSource = document.querySelector("gradio-app").shadowRoot.getElementById(ratioSourceId); - let roundingSource = document.querySelector("gradio-app").shadowRoot.getElementById(roundingSourceId); - let roundingMethod = document.querySelector("gradio-app").shadowRoot.getElementById(roundingMethodId); - if (widthSlider && heightSlider && ratioSource && roundingSource && roundingMethod) { - observer.disconnect(); - new AspectRatioSliderController(widthSlider, heightSlider, ratioSource, roundingSource, roundingMethod); - } - }); - observer.observe(gradioApp(), { childList: true, subtree: true }); - } -} -document.addEventListener("DOMContentLoaded", () => { - //Register mutation observer for self start-up; - AspectRatioSliderController.observeStartup("txt2img_width", "txt2img_height", "txt2img_ratio", "setting_aspect_ratios_rounding", "setting_aspect_ratios_rounding_method"); - AspectRatioSliderController.observeStartup("img2img_width", "img2img_height", "img2img_ratio", "setting_aspect_ratios_rounding", "setting_aspect_ratios_rounding_method"); -}); diff --git a/modules/shared.py b/modules/shared.py index 2983ee44..e324a48a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ 
-139,24 +139,6 @@ ui_reorder_categories = [ "scripts", ] -aspect_ratio_defaults = [ - "🔓", - "1:1", - "3:2", - "4:3", - "5:4", - "16:9", - "9:16", - "1.85:1", - "2.35:1", - "2.39:1", - "2.40:1", - "21:9", - "1.375:1", - "1.66:1", - "1.75:1" -] - cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -477,9 +459,6 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), - "aspect_ratios_rounding": OptionInfo(True, "Round aspect ratios for more flexibility?", gr.Checkbox), - "aspect_ratios_rounding_method": OptionInfo("Ceiling", "Aspect ratios rounding method", gr.Radio,{"choices": ["Round", "Ceiling", "Floor"]}), - "aspect_ratios": OptionInfo(", ".join(aspect_ratio_defaults), "txt2img/img2img aspect ratios"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) diff --git a/modules/ui.py b/modules/ui.py index 2fc1fee5..2fdbda42 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -424,10 +424,6 @@ def ordered_ui_categories(): yield category -def aspect_ratio_list(): - return [ratio.strip() for ratio in shared.opts.aspect_ratios.split(",")] - - def get_value_for_setting(key): value = getattr(opts, key) @@ -483,9 +479,7 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, 
step=8, label="Width", value=512, elem_id="txt2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - with gr.Column(elem_id="txt2img_size_toolbox", scale=0): - aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="txt2img_ratio", show_label=False, label="Aspect Ratio") - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") if opts.dimensions_and_batch_together: with gr.Column(elem_id="txt2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") @@ -763,9 +757,7 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_size_toolbox", scale=0): - aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="img2img_ratio", show_label=False, label="Aspect Ratio") - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") if opts.dimensions_and_batch_together: with gr.Column(elem_id="img2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") diff --git a/style.css b/style.css index 55baefb7..05572f66 100644 --- a/style.css +++ b/style.css @@ -747,52 +747,6 @@ footer { margin-left: 0em; } -#txt2img_size_toolbox, #img2img_size_toolbox{ - min-width: unset !important; - gap: 0; -} - -#txt2img_ratio, #img2img_ratio { - padding: 0px; - min-width: unset; - max-width: fit-content; -} -#txt2img_ratio select, 
#img2img_ratio select{ - -o-appearance: none; - -ms-appearance: none; - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; - background-image: unset; - padding-right: unset; - min-width: 40px; - max-width: 40px; - min-height: 40px; - max-height: 40px; - line-height: 40px; - padding: 0; - text-align: center; -} -.rounding-badge { - display: inline-block; - border-radius: 0px; - /*background-color: #ccc;*/ - cursor: pointer; - position: absolute; - top: -10px; - right: -10px; - width: 20px; - height: 20px; - padding: 1px; - line-height: 16px; - font-size: 14px; -} - -.rounding-badge.active { - background-color: #007bff; - border-radius: 50%; -} - .inactive{ opacity: 0.5; } From 66cfd1dcfc893a9051310c208a66890b86334118 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 11:45:04 +0300 Subject: [PATCH 46/53] Expose xyz_grid's values to other extensions for #7721 --- scripts/xyz_grid.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 5982cfba..62e03d02 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -25,6 +25,8 @@ from modules.ui_components import ToolButton fill_values_symbol = "\U0001f4d2" # 📒 +AxisInfo = namedtuple('AxisInfo', ['axis', 'values']) + def apply_field(field): def fun(p, x, xs): @@ -520,6 +522,10 @@ class Script(scripts.Script): grid_infotext = [None] + state.xyz_plot_x = AxisInfo(x_opt, xs) + state.xyz_plot_y = AxisInfo(y_opt, ys) + state.xyz_plot_z = AxisInfo(z_opt, zs) + # If one of the axes is very slow to change between (like SD model # checkpoint), then make sure it is in the outer iteration of the nested # `for` loop. 
From fe46a08f52c36ca61446f657296802ab3ceb2529 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 12:09:25 +0300 Subject: [PATCH 47/53] add slash to non-empty dirs in extra networks interface --- modules/ui_extra_networks.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 90abec0a..30ceab4e 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -76,6 +76,10 @@ class ExtraNetworksPage: while subdir.startswith("/"): subdir = subdir[1:] + is_empty = len(os.listdir(x)) == 0 + if not is_empty and not subdir.endswith("/"): + subdir = subdir + "/" + subdirs[subdir] = 1 if subdirs: From b908bed8838fae89e5e83e57ca91808cf2d68077 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 12:23:40 +0300 Subject: [PATCH 48/53] remove unneeded return from #7583 --- modules/shared_items.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_items.py b/modules/shared_items.py index b72b2bae..e792a134 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -20,4 +20,4 @@ def sd_vae_items(): def refresh_vae_list(): import modules.sd_vae - return modules.sd_vae.refresh_vae_list() + modules.sd_vae.refresh_vae_list() From 48d171bbb373e3db9aef0776fe681b63056272b7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 12:25:05 +0300 Subject: [PATCH 49/53] fix incorrectly named args for gr.Slider in prompt matrix and xyz grid --- scripts/prompt_matrix.py | 2 +- scripts/xyz_grid.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index 3ee3cbe4..6340a7d9 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -54,7 +54,7 @@ class Script(scripts.Script): prompt_type = gr.Radio(["positive", "negative"], label="Select prompt", elem_id=self.elem_id("prompt_type"), 
value="positive") variations_delimiter = gr.Radio(["comma", "space"], label="Select joining char", elem_id=self.elem_id("variations_delimiter"), value="comma") with gr.Column(): - margin_size = gr.Slider(label="Grid margins (px)", min=0, max=500, value=0, step=2, elem_id=self.elem_id("margin_size")) + margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size")) return [put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size] diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 62e03d02..c375d2c0 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -360,7 +360,7 @@ class Script(scripts.Script): include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images")) include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids")) with gr.Column(): - margin_size = gr.Slider(label="Grid margins (px)", min=0, max=500, value=0, step=2, elem_id=self.elem_id("margin_size")) + margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size")) with gr.Row(variant="compact", elem_id="swap_axes"): swap_xy_axes_button = gr.Button(value="Swap X/Y axes", elem_id="xy_grid_swap_axes_button") From 11183b4d905d14c6a0164a4d13675b89b1bf4ceb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 12:44:56 +0300 Subject: [PATCH 50/53] fix for #6700 --- modules/textual_inversion/dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 1568b2b8..af9fbcf2 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -115,7 +115,7 @@ class PersonalizedBase(Dataset): weight /= weight.mean() elif use_weight: #If an image does not have a alpha channel, add a ones weight 
map anyway so we can stack it later - weight = torch.ones([channels] + latent_size) + weight = torch.ones(latent_sample.shape) else: weight = None From d84f3cf7a7743bc91cd5ba524c76cf859e021b49 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 13:11:48 +0300 Subject: [PATCH 51/53] split #7300 into multiple lines --- webui.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 69312a19..9e8b486a 100644 --- a/webui.py +++ b/webui.py @@ -207,6 +207,14 @@ def webui(): if cmd_opts.gradio_queue: shared.demo.queue(64) + gradio_auth_creds = [] + if cmd_opts.gradio_auth: + gradio_auth_creds += cmd_opts.gradio_auth.strip('"').replace('\n', '').split(',') + if cmd_opts.gradio_auth_path: + with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file: + for line in file.readlines(): + gradio_auth_creds += [x.strip() for x in line.split(',')] + app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, server_name=server_name, @@ -214,7 +222,7 @@ def webui(): ssl_keyfile=cmd_opts.tls_keyfile, ssl_certfile=cmd_opts.tls_certfile, debug=cmd_opts.gradio_debug, - auth=[tuple(cred.split(':')) for cred in (cmd_opts.gradio_auth.strip('"').replace('\n','').split(',') + (open(cmd_opts.gradio_auth_path, 'r').read().strip().replace('\n','').split(',') if cmd_opts.gradio_auth_path and os.path.exists(cmd_opts.gradio_auth_path) else None))] if cmd_opts.gradio_auth or (cmd_opts.gradio_auth_path and os.path.exists(cmd_opts.gradio_auth_path)) else None, + auth=[tuple(cred.split(':')) for cred in gradio_auth_creds] if gradio_auth_creds else None, inbrowser=cmd_opts.autolaunch, prevent_thread_lock=True ) From c77f01ff31072715afe80413ecaf6b3d00797d34 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 19 Feb 2023 20:37:40 +0900 Subject: [PATCH 52/53] fix auto sd download issue --- modules/sd_models.py | 8 +++++++- modules/shared.py | 1 + 2 files changed, 8 
insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 127e9663..ac4903f4 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -105,9 +105,15 @@ def checkpoint_tiles(): def list_models(): checkpoints_list.clear() checkpoint_alisases.clear() - model_list = modelloader.load_models(model_path=model_path, model_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors", command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) cmd_ckpt = shared.cmd_opts.ckpt + if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file: + model_url = None + else: + model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" + + model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) + if os.path.exists(cmd_ckpt): checkpoint_info = CheckpointInfo(cmd_ckpt) checkpoint_info.register() diff --git a/modules/shared.py b/modules/shared.py index 2c2edfbd..805f9cc1 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -108,6 +108,7 @@ parser.add_argument("--server-name", type=str, help="Sets hostname of server", d parser.add_argument("--gradio-queue", action='store_true', help="Uses gradio queue; experimental option; breaks restart UI button") parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) +parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is 
found in --ckpt-dir", default=False) script_loading.preload_extensions(extensions.extensions_dir, parser) From 014e7323f6f54a38183f8de52dc83f2248eee251 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 19 Feb 2023 20:49:07 +0900 Subject: [PATCH 53/53] when exists --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ac4903f4..93959f55 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -107,7 +107,7 @@ def list_models(): checkpoint_alisases.clear() cmd_ckpt = shared.cmd_opts.ckpt - if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file: + if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt): model_url = None else: model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"