From d78c4375833f063370fd40eab7b5322455c93683 Mon Sep 17 00:00:00 2001 From: ParityError Date: Sun, 12 Mar 2023 12:41:27 -0700 Subject: [PATCH 001/113] Update webui-user.sh Installation should not be assumed to be located within ~/home directory. User should expected to install project anywhere and run the startup scripts while in stable-diffusion-webui directory. --- webui-user.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui-user.sh b/webui-user.sh index bfa53cb7..74e8800c 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -4,7 +4,7 @@ ######################################################### # Install directory without trailing slash -#install_dir="/home/$(whoami)" +#install_dir="$(pwd)" # Name of the subdirectory #clone_dir="stable-diffusion-webui" From 6439e72df2cd2126734ce4a69c277502c4c9982d Mon Sep 17 00:00:00 2001 From: ParityError <36368048+ParityError@users.noreply.github.com> Date: Sun, 12 Mar 2023 15:08:26 -0700 Subject: [PATCH 002/113] Update webui.sh Installation should not be assumed to be located within ~/home directory. User should be expected to install project anywhere and run the startup scripts while in stable-diffusion-webui directory. See issue #8534 --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 8cdad22d..b01e4fa9 100755 --- a/webui.sh +++ b/webui.sh @@ -23,7 +23,7 @@ fi # Install directory without trailing slash if [[ -z "${install_dir}" ]] then - install_dir="/home/$(whoami)" + install_dir="($pwd)" fi # Name of the subdirectory (defaults to stable-diffusion-webui) From 5c051c0618bec1417827910b601ba915d0ca6c4e Mon Sep 17 00:00:00 2001 From: ParityError <36368048+ParityError@users.noreply.github.com> Date: Sun, 12 Mar 2023 15:10:44 -0700 Subject: [PATCH 003/113] Update webui.sh Installation should not be assumed to be located within ~/home directory. User should be expected to install project anywhere and run the startup scripts while in stable-diffusion-webui directory. See issue #8534 --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index b01e4fa9..cf649664 100755 --- a/webui.sh +++ b/webui.sh @@ -23,7 +23,7 @@ fi # Install directory without trailing slash if [[ -z "${install_dir}" ]] then - install_dir="($pwd)" + install_dir="$(pwd)" fi # Name of the subdirectory (defaults to stable-diffusion-webui) From d86beb822832c9162714cf0a3567ad087839a2ac Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Thu, 23 Mar 2023 17:09:59 -0400 Subject: [PATCH 004/113] Remove "do not add watermark to images" option --- javascript/hints.js | 1 - modules/shared.py | 1 - requirements.txt | 1 - 3 files changed, 3 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 7f4101b2..d12e86a9 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -85,7 +85,6 @@ titles = { "vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).", "Eta noise seed delta": "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. 
You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.", - "Do not add watermark to images": "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.", "Filename word regex": "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.", "Filename join string": "This string will be used to join split words into a single line if the option above is enabled.", diff --git a/modules/shared.py b/modules/shared.py index f28a12cc..030cd7e0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -342,7 +342,6 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), diff --git a/requirements.txt b/requirements.txt index 6d53f089..c6eb2449 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,6 @@ fonts font-roboto gfpgan gradio==3.16.2 -invisible-watermark numpy omegaconf opencv-contrib-python From c9647c8d23efa8c939c6af39878784e246082122 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sat, 25 Mar 2023 16:11:41 -0400 Subject: [PATCH 005/113] Support Gradio's theme API --- modules/shared.py | 35 +++++++++++++++++++++++++++++++++++ modules/ui.py | 2 +- webui.py | 1 + 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 11be3985..2f7892cd 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -4,6 +4,7 @@ import json import os import sys import time +import requests from PIL import Image import gradio as gr @@ -54,6 +55,21 @@ ui_reorder_categories = [ "scripts", ] +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "freddyaboulton/dracula_revamped", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/pakistan", + "dawood/microsoft_windows", + "ysharma/steampunk" +] + + cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -387,6 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), { "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: 
localization.list_localizations(cmd_opts.localizations_dir)), + "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", gr.Dropdown, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) })) options_templates.update(options_section(('ui', "Live previews"), { @@ -599,6 +616,24 @@ clip_model = None progress_print_out = sys.stdout +gradio_theme = gr.themes.Base() + + +def reload_gradio_theme(theme_name=None): + global gradio_theme + if not theme_name: + theme_name = opts.gradio_theme + + if theme_name == "Default": + gradio_theme = gr.themes.Default() + else: + try: + gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + except requests.exceptions.ConnectionError: + print("Can't access HuggingFace Hub, falling back to default Gradio theme") + gradio_theme = gr.themes.Default() + + class TotalTQDM: def __init__(self): diff --git a/modules/ui.py b/modules/ui.py index af8546c2..6e049881 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1592,7 +1592,7 @@ def create_ui(): for _interface, label, _ifid in interfaces: shared.tab_names.append(label) - with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo: + with gr.Blocks(css=css, theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings", variant="compact"): for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, is_quicksettings=True) diff --git a/webui.py b/webui.py index 30f3e4a1..6986e576 100644 --- a/webui.py +++ b/webui.py @@ -150,6 +150,7 @@ def initialize(): shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed) + shared.opts.onchange("gradio_theme", shared.reload_gradio_theme) startup_timer.record("opts onchange") shared.reload_hypernetworks() From 9ecf3471339d011983f2e3c878e920e49718ff90 Mon Sep 17 00:00:00 2001 From: AlUlkesh <99896447+AlUlkesh@users.noreply.github.com> Date: Mon, 27 Mar 2023 20:01:19 +0200 Subject: [PATCH 006/113] fix: lightboxModal, selectedTab --- javascript/generationParams.js | 2 +- javascript/imageviewer.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/generationParams.js b/javascript/generationParams.js index 95f05093..06a771bc 100644 --- a/javascript/generationParams.js +++ b/javascript/generationParams.js @@ -16,7 +16,7 @@ onUiUpdate(function(){ let modalObserver = new MutationObserver(function(mutations) { mutations.forEach(function(mutationRecord) { - let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText + let selectedTab = gradioApp().querySelector('#tabs div button')?.innerText if (mutationRecord.target.style.display === 'none' && selectedTab === 'txt2img' || selectedTab === 'img2img') gradioApp().getElementById(selectedTab+"_generation_info_button").click() }); diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index d6483562..bb61ee24 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -251,7 +251,7 @@ document.addEventListener("DOMContentLoaded", function() { modal.appendChild(modalNext) - gradioApp().appendChild(modal) + gradioApp().body.appendChild(modal) document.body.appendChild(modal); From 5a25826d841a13574ab6afbeb9c50c81a491fa21 Mon Sep 17 00:00:00 2001 From: AlUlkesh 
<99896447+AlUlkesh@users.noreply.github.com> Date: Tue, 28 Mar 2023 23:28:46 +0200 Subject: [PATCH 007/113] try both versions of appendChild --- javascript/imageviewer.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index bb61ee24..3deffa9b 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -251,8 +251,11 @@ document.addEventListener("DOMContentLoaded", function() { modal.appendChild(modalNext) - gradioApp().body.appendChild(modal) - + try { + gradioApp().appendChild(modal); + } catch (e) { + gradioApp().body.appendChild(modal); + } document.body.appendChild(modal); From 42082e8a3239c1c32cd9e2a03a20b610af857b51 Mon Sep 17 00:00:00 2001 From: devdn Date: Tue, 28 Mar 2023 18:18:28 -0400 Subject: [PATCH 008/113] performance increase --- modules/processing.py | 4 +++- modules/sd_samplers_kdiffusion.py | 22 +++++++++++++++++----- modules/shared.py | 1 + scripts/xyz_grid.py | 1 + 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..9f00ce3c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -105,7 +105,7 @@ class StableDiffusionProcessing: """ The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing """ - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) 
@@ -140,6 +140,7 @@ class StableDiffusionProcessing: self.denoising_strength: float = denoising_strength self.sampler_noise_scheduler_override = None self.ddim_discretize = ddim_discretize or opts.ddim_discretize + self.s_min_uncond = s_min_uncond or opts.s_min_uncond self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option @@ -162,6 +163,7 @@ class StableDiffusionProcessing: self.all_seeds = None self.all_subseeds = None self.iteration = 0 + @property def sd_model(self): diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index e9f08518..6a54ce32 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -76,7 +76,7 @@ class CFGDenoiser(torch.nn.Module): return denoised - def forward(self, x, sigma, uncond, cond, cond_scale, image_cond): + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException @@ -116,6 +116,12 @@ class CFGDenoiser(torch.nn.Module): tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond + sigma_thresh = s_min_uncond + if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model): + uncond = torch.zeros([0,0,uncond.shape[2]]) + x_in=x_in[:x_in.shape[0]//2] + sigma_in=sigma_in[:sigma_in.shape[0]//2] + if tensor.shape[1] == uncond.shape[1]: if not is_edit_model: cond_in = torch.cat([tensor, uncond]) @@ -144,7 +150,8 @@ class CFGDenoiser(torch.nn.Module): x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) + if uncond.shape[0]: + x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) cfg_denoised_callback(denoised_params) @@ -157,7 +164,10 @@ class CFGDenoiser(torch.nn.Module): sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) if not is_edit_model: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + if uncond.shape[0]: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + else: + denoised = x_out else: denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) @@ -165,7 +175,6 @@ class CFGDenoiser(torch.nn.Module): denoised = self.init_latent * self.mask + self.nmask * denoised self.step += 1 - return denoised @@ -244,6 +253,7 @@ class KDiffusionSampler: self.model_wrap_cfg.step = 0 self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) self.eta = p.eta if p.eta is not None else opts.eta_ancestral + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) @@ -326,6 +336,7 @@ class KDiffusionSampler: 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond } samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) @@ -359,7 +370,8 @@ class KDiffusionSampler: 
'cond': conditioning, 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond }, disable=False, callback=self.callback_state, **extra_params_kwargs)) return samples diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..0bdd30b8 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -405,6 +405,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_min_uncond': OptionInfo(0, "minimum sigma to use unconditioned guidance", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795..d6a44b1c 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -212,6 +212,7 @@ axis_options = [ AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: list(sd_models.checkpoints_list)), + AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")), AxisOption("Sigma Churn", float, apply_field("s_churn")), AxisOption("Sigma min", float, apply_field("s_tmin")), AxisOption("Sigma max", float, apply_field("s_tmax")), From bc90592031d26d3a6ed5c1b65ee9801452b5ece5 Mon Sep 17 00:00:00 2001 From: devdn Date: Tue, 28 Mar 2023 20:59:31 -0400 Subject: [PATCH 009/113] increase range of negative guidance minimum sigma option --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 0bdd30b8..0e9f2d54 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -405,7 +405,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 's_min_uncond': OptionInfo(0, "minimum sigma to use unconditioned guidance", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), + 's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 
0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), From fb68d93b6a579a424919b22682cf067ce9a8e13f Mon Sep 17 00:00:00 2001 From: ParityError <36368048+ParityError@users.noreply.github.com> Date: Tue, 28 Mar 2023 18:27:44 -0700 Subject: [PATCH 010/113] Update webui-user.sh --- webui-user.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui-user.sh b/webui-user.sh index 74e8800c..bfa53cb7 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -4,7 +4,7 @@ ######################################################### # Install directory without trailing slash -#install_dir="$(pwd)" +#install_dir="/home/$(whoami)" # Name of the subdirectory #clone_dir="stable-diffusion-webui" From f867d7b429d23f8fb82a979c22ba6c90955e0842 Mon Sep 17 00:00:00 2001 From: ParityError <36368048+ParityError@users.noreply.github.com> Date: Tue, 28 Mar 2023 18:34:02 -0700 Subject: [PATCH 011/113] Update README.md Updated to reflect change in webui.sh, so that the installation directory is not absolute (/home/user). --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b67e2296..c2a3578f 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,7 @@ sudo dnf install wget git python3 # Arch-based: sudo pacman -S wget git python3 ``` -2. To install in `/home/$(whoami)/stable-diffusion-webui/`, run: +2. Navigate to the directory you would like the webui to be installed and execute the following command: ```bash bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) ``` @@ -158,4 +158,4 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - Security advice - RyotaK - UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. 
-- (You) \ No newline at end of file +- (You) From 70a0a11783747d2e77a08a63d9feeceb7d2d4f63 Mon Sep 17 00:00:00 2001 From: Vespinian Date: Tue, 28 Mar 2023 23:52:51 -0400 Subject: [PATCH 012/113] Changed behavior that puts the args from alwayson_script request in the script_args, so don't accidently resize the arg list if we get less arg then or default list has --- modules/api/api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 518b2a61..8c5cd185 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -272,7 +272,9 @@ class Api: raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params") # always on script with no arg should always run so you don't really need to add them to the requests if "args" in request.alwayson_scripts[alwayson_script_name]: - script_args[alwayson_script.args_from:alwayson_script.args_to] = request.alwayson_scripts[alwayson_script_name]["args"] + # min between arg length in scriptrunner and arg length in the request + for idx in range(0, min((alwayson_script.args_to - alwayson_script.args_from), len(request.alwayson_scripts[alwayson_script_name]["args"]))): + script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx] return script_args def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): From 79d57d02f15aa406e38997d65ec8ed99374691b7 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 01:52:34 -0500 Subject: [PATCH 013/113] Improve custom code extension - Uses `gr.Code` component - Includes example - Can return out of body --- scripts/custom_code.py | 63 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/scripts/custom_code.py b/scripts/custom_code.py index d29113e6..4071d86d 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -1,9 +1,40 @@ import modules.scripts as scripts import gradio as gr +import ast +import copy from modules.processing import Processed from modules.shared import opts, cmd_opts, state + +def convertExpr2Expression(expr): + expr.lineno = 0 + expr.col_offset = 0 + result = ast.Expression(expr.value, lineno=0, col_offset = 0) + + return result + + +def exec_with_return(code, module): + """ + like exec() but can return values + https://stackoverflow.com/a/52361938/5862977 + """ + code_ast = ast.parse(code) + + init_ast = copy.deepcopy(code_ast) + init_ast.body = code_ast.body[:-1] + + last_ast = copy.deepcopy(code_ast) + last_ast.body = code_ast.body[-1:] + + exec(compile(init_ast, "", "exec"), module.__dict__) + if type(last_ast.body[0]) == ast.Expr: + return eval(compile(convertExpr2Expression(last_ast.body[0]), "", "eval"), module.__dict__) + else: + exec(compile(last_ast, "", "exec"), module.__dict__) + + class Script(scripts.Script): def title(self): @@ -13,12 +44,23 @@ class Script(scripts.Script): return cmd_opts.allow_code def ui(self, is_img2img): - code = gr.Textbox(label="Python code", lines=1, elem_id=self.elem_id("code")) + example = """from modules.processing import process_images - return [code] +p.width = 768 +p.height = 768 +p.batch_size = 2 +p.steps = 10 + +return process_images(p) +""" - def run(self, p, code): + code = gr.Code(value=example, language="python", label="Python code", elem_id=self.elem_id("code")) + indent_level = gr.Number(label='Indent level', value=2, precision=0, 
elem_id=self.elem_id("indent_level")) + + return [code, indent_level] + + def run(self, p, code, indent_level): assert cmd_opts.allow_code, '--allow-code option must be enabled' display_result_data = [[], -1, ""] @@ -29,13 +71,20 @@ class Script(scripts.Script): display_result_data[2] = i from types import ModuleType - compiled = compile(code, '', 'exec') module = ModuleType("testmodule") module.__dict__.update(globals()) module.p = p module.display = display - exec(compiled, module.__dict__) + + indent = " " * indent_level + indented = code.replace('\n', '\n' + indent) + body = f"""def __webuitemp__(): +{indent}{indented} +__webuitemp__()""" + + result = exec_with_return(body, module) + + if isinstance(result, Processed): + return result return Processed(p, *display_result_data) - - \ No newline at end of file From 3c7b928914eb2fe1b2ab456faa390b846de12b64 Mon Sep 17 00:00:00 2001 From: Thierry Date: Wed, 29 Mar 2023 16:52:45 -0400 Subject: [PATCH 014/113] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b67e2296..abf40a32 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ Alternatively, use online services (like Google Colab): - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) ### Automatic Installation on Windows -1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH". +1. Install [Python 3.10.9](https://www.python.org/downloads/release/python-3109/) (Newer version of Python does not support torch), checking "Add Python to PATH". 2. Install [git](https://git-scm.com/download/win). 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. 4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. From baef594e4a2c49103d472b48544b6848dfcaf2a6 Mon Sep 17 00:00:00 2001 From: Thierry Date: Wed, 29 Mar 2023 16:58:56 -0400 Subject: [PATCH 015/113] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index abf40a32..671ee2aa 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ Alternatively, use online services (like Google Colab): - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) ### Automatic Installation on Windows -1. Install [Python 3.10.9](https://www.python.org/downloads/release/python-3109/) (Newer version of Python does not support torch), checking "Add Python to PATH". +1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (Newer version of Python does not support torch), checking "Add Python to PATH". 2. Install [git](https://git-scm.com/download/win). 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. 4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. 
From 384bfe22cd86377aca87f82d2746125326a2fe1f Mon Sep 17 00:00:00 2001 From: Thierry Date: Wed, 29 Mar 2023 17:00:20 -0400 Subject: [PATCH 016/113] Update launch.py --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 68e08114..b76f1798 100644 --- a/launch.py +++ b/launch.py @@ -49,7 +49,7 @@ or any other error regarding unsuccessful package (library) installation, please downgrade (or upgrade) to the latest version of 3.10 Python and delete current Python and "venv" folder in WebUI's directory. -You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3109/ +You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/ {"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""} From ad5afcaae0b47e9e68b49aacf04cc3ad59d41a8e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 16:46:03 -0500 Subject: [PATCH 017/113] Save/restore working webui/extension configs --- javascript/extensions.js | 16 ++++ modules/extensions.py | 23 +++-- modules/paths_internal.py | 1 + modules/shared.py | 1 + modules/ui_extensions.py | 186 +++++++++++++++++++++++++++++++++++++- webui.py | 25 ++++- 6 files changed, 243 insertions(+), 9 deletions(-) diff --git a/javascript/extensions.js b/javascript/extensions.js index 72924a28..c2786499 100644 --- a/javascript/extensions.js +++ b/javascript/extensions.js @@ -47,3 +47,19 @@ function install_extension_from_index(button, url){ gradioApp().querySelector('#install_extension_button').click() } + +function config_state_confirm_restore(_, config_state_name, config_restore_type) { + if (config_state_name == "Current") { + return [false, config_state_name]; + } + let restored = ""; + if (config_restore_type == "extensions") { + restored = "all saved extension versions"; + } else if (config_restore_type == "webui") { + restored = "the webui version"; + } else { + restored = "the webui version and all saved extension versions"; + } + let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + ".\n(A backup of the current state will be made.)"); + return [confirmed, config_state_name, config_restore_type]; +} diff --git a/modules/extensions.py b/modules/extensions.py index 3a7a0372..a87beaa3 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,10 +3,11 @@ import sys import traceback import time +from datetime import datetime import git from modules import shared -from modules.paths_internal import extensions_dir, extensions_builtin_dir +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path extensions = [] @@ -31,12 +32,15 @@ class Extension: self.status = '' self.can_update = False self.is_builtin = is_builtin + self.commit_hash = '' + self.commit_date = None self.version = '' + self.branch = None self.remote = None self.have_info_from_repo = False def read_info_from_repo(self): - if self.have_info_from_repo: + if self.is_builtin or self.have_info_from_repo: return self.have_info_from_repo = True @@ -56,10 +60,15 @@ class Extension: self.status = 'unknown' self.remote = next(repo.remote().urls, None) head = repo.head.commit - ts = time.asctime(time.gmtime(repo.head.commit.committed_date)) - self.version = f'{head.hexsha[:8]} ({ts})' + self.commit_date = repo.head.commit.committed_date + ts = time.asctime(time.gmtime(self.commit_date)) 
+ if repo.active_branch: + self.branch = repo.active_branch.name + self.commit_hash = head.hexsha + self.version = f'{self.commit_hash[:8]} ({ts})' - except Exception: + except Exception as ex: + print(f"Failed reading extension data from Git repository ({self.name}): {ex}", file=sys.stderr) self.remote = None def list_files(self, subdir, extension): @@ -88,12 +97,12 @@ class Extension: self.can_update = False self.status = "latest" - def fetch_and_reset_hard(self): + def fetch_and_reset_hard(self, commit='origin'): repo = git.Repo(self.path) # Fix: `error: Your local changes to the following files would be overwritten by merge`, # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. repo.git.fetch(all=True) - repo.git.reset('origin', hard=True) + repo.git.reset(commit, hard=True) def list_extensions(): diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 926ec3bb..6765bafe 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -20,3 +20,4 @@ data_path = cmd_opts_pre.data_dir models_path = os.path.join(data_path, "models") extensions_dir = os.path.join(data_path, "extensions") extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") +config_states_dir = os.path.join(script_path, "config_states") diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..ffc5e4fe 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -424,6 +424,7 @@ options_templates.update(options_section(('postprocessing', "Postprocessing"), { options_templates.update(options_section((None, "Hidden options"), { "disabled_extensions": OptionInfo([], "Disable these extensions"), "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), + "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda2..ed677b3e 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -2,6 +2,7 @@ import json import os.path import sys import time +from datetime import datetime import traceback import git @@ -11,7 +12,8 @@ import html import shutil import errno -from modules import extensions, shared, paths +from modules import extensions, shared, paths, config_states +from modules.paths_internal import config_states_dir from modules.call_queue import wrap_gradio_gpu_call available_extensions = {"extensions": []} @@ -30,6 +32,9 @@ def apply_and_restart(disable_list, update_list, disable_all): update = json.loads(update_list) assert type(update) == list, f"wrong update_list data for apply_and_restart: {update_list}" + if update: + save_config_state("Backup (pre-update)") + update = set(update) for ext in extensions.extensions: @@ -50,6 +55,48 @@ def apply_and_restart(disable_list, update_list, disable_all): shared.state.need_restart = True +def save_config_state(name): + current_config_state = config_states.get_config() + if not name: + name = "Config" + current_config_state["name"] = name + filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + ".json") + print(f"Saving backup of webui/extension state to {filename}.") + with open(filename, "w", encoding="utf-8") as f: + json.dump(current_config_state, f) + config_states.list_config_states() + new_value = 
next(iter(config_states.all_config_states.keys()), "Current") + new_choices = ["Current"] + list(config_states.all_config_states.keys()) + return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to '{filename}'" + + +def restore_config_state(confirmed, config_state_name, restore_type): + if config_state_name == "Current": + return "Select a config to restore from." + if not confirmed: + return "Cancelled." + + check_access() + + save_config_state("Backup (pre-restore)") + + config_state = config_states.all_config_states[config_state_name] + + print(f"Restoring webui state from backup: {restore_type}") + + if restore_type == "extensions" or restore_type == "both": + shared.opts.restore_config_state_file = config_state["filename"] + shared.opts.save(shared.config_filename) + + if restore_type == "webui" or restore_type == "both": + config_states.restore_webui_config(config_state) + + shared.state.interrupt() + shared.state.need_restart = True + + return "" + + def check_updates(id_task, disable_list): check_access() @@ -121,6 +168,117 @@ def extension_table(): return code +def update_config_states_table(state_name): + if state_name == "Current": + config_state = config_states.get_config() + else: + config_state = config_states.all_config_states[state_name] + + config_name = config_state.get("name", "Config") + created_date = time.asctime(time.gmtime(config_state["created_at"])) + + code = f"""""" + + webui_remote = config_state["webui"]["remote"] or "" + webui_branch = config_state["webui"]["branch"] + webui_commit_hash = config_state["webui"]["commit_hash"] + if webui_commit_hash: + webui_commit_hash = webui_commit_hash[:8] + else: + webui_commit_hash = "" + webui_commit_date = config_state["webui"]["commit_date"] + if webui_commit_date: + webui_commit_date = time.asctime(time.gmtime(webui_commit_date)) + else: + webui_commit_date = "" + + code += f"""

Config Backup: {config_name}

+ Created at: {created_date}""" + + code += f"""

WebUI State

+ + + + + + + + + + + + + + + + + +
URLBranchCommitDate
{webui_remote}{webui_branch}{webui_commit_hash}{webui_commit_date}
+ """ + + code += """

Extension State

+ + + + + + + + + + + + """ + + ext_map = {ext.name: ext for ext in extensions.extensions} + + for ext_name, ext_conf in config_state["extensions"].items(): + ext_remote = ext_conf["remote"] or "" + ext_branch = ext_conf["branch"] or "" + ext_enabled = ext_conf["enabled"] + ext_commit_hash = ext_conf["commit_hash"] or "" + ext_commit_date = ext_conf["commit_date"] + if ext_commit_date: + ext_commit_date = time.asctime(time.gmtime(ext_commit_date)) + else: + ext_commit_date = "" + + remote = f"""{html.escape(ext_remote or '')}""" + + style_enabled = "" + style_remote = "" + style_branch = "" + style_commit = "" + if ext_name in ext_map: + current_ext = ext_map[ext_name] + current_ext.read_info_from_repo() + if current_ext.enabled != ext_enabled: + style_enabled = ' style="color: var(--primary-400)"' + if current_ext.remote != ext_remote: + style_remote = ' style="color: var(--primary-400)"' + if current_ext.branch != ext_branch: + style_branch = ' style="color: var(--primary-400)"' + if current_ext.commit_hash != ext_commit_hash: + style_commit = ' style="color: var(--primary-400)"' + + code += f""" + + + + + + + + """ + + code += """ + +
ExtensionURLBranchCommitDate
{html.escape(ext_name)}{remote}{ext_branch}{ext_commit_hash[:8]}{ext_commit_date}
+ """ + + return code + + def normalize_git_url(url): if url is None: return "" @@ -292,6 +450,8 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" def create_ui(): import modules.ui + config_states.list_config_states() + with gr.Blocks(analytics_enabled=False) as ui: with gr.Tabs(elem_id="tabs_extensions") as tabs: with gr.TabItem("Installed"): @@ -386,4 +546,28 @@ def create_ui(): outputs=[extensions_table, install_result], ) + with gr.TabItem("Backup/Restore"): + with gr.Row(elem_id="extensions_backup_top_row"): + config_states_list = gr.Dropdown(label="Saved Configs", elem_id="extension_backup_saved_configs", value="Current", choices=["Current"] + list(config_states.all_config_states.keys())) + modules.ui.create_refresh_button(config_states_list, config_states.list_config_states, lambda: {"choices": ["Current"] + list(config_states.all_config_states.keys())}, "refresh_config_states") + config_restore_type = gr.Radio(label="State to restore", choices=["extensions", "webui", "both"], value="extensions", elem_id="extension_backup_restore_type") + config_restore_button = gr.Button(value="Restore Selected Config", variant="primary", elem_id="extension_backup_restore") + with gr.Row(elem_id="extensions_backup_top_row2"): + config_save_name = gr.Textbox("", placeholder="Config Name", show_label=False) + config_save_button = gr.Button(value="Save Current Config") + + config_states_info = gr.HTML("") + config_states_table = gr.HTML(lambda: update_config_states_table("Current")) + + config_save_button.click(fn=save_config_state, inputs=[config_save_name], outputs=[config_states_list, config_states_info]) + + dummy_component = gr.Label(visible=False) + config_restore_button.click(fn=restore_config_state, _js="config_state_confirm_restore", inputs=[dummy_component, config_states_list, config_restore_type], outputs=[config_states_info]) + + config_states_list.change( + fn=update_config_states_table, + inputs=[config_states_list], + outputs=[config_states_table], + ) + return ui diff --git a/webui.py b/webui.py index b570895f..b8f9a2c1 100644 --- a/webui.py +++ b/webui.py @@ -5,6 +5,7 @@ import importlib import signal import re import warnings +import json from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware @@ -37,7 +38,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks +from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer import modules.face_restoration import modules.gfpgan_model as gfpgan @@ -105,6 +106,17 @@ def initialize(): localization.list_localizations(cmd_opts.localizations_dir) startup_timer.record("list extensions") + config_state_file = shared.opts.restore_config_state_file + shared.opts.restore_config_state_file = "" + shared.opts.save(shared.config_filename) + + if os.path.isfile(config_state_file): + print(f"*** About to restore extension state from file: {config_state_file}") + with open(config_state_file, "r", encoding="utf-8") as f: + config_state = json.load(f) + config_states.restore_extension_state(config_state) + startup_timer.record("restore extension config") + if cmd_opts.ui_debug_mode: 
shared.sd_upscalers = upscaler.UpscalerLanczos().scalers modules.scripts.load_scripts() @@ -301,6 +313,17 @@ def webui(): extensions.list_extensions() startup_timer.record("list extensions") + config_state_file = shared.opts.restore_config_state_file + shared.opts.restore_config_state_file = "" + shared.opts.save(shared.config_filename) + + if os.path.isfile(config_state_file): + print(f"*** About to restore extension state from file: {config_state_file}") + with open(config_state_file, "r", encoding="utf-8") as f: + config_state = json.load(f) + config_states.restore_extension_state(config_state) + startup_timer.record("restore extension config") + localization.list_localizations(cmd_opts.localizations_dir) modelloader.forbid_loaded_nonbuiltin_upscalers() From f22d0dde4e57444b2d4fe997338550bb82bb249e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:32:29 -0500 Subject: [PATCH 018/113] Better checking of extension state from Git info --- modules/extensions.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/modules/extensions.py b/modules/extensions.py index a87beaa3..34d9d654 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -91,9 +91,20 @@ class Extension: for fetch in repo.remote().fetch(dry_run=True): if fetch.flags != fetch.HEAD_UPTODATE: self.can_update = True - self.status = "behind" + self.status = "new commits" return + try: + origin = repo.rev_parse('origin') + if repo.head.commit != origin: + self.can_update = True + self.status = "behind HEAD" + return + except Exception: + self.can_update = False + self.status = "unknown (remote error)" + return + self.can_update = False self.status = "latest" @@ -103,6 +114,7 @@ class Extension: # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. 
repo.git.fetch(all=True) repo.git.reset(commit, hard=True) + self.have_info_from_repo = False def list_extensions(): From f3320b802c12f29e5a3201fcc0abfe72be294293 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:32:54 -0500 Subject: [PATCH 019/113] Various UI fixes in config state tab --- .gitignore | 1 + javascript/extensions.js | 10 +- modules/config_states.py | 200 +++++++++++++++++++++++++++++++++++++++ modules/ui_extensions.py | 45 +++++---- webui.py | 8 +- 5 files changed, 241 insertions(+), 23 deletions(-) create mode 100644 modules/config_states.py diff --git a/.gitignore b/.gitignore index 0b1d17ca..7c89b673 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,4 @@ notification.mp3 /test/stdout.txt /test/stderr.txt /cache.json +/config_states/ diff --git a/javascript/extensions.js b/javascript/extensions.js index c2786499..3c2f995a 100644 --- a/javascript/extensions.js +++ b/javascript/extensions.js @@ -50,7 +50,7 @@ function install_extension_from_index(button, url){ function config_state_confirm_restore(_, config_state_name, config_restore_type) { if (config_state_name == "Current") { - return [false, config_state_name]; + return [false, config_state_name, config_restore_type]; } let restored = ""; if (config_restore_type == "extensions") { @@ -60,6 +60,12 @@ function config_state_confirm_restore(_, config_state_name, config_restore_type) } else { restored = "the webui version and all saved extension versions"; } - let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + ".\n(A backup of the current state will be made.)"); + let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + "."); + if (confirmed) { + restart_reload(); + gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){ + x.innerHTML = "Loading..." 
+ }) + } return [confirmed, config_state_name, config_restore_type]; } diff --git a/modules/config_states.py b/modules/config_states.py new file mode 100644 index 00000000..2ea00929 --- /dev/null +++ b/modules/config_states.py @@ -0,0 +1,200 @@ +""" +Supports saving and restoring webui and extensions from a known working set of commits +""" + +import os +import sys +import traceback +import json +import time +import tqdm + +from datetime import datetime +from collections import OrderedDict +import git + +from modules import shared, extensions +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir + + +all_config_states = OrderedDict() + + +def list_config_states(): + global all_config_states + + all_config_states.clear() + os.makedirs(config_states_dir, exist_ok=True) + + config_states = [] + for filename in os.listdir(config_states_dir): + if filename.endswith(".json"): + path = os.path.join(config_states_dir, filename) + with open(path, "r", encoding="utf-8") as f: + j = json.load(f) + j["filepath"] = path + config_states.append(j) + + config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)) + + for cs in config_states: + timestamp = time.asctime(time.gmtime(cs["created_at"])) + name = cs.get("name", "Config") + full_name = f"{name}: {timestamp}" + all_config_states[full_name] = cs + + return all_config_states + + +def get_webui_config(): + webui_repo = None + + try: + if os.path.exists(os.path.join(script_path, ".git")): + webui_repo = git.Repo(script_path) + except Exception: + print(f"Error reading webui git info from {script_path}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + webui_remote = None + webui_commit_hash = None + webui_commit_date = None + webui_branch = None + if webui_repo and not webui_repo.bare: + try: + webui_remote = next(webui_repo.remote().urls, None) + head = webui_repo.head.commit + webui_commit_date = webui_repo.head.commit.committed_date + webui_commit_hash = head.hexsha + webui_branch = webui_repo.active_branch.name + + except Exception: + webui_remote = None + + return { + "remote": webui_remote, + "commit_hash": webui_commit_hash, + "commit_date": webui_commit_date, + "branch": webui_branch, + } + + +def get_extension_config(): + ext_config = {} + + for ext in extensions.extensions: + entry = { + "name": ext.name, + "path": ext.path, + "enabled": ext.enabled, + "is_builtin": ext.is_builtin, + "remote": ext.remote, + "commit_hash": ext.commit_hash, + "commit_date": ext.commit_date, + "branch": ext.branch, + "have_info_from_repo": ext.have_info_from_repo + } + + ext_config[ext.name] = entry + + return ext_config + + +def get_config(): + creation_time = datetime.now().timestamp() + webui_config = get_webui_config() + ext_config = get_extension_config() + + return { + "created_at": creation_time, + "webui": webui_config, + "extensions": ext_config + } + + +def restore_webui_config(config): + print("* Restoring webui state...") + + if "webui" not in config: + print("Error: No webui data saved to config") + return + + webui_config = config["webui"] + + if "commit_hash" not in webui_config: + print("Error: No commit saved to webui config") + return + + webui_commit_hash = webui_config.get("commit_hash", None) + webui_repo = None + + try: + if os.path.exists(os.path.join(script_path, ".git")): + webui_repo = git.Repo(script_path) + except Exception: + print(f"Error reading webui git info from {script_path}:", file=sys.stderr) + 
print(traceback.format_exc(), file=sys.stderr) + return + + try: + webui_repo.git.fetch(all=True) + webui_repo.git.reset(webui_commit_hash, hard=True) + print(f"* Restored webui to commit {webui_commit_hash}.") + except Exception: + print(f"Error restoring webui to commit {webui_commit_hash}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + +def restore_extension_config(config): + print("* Restoring extension state...") + + if "extensions" not in config: + print("Error: No extension data saved to config") + return + + ext_config = config["extensions"] + + results = [] + disabled = [] + + for ext in tqdm.tqdm(extensions.extensions): + if ext.is_builtin: + continue + + ext.read_info_from_repo() + current_commit = ext.commit_hash + + if ext.name not in ext_config: + ext.disabled = True + disabled.append(ext.name) + results.append((ext, current_commit[:8], False, "Saved extension state not found in config, marking as disabled")) + continue + + entry = ext_config[ext.name] + + if "commit_hash" in entry and entry["commit_hash"]: + try: + ext.fetch_and_reset_hard(entry["commit_hash"]) + ext.read_info_from_repo() + if current_commit != entry["commit_hash"]: + results.append((ext, current_commit[:8], True, entry["commit_hash"][:8])) + except Exception as ex: + results.append((ext, current_commit[:8], False, ex)) + else: + results.append((ext, current_commit[:8], False, "No commit hash found in config")) + + if not entry.get("enabled", False): + ext.disabled = True + disabled.append(ext.name) + else: + ext.disabled = False + + shared.opts.disabled_extensions = disabled + shared.opts.save(shared.config_filename) + + print("* Finished restoring extensions. Results:") + for ext, prev_commit, success, result in results: + if success: + print(f" + {ext.name}: {prev_commit} -> {result}") + else: + print(f" ! 
{ext.name}: FAILURE ({result})") diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index ed677b3e..b94b3a3a 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -17,6 +17,7 @@ from modules.paths_internal import config_states_dir from modules.call_queue import wrap_gradio_gpu_call available_extensions = {"extensions": []} +STYLE_PRIMARY = ' style="color: var(--primary-400)"' def check_access(): @@ -67,7 +68,7 @@ def save_config_state(name): config_states.list_config_states() new_value = next(iter(config_states.all_config_states.keys()), "Current") new_choices = ["Current"] + list(config_states.all_config_states.keys()) - return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to '{filename}'" + return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to \"{filename}\"" def restore_config_state(confirmed, config_state_name, restore_type): @@ -78,14 +79,12 @@ def restore_config_state(confirmed, config_state_name, restore_type): check_access() - save_config_state("Backup (pre-restore)") - config_state = config_states.all_config_states[config_state_name] - print(f"Restoring webui state from backup: {restore_type}") + print(f"*** Restoring webui state from backup: {restore_type} ***") if restore_type == "extensions" or restore_type == "both": - shared.opts.restore_config_state_file = config_state["filename"] + shared.opts.restore_config_state_file = config_state["filepath"] shared.opts.save(shared.config_filename) if restore_type == "webui" or restore_type == "both": @@ -149,7 +148,7 @@ def extension_table(): style = "" if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all": - style = ' style="color: var(--primary-400)"' + style = STYLE_PRIMARY code += f""" @@ -181,17 +180,25 @@ def update_config_states_table(state_name): webui_remote = config_state["webui"]["remote"] or "" webui_branch = config_state["webui"]["branch"] - webui_commit_hash = config_state["webui"]["commit_hash"] - if webui_commit_hash: - webui_commit_hash = webui_commit_hash[:8] - else: - webui_commit_hash = "" + webui_commit_hash = config_state["webui"]["commit_hash"] or "" webui_commit_date = config_state["webui"]["commit_date"] if webui_commit_date: webui_commit_date = time.asctime(time.gmtime(webui_commit_date)) else: webui_commit_date = "" + current_webui = config_states.get_webui_config() + + style_remote = "" + style_branch = "" + style_commit = "" + if current_webui["remote"] != webui_remote: + style_remote = STYLE_PRIMARY + if current_webui["branch"] != webui_branch: + style_branch = STYLE_PRIMARY + if current_webui["commit_hash"] != webui_commit_hash: + style_commit = STYLE_PRIMARY + code += f"""

Config Backup: {config_name}

Created at: {created_date}""" @@ -207,10 +214,10 @@ def update_config_states_table(state_name): - {webui_remote} - {webui_branch} - {webui_commit_hash} - {webui_commit_date} + {webui_remote} + {webui_branch} + {webui_commit_hash[:8]} + {webui_commit_date} @@ -253,13 +260,13 @@ def update_config_states_table(state_name): current_ext = ext_map[ext_name] current_ext.read_info_from_repo() if current_ext.enabled != ext_enabled: - style_enabled = ' style="color: var(--primary-400)"' + style_enabled = STYLE_PRIMARY if current_ext.remote != ext_remote: - style_remote = ' style="color: var(--primary-400)"' + style_remote = STYLE_PRIMARY if current_ext.branch != ext_branch: - style_branch = ' style="color: var(--primary-400)"' + style_branch = STYLE_PRIMARY if current_ext.commit_hash != ext_commit_hash: - style_commit = ' style="color: var(--primary-400)"' + style_commit = STYLE_PRIMARY code += f""" diff --git a/webui.py b/webui.py index b8f9a2c1..5ce45056 100644 --- a/webui.py +++ b/webui.py @@ -114,8 +114,10 @@ def initialize(): print(f"*** About to restore extension state from file: {config_state_file}") with open(config_state_file, "r", encoding="utf-8") as f: config_state = json.load(f) - config_states.restore_extension_state(config_state) + config_states.restore_extension_config(config_state) startup_timer.record("restore extension config") + else: + print(f"!!! Config state backup not found: {config_state_file}") if cmd_opts.ui_debug_mode: shared.sd_upscalers = upscaler.UpscalerLanczos().scalers @@ -321,8 +323,10 @@ def webui(): print(f"*** About to restore extension state from file: {config_state_file}") with open(config_state_file, "r", encoding="utf-8") as f: config_state = json.load(f) - config_states.restore_extension_state(config_state) + config_states.restore_extension_config(config_state) startup_timer.record("restore extension config") + else: + print(f"!!! Config state backup not found: {config_state_file}") localization.list_localizations(cmd_opts.localizations_dir) From 9b1fa8298127b05c71c4de04bd6f64b72540ef5a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:55:57 -0500 Subject: [PATCH 020/113] Add filename to UI and config name to filename --- modules/ui_extensions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index b94b3a3a..b1ef38e2 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -61,7 +61,7 @@ def save_config_state(name): if not name: name = "Config" current_config_state["name"] = name - filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + ".json") + filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + "_" + name + ".json") print(f"Saving backup of webui/extension state to {filename}.") with open(filename, "w", encoding="utf-8") as f: json.dump(current_config_state, f) @@ -175,6 +175,7 @@ def update_config_states_table(state_name): config_name = config_state.get("name", "Config") created_date = time.asctime(time.gmtime(config_state["created_at"])) + filepath = config_state.get("filepath", "") code = f"""""" @@ -200,6 +201,7 @@ def update_config_states_table(state_name): style_commit = STYLE_PRIMARY code += f"""

Config Backup: {config_name}

+ Filepath: {filepath} Created at: {created_date}""" code += f"""

WebUI State
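For reference, a minimal sketch of reading one of these config-state backups outside the UI. The key names follow the fields referenced in the diffs above (`name`, `created_at`, `filepath`, `webui`, `extensions`, `commit_hash`), but the exact schema of the saved JSON is an assumption, not guaranteed:

```python
import json
import time

def describe_config_state(path):
    # Key names mirror those read back in the diffs above; the exact
    # layout of the saved JSON is assumed, not guaranteed.
    with open(path, "r", encoding="utf-8") as f:
        state = json.load(f)

    created = time.asctime(time.gmtime(state["created_at"]))
    webui = state.get("webui", {})
    print(f"{state.get('name', 'Config')} (created {created})")
    print(f"  filepath: {state.get('filepath', path)}")
    print(f"  webui:    {webui.get('branch')} @ {str(webui.get('commit_hash') or '')[:8]}")
    for ext_name, ext in (state.get("extensions") or {}).items():
        print(f"  ext:      {ext_name} @ {str(ext.get('commit_hash') or '')[:8]}")

# describe_config_state("config_states/2023_03_29-12_00_00_Config.json")  # hypothetical path
```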

From 64bbd3bf030aac9068df6bbe79c852e97cf6dbde Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:00:51 -0500 Subject: [PATCH 021/113] Make into divs --- modules/ui_extensions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index b1ef38e2..3735965c 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -201,8 +201,8 @@ def update_config_states_table(state_name): style_commit = STYLE_PRIMARY code += f"""

Config Backup: {config_name}

- Filepath: {filepath} - Created at: {created_date}""" +
Filepath: {filepath}
+
Created at: {created_date}
""" code += f"""

WebUI State
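The table-building code in these patches applies one pattern over and over: compare a field from the backup against the live value and attach `STYLE_PRIMARY` only where the two differ. A small stand-alone sketch of that idea; the cell markup is illustrative, since the real f-strings build full table rows:

```python
import html

# Same constant as introduced in the diff above.
STYLE_PRIMARY = ' style="color: var(--primary-400)"'

def styled_cell(backup_value, current_value):
    # Highlight the cell only when the backed-up value differs from the
    # current one; the <td>/<label> wrapper here is illustrative.
    style = STYLE_PRIMARY if backup_value != current_value else ""
    return f"<td><label{style}>{html.escape(str(backup_value))}</label></td>"

print(styled_cell("master", "master"))  # plain cell
print(styled_cell("master", "dev"))     # highlighted cell
```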

From 1c0544abdbffb910837d285857515b932a073f89 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:21:57 -0500 Subject: [PATCH 022/113] Add links for commits in table, if remote is from GitHub --- modules/ui_extensions.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 3735965c..0843a7c0 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -122,6 +122,16 @@ def check_updates(id_task, disable_list): return extension_table(), "" +def make_commit_link(commit_hash, remote, text=None): + if text is None: + text = commit_hash[:8] + if remote.startswith("https://github.com/"): + href = os.path.join(remote, "commit", commit_hash) + return f'{text}' + else: + return text + + def extension_table(): code = f"""
@@ -150,11 +160,15 @@ def extension_table(): if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all": style = STYLE_PRIMARY + version_link = ext.version + if ext.commit_hash and ext.remote: + version_link = make_commit_link(ext.commit_hash, ext.remote, ext.version) + code += f""" - + {ext_status} """ @@ -200,6 +214,9 @@ def update_config_states_table(state_name): if current_webui["commit_hash"] != webui_commit_hash: style_commit = STYLE_PRIMARY + commit_link = make_commit_link(webui_commit_hash, webui_remote) + date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) + code += f"""

Config Backup: {config_name}

Filepath: {filepath}
Created at: {created_date}
""" @@ -218,8 +235,8 @@ def update_config_states_table(state_name): - - + +
{html.escape(ext.name)} {remote}{ext.version}{version_link}
{webui_remote} {webui_branch}{webui_commit_hash[:8]}{webui_commit_date}{commit_link}{date_link}
@@ -270,13 +287,16 @@ def update_config_states_table(state_name): if current_ext.commit_hash != ext_commit_hash: style_commit = STYLE_PRIMARY + commit_link = make_commit_link(ext_commit_hash, ext_remote) + date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) + code += f""" {html.escape(ext_name)} {remote} {ext_branch} - {ext_commit_hash[:8]} - {ext_commit_date} + {commit_link} + {date_link} """ From 563d048780790fb9e7e9a5fc54a86bdf1ee65d58 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:22:45 -0500 Subject: [PATCH 023/113] Squelch warning if no config restore --- webui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index 5ce45056..ee5e4f11 100644 --- a/webui.py +++ b/webui.py @@ -116,7 +116,7 @@ def initialize(): config_state = json.load(f) config_states.restore_extension_config(config_state) startup_timer.record("restore extension config") - else: + elif config_state_file: print(f"!!! Config state backup not found: {config_state_file}") if cmd_opts.ui_debug_mode: @@ -325,7 +325,7 @@ def webui(): config_state = json.load(f) config_states.restore_extension_config(config_state) startup_timer.record("restore extension config") - else: + elif config_state_file: print(f"!!! Config state backup not found: {config_state_file}") localization.list_localizations(cmd_opts.localizations_dir) From 3ccf6f5ae8e57233200f9cac52da8d5d88ecee93 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:26:52 -0500 Subject: [PATCH 024/113] Add webui link --- modules/ui_extensions.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 0843a7c0..af9e92cc 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -202,6 +202,10 @@ def update_config_states_table(state_name): else: webui_commit_date = "" + remote = f"""{html.escape(webui_remote or '')}""" + commit_link = make_commit_link(webui_commit_hash, webui_remote) + date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) + current_webui = config_states.get_webui_config() style_remote = "" @@ -214,9 +218,6 @@ def update_config_states_table(state_name): if current_webui["commit_hash"] != webui_commit_hash: style_commit = STYLE_PRIMARY - commit_link = make_commit_link(webui_commit_hash, webui_remote) - date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) - code += f"""

Config Backup: {config_name}

Filepath: {filepath}
Created at: {created_date}
""" @@ -233,7 +234,7 @@ def update_config_states_table(state_name): - {webui_remote} + {remote} {webui_branch} {commit_link} {date_link} @@ -270,6 +271,8 @@ def update_config_states_table(state_name): ext_commit_date = "" remote = f"""{html.escape(ext_remote or '')}""" + commit_link = make_commit_link(ext_commit_hash, ext_remote) + date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) style_enabled = "" style_remote = "" @@ -287,9 +290,6 @@ def update_config_states_table(state_name): if current_ext.commit_hash != ext_commit_hash: style_commit = STYLE_PRIMARY - commit_link = make_commit_link(ext_commit_hash, ext_remote) - date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) - code += f""" {html.escape(ext_name)} From 44e8e9c36807d4a71c2fc84129ebcf5ba4f77f21 Mon Sep 17 00:00:00 2001 From: devdn Date: Thu, 30 Mar 2023 00:54:28 -0400 Subject: [PATCH 025/113] fix live preview & alternate uncond guidance for better quality --- modules/sd_samplers_kdiffusion.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 6a54ce32..17d24df4 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -116,11 +116,13 @@ class CFGDenoiser(torch.nn.Module): tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond - sigma_thresh = s_min_uncond - if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model): - uncond = torch.zeros([0,0,uncond.shape[2]]) - x_in=x_in[:x_in.shape[0]//2] - sigma_in=sigma_in[:sigma_in.shape[0]//2] + if self.step % 2 and s_min_uncond > 0 and not is_edit_model: + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + sigma_threshold = s_min_uncond + if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_threshold*sigma_threshold) ): + uncond = torch.zeros([0,0,uncond.shape[2]]) + x_in=x_in[:x_in.shape[0]//2] + sigma_in=sigma_in[:sigma_in.shape[0]//2] if tensor.shape[1] == uncond.shape[1]: if not is_edit_model: @@ -159,7 +161,7 @@ class CFGDenoiser(torch.nn.Module): devices.test_for_nans(x_out, "unet") if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(x_out[0:uncond.shape[0]]) + sd_samplers_common.store_latent(x_out[0:x_out.shape[0]-uncond.shape[0]]) elif opts.live_preview_content == "Negative prompt": sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) From d5063e07e8b4737621978feffd37b18077b9ea64 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Thu, 30 Mar 2023 10:57:54 -0400 Subject: [PATCH 026/113] update torch --- .gitignore | 2 +- environment-wsl2.yaml | 11 ++++++----- launch.py | 6 +++--- requirements.txt | 1 + requirements_versions.txt | 4 ++-- webui-macos-env.sh | 2 +- webui.py | 2 ++ 7 files changed, 16 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index 0b1d17ca..3b48ba9a 100644 --- a/.gitignore +++ b/.gitignore @@ -32,4 +32,4 @@ notification.mp3 /extensions /test/stdout.txt /test/stderr.txt -/cache.json +/cache.json* diff --git a/environment-wsl2.yaml b/environment-wsl2.yaml index f8872750..06134565 100644 --- a/environment-wsl2.yaml +++ b/environment-wsl2.yaml @@ -4,8 +4,9 @@ channels: - defaults dependencies: - python=3.10 - - pip=22.2.2 - - cudatoolkit=11.3 - - pytorch=1.12.1 - - torchvision=0.13.1 - - numpy=1.23.1 \ No newline at end of file + - pip=23.0 + - cudatoolkit=11.8 + - pytorch=2.0 + - torchvision=0.15 + - numpy=1.23 + \ No 
newline at end of file diff --git a/launch.py b/launch.py index 68e08114..37c8b516 100644 --- a/launch.py +++ b/launch.py @@ -225,10 +225,10 @@ def run_extensions_installers(settings_file): def prepare_environment(): global skip_install - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.16rc425') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") @@ -296,7 +296,7 @@ def prepare_environment(): if not os.path.isfile(requirements_file): requirements_file = os.path.join(script_path, requirements_file) - run_pip(f"install -r \"{requirements_file}\"", "requirements for Web UI") + run_pip(f"install -r \"{requirements_file}\"", "requirements") run_extensions_installers(settings_file=args.ui_settings_file) diff --git a/requirements.txt b/requirements.txt index c72b2927..36cdae6c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +astunparse blendmodes accelerate basicsr diff --git a/requirements_versions.txt b/requirements_versions.txt index df65431a..6487f1c3 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -1,10 +1,10 @@ blendmodes==2022 transformers==4.25.1 -accelerate==0.12.0 +accelerate==0.18.0 basicsr==1.4.2 gfpgan==1.3.8 gradio==3.23 -numpy==1.23.3 +numpy==1.23.5 Pillow==9.4.0 realesrgan==0.3.0 torch diff --git a/webui-macos-env.sh b/webui-macos-env.sh index 37cac4fb..65d80413 100644 --- a/webui-macos-env.sh +++ b/webui-macos-env.sh @@ -11,7 +11,7 @@ fi export install_dir="$HOME" export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" -export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1" +export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118" export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git" export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71" export PYTORCH_ENABLE_MPS_FALLBACK=1 diff --git a/webui.py b/webui.py index b570895f..54552cdd 100644 --- a/webui.py +++ b/webui.py @@ -20,6 +20,8 @@ startup_timer = timer.Timer() import torch import pytorch_lightning # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") +warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") + startup_timer.record("import torch") import gradio From a73f3bf0cfc89cde294b42f5c566017daf4b2ccd Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 30 Mar 2023 23:19:40 -0600 Subject: [PATCH 027/113] Change extras "scale to" to sliders --- scripts/postprocessing_upscale.py | 14 ++++++++++---- style.css | 4 
++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index 11eab31a..bc43719b 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -4,8 +4,9 @@ import numpy as np from modules import scripts_postprocessing, shared import gradio as gr -from modules.ui_components import FormRow +from modules.ui_components import FormRow, ToolButton +switch_values_symbol = '\U000021C5' # ⇅ upscale_cache = {} @@ -25,9 +26,12 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with FormRow(): - upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") - upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") - upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") + with gr.Column(elem_id="upscaling_column_size", scale=4): + upscaling_resize_w = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="extras_upscaling_resize_w") + upscaling_resize_h = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="extras_upscaling_resize_w") + with gr.Column(elem_id="upscaling_dimensions_row", scale=1, elem_classes="dimensions-tools"): + upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn") + upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") with FormRow(): extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) @@ -36,6 +40,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") + upscaling_res_switch_btn.click(lambda w, h: (h, w), inputs=[upscaling_resize_w, upscaling_resize_h], outputs=[upscaling_resize_w, upscaling_resize_h], show_progress=False) tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) @@ -45,6 +50,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, + "upscaling_res_switch_btn": upscaling_res_switch_btn, "upscaler_1_name": extras_upscaler_1, "upscaler_2_name": extras_upscaler_2, "upscaler_2_visibility": extras_upscaler_2_visibility, diff --git a/style.css b/style.css index de16a7f2..aafc2362 100644 --- a/style.css +++ b/style.css @@ -312,6 +312,10 @@ div.dimensions-tools{ align-content: center; } +div#extras_scale_to_tab div.form{ + flex-direction: row; +} + #mode_img2img .gradio-image > div.fixed-height, #mode_img2img .gradio-image > div.fixed-height img{ height: 480px !important; max-height: 480px !important; From 69ad46b047678a7a97a152a20e702bac61e37b8b Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 30 Mar 2023 23:25:39 -0600 Subject: [PATCH 028/113] Import switch_values_symbol --- scripts/postprocessing_upscale.py 
| 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index bc43719b..bf27b64d 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -5,8 +5,7 @@ from modules import scripts_postprocessing, shared import gradio as gr from modules.ui_components import FormRow, ToolButton - -switch_values_symbol = '\U000021C5' # ⇅ +from modules.ui import switch_values_symbol upscale_cache = {} From 3ebdd2afd3769046289880d44bbe1322a832073f Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 31 Mar 2023 00:56:38 -0600 Subject: [PATCH 029/113] Don't return upscaling_res_switch_btn --- scripts/postprocessing_upscale.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index bf27b64d..e60208ac 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -49,7 +49,6 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, - "upscaling_res_switch_btn": upscaling_res_switch_btn, "upscaler_1_name": extras_upscaler_1, "upscaler_2_name": extras_upscaler_2, "upscaler_2_visibility": extras_upscaler_2_visibility, From 18e4ca46944201ccd0b506201d5da441eba93080 Mon Sep 17 00:00:00 2001 From: Z_nonymous <0x29a@free.fr> Date: Fri, 31 Mar 2023 10:54:42 +0200 Subject: [PATCH 030/113] Fix #9185 --- modules/img2img.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 953ac5d2..5ce408f4 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -4,7 +4,7 @@ import sys import traceback import numpy as np -from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops +from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError from modules import devices, sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict @@ -46,7 +46,10 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): if state.interrupted: break - img = Image.open(image) + try: + img = Image.open(image) + except UnidentifiedImageError: + continue # Use the EXIF orientation of photos taken by smartphones. img = ImageOps.exif_transpose(img) p.init_images = [img] * p.batch_size From c938b172a49433291e246b04f9835f3383bad0c8 Mon Sep 17 00:00:00 2001 From: bbonvi <6573230@gmail.com> Date: Fri, 31 Mar 2023 19:29:34 +0600 Subject: [PATCH 031/113] fix missing live preview and progress during certain tasks Sometimes tasks take longer than 5 seconds to start, resulting in missing progress bar and livepreviews, so we have to keep pulling for progress a bit longer (5s -> 20s). 
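The img2img batch fix (#9185) a few patches back boils down to a defensive loading loop: open each file, skip anything Pillow cannot identify, and normalize orientation before use. A stand-alone sketch of that pattern, with illustrative paths:

```python
from PIL import Image, ImageOps, UnidentifiedImageError

def load_batch_images(paths):
    # Mirrors the guard added for img2img batch processing: files that
    # Pillow cannot identify are skipped instead of aborting the batch,
    # and smartphone EXIF orientation is honored before the image is used.
    for path in paths:
        try:
            img = Image.open(path)
        except UnidentifiedImageError:
            print(f"Skipping non-image file: {path}")
            continue
        yield ImageOps.exif_transpose(img)

# for img in load_batch_images(["a.png", "notes.txt"]):  # hypothetical inputs
#     img.thumbnail((512, 512))
```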
--- javascript/progressbar.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 4ac9b8db..eb44aab9 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -138,7 +138,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre return } - if(elapsedFromStart > 5 && !res.queued && !res.active){ + if(elapsedFromStart > 20 && !res.queued && !res.active){ removeProgressBar() return } From 9dc722bcf27d6d2cd9f275df4f2aae23f58c1122 Mon Sep 17 00:00:00 2001 From: wywywywy Date: Sat, 1 Apr 2023 10:39:50 +0100 Subject: [PATCH 032/113] bug: outpaint-mk2 use sample file format not grid --- scripts/outpainting_mk_2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 0906da6a..670bb8ac 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -275,7 +275,7 @@ class Script(scripts.Script): if opts.samples_save: for img in all_processed_images: - images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p) + images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.samples_format, info=res.info, p=p) if opts.grid_save and not unwanted_grid_because_of_img_count: images.save_image(combined_grid_image, p.outpath_grids, "grid", res.seed, p.prompt, opts.grid_format, info=res.info, short_filename=not opts.grid_extended_filename, grid=True, p=p) From 80b847e72dacabbf334d37bb4e6cc65a9a223ce3 Mon Sep 17 00:00:00 2001 From: wywywywy Date: Sat, 1 Apr 2023 10:47:49 +0100 Subject: [PATCH 033/113] bug: poorman use sample file format not grid --- scripts/poor_mans_outpainting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index d8feda00..ddcbd2d3 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -138,7 +138,7 @@ class Script(scripts.Script): combined_image = images.combine_grid(grid) if opts.samples_save: - images.save_image(combined_image, p.outpath_samples, "", initial_seed, p.prompt, opts.grid_format, info=initial_info, p=p) + images.save_image(combined_image, p.outpath_samples, "", initial_seed, p.prompt, opts.samples_format, info=initial_info, p=p) processed = Processed(p, [combined_image], initial_seed, initial_info) From d132481058f8a827cd407f2121f128a2bb862f7a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 17:41:55 -0500 Subject: [PATCH 034/113] Embed model merge metadata in .safetensors file --- modules/extras.py | 44 ++++++++++++++++++++++++++++++++++++++++++-- modules/sd_models.py | 11 ++++++++++- modules/ui.py | 4 +++- 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index d8ece955..77d88592 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -1,6 +1,7 @@ import os import re import shutil +import json import torch @@ -71,7 +72,7 @@ def to_half(tensor, enable): return tensor -def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights): +def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, 
save_metadata): shared.state.begin() shared.state.job = 'model-merge' @@ -241,13 +242,52 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ shared.state.textinfo = "Saving" print(f"Saving to {output_modelname}...") + metadata = {"format": "pt", "models": {}, "merge_recipe": None} + + if save_metadata: + merge_recipe = { + "primary_model_hash": primary_model_info.sha256, + "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None, + "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None, + "interp_method": interp_method, + "multiplier": multiplier, + "save_as_half": save_as_half, + "custom_name": custom_name, + "config_source": config_source, + "bake_in_vae": bake_in_vae, + "discard_weights": discard_weights, + "is_inpainting": result_is_inpainting_model, + "is_instruct_pix2pix": result_is_instruct_pix2pix_model + } + metadata["merge_recipe"] = json.dumps(merge_recipe) + + def add_model_metadata(checkpoint_info): + metadata["models"][checkpoint_info.sha256] = { + "name": checkpoint_info.name, + "legacy_hash": checkpoint_info.hash, + "merge_recipe": checkpoint_info.metadata.get("merge_recipe", None) + } + + metadata["models"].update(checkpoint_info.metadata.get("models", {})) + + add_model_metadata(primary_model_info) + if secondary_model_info: + add_model_metadata(secondary_model_info) + if tertiary_model_info: + add_model_metadata(tertiary_model_info) + + metadata["models"] = json.dumps(metadata["models"]) + _, extension = os.path.splitext(output_modelname) if extension.lower() == ".safetensors": - safetensors.torch.save_file(theta_0, output_modelname, metadata={"format": "pt"}) + safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata) else: torch.save(theta_0, output_modelname) sd_models.list_models() + created_model = next((ckpt for ckpt in sd_models.checkpoints_list.values() if ckpt.name == filename), None) + if created_model: + created_model.calculate_shorthash() create_config(output_modelname, config_source, primary_model_info, secondary_model_info, tertiary_model_info) diff --git a/modules/sd_models.py b/modules/sd_models.py index 6ea874df..4f7613a1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -52,6 +52,15 @@ class CheckpointInfo: self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else []) + self.metadata = {} + + _, ext = os.path.splitext(self.filename) + if ext.lower() == ".safetensors": + try: + self.metadata = read_metadata_from_safetensors(filename) + except Exception as e: + errors.display(e, f"reading checkpoint metadata: {filename}") + def register(self): checkpoints_list[self.title] = self for id in self.ids: @@ -544,4 +553,4 @@ def unload_model_weights(sd_model=None, info=None): print(f"Unloaded weights {timer.summary()}.") - return sd_model \ No newline at end of file + return sd_model diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b..64fb93c3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1019,8 +1019,9 @@ def create_ui(): interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", 
elem_id="modelmerger_checkpoint_format") save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") with FormRow(): with gr.Column(): @@ -1658,6 +1659,7 @@ def create_ui(): config_source, bake_in_vae, discard_weights, + save_metadata, ], outputs=[ primary_model_name, From afc349c2c0d7c7543e8cc085cde2beef8549fffc Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 18:40:33 -0500 Subject: [PATCH 035/113] Add field for model merge type Incase this is supported by other merge extensions --- modules/extras.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/extras.py b/modules/extras.py index 77d88592..9a00c9a3 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -246,6 +246,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ if save_metadata: merge_recipe = { + "type": "webui", # indicate this model was merged with webui's built-in merger "primary_model_hash": primary_model_info.sha256, "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None, "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None, From 7c016dd642cc29e064715ac04ab3d83c5451b45e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 19:06:39 -0500 Subject: [PATCH 036/113] Calculate shorthash on merge if not exist --- modules/extras.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/extras.py b/modules/extras.py index 9a00c9a3..97d14e5a 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -263,6 +263,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ metadata["merge_recipe"] = json.dumps(merge_recipe) def add_model_metadata(checkpoint_info): + checkpoint_info.calculate_shorthash() metadata["models"][checkpoint_info.sha256] = { "name": checkpoint_info.name, "legacy_hash": checkpoint_info.hash, From 9a4e650800adc444c07a48572a958a71c2144c15 Mon Sep 17 00:00:00 2001 From: Pluventi Date: Mon, 3 Apr 2023 03:32:48 +0200 Subject: [PATCH 037/113] Update postprocessing.py Solution for anyone getting an error when batching on extras, even with a clean install of "stable diffusion webui" --- modules/postprocessing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 09d8e605..63b9caf8 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -18,7 +18,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: - image = Image.open(img) + image = Image.open(img.name) image_data.append(image) image_names.append(os.path.splitext(img.orig_name)[0]) elif extras_mode == 2: From fbaf6e4fd897fa1f3e3f747f1d699c240cad76a0 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 21:41:23 -0500 Subject: [PATCH 038/113] Namespace metadata fields --- modules/extras.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index 97d14e5a..ff4e9c4e 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -242,7 +242,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ shared.state.textinfo = "Saving" print(f"Saving to 
{output_modelname}...") - metadata = {"format": "pt", "models": {}, "merge_recipe": None} + metadata = {"format": "pt", "sd_merge_models": {}, "sd_merge_recipe": None} if save_metadata: merge_recipe = { @@ -260,17 +260,17 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ "is_inpainting": result_is_inpainting_model, "is_instruct_pix2pix": result_is_instruct_pix2pix_model } - metadata["merge_recipe"] = json.dumps(merge_recipe) + metadata["sd_merge_recipe"] = json.dumps(merge_recipe) def add_model_metadata(checkpoint_info): checkpoint_info.calculate_shorthash() - metadata["models"][checkpoint_info.sha256] = { + metadata["sd_merge_models"][checkpoint_info.sha256] = { "name": checkpoint_info.name, "legacy_hash": checkpoint_info.hash, - "merge_recipe": checkpoint_info.metadata.get("merge_recipe", None) + "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None) } - metadata["models"].update(checkpoint_info.metadata.get("models", {})) + metadata["sd_merge_models"].update(checkpoint_info.metadata.get("sd_merge_models", {})) add_model_metadata(primary_model_info) if secondary_model_info: @@ -278,7 +278,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ if tertiary_model_info: add_model_metadata(tertiary_model_info) - metadata["models"] = json.dumps(metadata["models"]) + metadata["sd_merge_models"] = json.dumps(metadata["sd_merge_models"]) _, extension = os.path.splitext(output_modelname) if extension.lower() == ".safetensors": From d9fdb5214922b10a16c66207376953460972c6e8 Mon Sep 17 00:00:00 2001 From: GeorgLegato Date: Mon, 3 Apr 2023 04:53:29 +0200 Subject: [PATCH 039/113] Update script.js updated how tabs are presented in DOM with Gradio 3.23. --- script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script.js b/script.js index 1b9a443f..03afe844 100644 --- a/script.js +++ b/script.js @@ -7,7 +7,7 @@ function gradioApp() { } function get_uiCurrentTab() { - return gradioApp().querySelector('#tabs button:not(.border-transparent)') + return gradioApp().querySelector('#tabs button.selected') } function get_uiCurrentTabContent() { From aef42bfec09a9ca93d1222b7b47256f37e192a32 Mon Sep 17 00:00:00 2001 From: keith <1868690+wk5ovc@users.noreply.github.com> Date: Mon, 3 Apr 2023 17:05:49 +0800 Subject: [PATCH 040/113] Fix #9046 /sdapi/v1/txt2img endpoint not working **Describe what this pull request is trying to achieve.** Fix https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/9046 **Environment this was tested in** * OS: Linux * Browser: chrome * Graphics card: RTX 3090 --- webui.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/webui.py b/webui.py index b570895f..8927aa33 100644 --- a/webui.py +++ b/webui.py @@ -5,6 +5,7 @@ import importlib import signal import re import warnings +import asyncio from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware @@ -66,6 +67,46 @@ if cmd_opts.server_name: else: server_name = "0.0.0.0" if cmd_opts.listen else None +if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): + # "Any thread" and "selector" should be orthogonal, but there's not a clean + # interface for composing policies so pick the right base. 
+ _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore +else: + _BasePolicy = asyncio.DefaultEventLoopPolicy + + +class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore + """Event loop policy that allows loop creation on any thread. + + The default `asyncio` event loop policy only automatically creates + event loops in the main threads. Other threads must create event + loops explicitly or `asyncio.get_event_loop` (and therefore + `.IOLoop.current`) will fail. Installing this policy allows event + loops to be created automatically on any thread, matching the + behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2). + + Usage:: + + asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + + .. versionadded:: 5.0 + + """ + + def get_event_loop(self) -> asyncio.AbstractEventLoop: + try: + return super().get_event_loop() + except (RuntimeError, AssertionError): + # This was an AssertionError in python 3.4.2 (which ships with debian jessie) + # and changed to a RuntimeError in 3.4.3. + # "There is no current event loop in thread %r" + loop = self.new_event_loop() + self.set_event_loop(loop) + return loop + + +asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + def check_versions(): if shared.cmd_opts.skip_version_check: From d537a1f1b62de7795a77feb71b4c990389c30087 Mon Sep 17 00:00:00 2001 From: Micky Brunetti Date: Tue, 4 Apr 2023 00:14:20 +0900 Subject: [PATCH 041/113] Fix skip-install bug (see #8935) --- launch.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/launch.py b/launch.py index 68e08114..b8f650fc 100644 --- a/launch.py +++ b/launch.py @@ -19,7 +19,6 @@ python = sys.executable git = os.environ.get('GIT', "git") index_url = os.environ.get('INDEX_URL', "") stored_commit_hash = None -skip_install = False dir_repos = "repositories" if 'GRADIO_ANALYTICS_ENABLED' not in os.environ: @@ -122,7 +121,7 @@ def run_python(code, desc=None, errdesc=None): def run_pip(args, desc=None): - if skip_install: + if "--skip-install" in sys.argv: return index_url_line = f' --index-url {index_url}' if index_url != '' else '' @@ -223,8 +222,6 @@ def run_extensions_installers(settings_file): def prepare_environment(): - global skip_install - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") From f7215906af573f6e60b006cd430e82691ba4f8d6 Mon Sep 17 00:00:00 2001 From: gmasil <54176035+gmasil@users.noreply.github.com> Date: Mon, 3 Apr 2023 18:19:57 +0200 Subject: [PATCH 042/113] allow styles.csv to be symlinked or mounted in docker without moving the file around --- modules/styles.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index 990d5623..9ed85991 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -72,16 +72,14 @@ class StyleDatabase: return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) def save_styles(self, path: str) -> None: - # Write to temporary file first, so we don't nuke the file if something goes wrong - fd, temp_path = tempfile.mkstemp(".csv") + # Always keep a backup file around + if os.path.exists(path): + shutil.copy(path, path + ".bak") + + fd = os.open(path, os.O_RDWR|os.O_CREAT) with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: # _fields is actually part of the public API: typing.NamedTuple is 
a replacement for collections.NamedTuple, # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict() writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) writer.writeheader() writer.writerows(style._asdict() for k, style in self.styles.items()) - - # Always keep a backup file around - if os.path.exists(path): - shutil.move(path, path + ".bak") - shutil.move(temp_path, path) From 54fd00ff8f6fc1396ce0396772962b45609e7a9c Mon Sep 17 00:00:00 2001 From: Liam Date: Mon, 3 Apr 2023 13:28:20 -0400 Subject: [PATCH 043/113] fixed logic for updating the displayed generation params when the image modal is closed --- javascript/generationParams.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/generationParams.js b/javascript/generationParams.js index 95f05093..1266a266 100644 --- a/javascript/generationParams.js +++ b/javascript/generationParams.js @@ -16,9 +16,9 @@ onUiUpdate(function(){ let modalObserver = new MutationObserver(function(mutations) { mutations.forEach(function(mutationRecord) { - let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText - if (mutationRecord.target.style.display === 'none' && selectedTab === 'txt2img' || selectedTab === 'img2img') - gradioApp().getElementById(selectedTab+"_generation_info_button").click() + let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText + if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) + gradioApp().getElementById(selectedTab+"_generation_info_button")?.click() }); }); From 4fa59b045add1d23350e884e201dc77bc34864e6 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Mon, 3 Apr 2023 15:23:35 -0400 Subject: [PATCH 044/113] update xformers --- environment-wsl2.yaml | 1 - launch.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/environment-wsl2.yaml b/environment-wsl2.yaml index 06134565..0c4ae680 100644 --- a/environment-wsl2.yaml +++ b/environment-wsl2.yaml @@ -9,4 +9,3 @@ dependencies: - pytorch=2.0 - torchvision=0.15 - numpy=1.23 - \ No newline at end of file diff --git a/launch.py b/launch.py index 37c8b516..c5ae9092 100644 --- a/launch.py +++ b/launch.py @@ -228,7 +228,7 @@ def prepare_environment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.18') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") From 5ebe3b25044efac959c4d8f208dd17d6f1e5ce8a Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 06:50:29 +0000 Subject: [PATCH 045/113] Added guard clause to prevent multiple tunnel creations --- modules/ngrok.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/ngrok.py b/modules/ngrok.py index 3df2c06b..90268bf1 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ 
-1,6 +1,14 @@ from pyngrok import ngrok, conf, exception def connect(token, port, region): + # Guard for existing tunnels + existing = ngrok.get_tunnels() + if existing: + public_url = existing[0].public_url + print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' + 'You can use this link after the launch is complete.') + return + account = None if token is None: token = 'None' From 2edf73b38fbea4ce67c38edafea8a8983b9435b5 Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 06:57:39 +0000 Subject: [PATCH 046/113] Improved message clarity --- modules/ngrok.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ngrok.py b/modules/ngrok.py index 90268bf1..5302b6a8 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -5,7 +5,7 @@ def connect(token, port, region): existing = ngrok.get_tunnels() if existing: public_url = existing[0].public_url - print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' + print(f'ngrok has already been connected to localhost:{port}! URL: {public_url}\n' 'You can use this link after the launch is complete.') return From 3158d17ccf6adfe974691f46526773df2f4028f5 Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 07:41:55 +0000 Subject: [PATCH 047/113] fixed an issue with using ngrok for other connections and also ngrok not using auth_token --- modules/ngrok.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/modules/ngrok.py b/modules/ngrok.py index 5302b6a8..1ad7989b 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -1,14 +1,6 @@ from pyngrok import ngrok, conf, exception def connect(token, port, region): - # Guard for existing tunnels - existing = ngrok.get_tunnels() - if existing: - public_url = existing[0].public_url - print(f'ngrok has already been connected to localhost:{port}! URL: {public_url}\n' - 'You can use this link after the launch is complete.') - return - account = None if token is None: token = 'None' @@ -21,6 +13,18 @@ def connect(token, port, region): config = conf.PyngrokConfig( auth_token=token, region=region ) + + # Guard for existing tunnels + existing = ngrok.get_tunnels(pyngrok_config=config) + if existing: + for established in existing: + # Extra configuration in the case that the user is also using ngrok for other tunnels + if established.config['addr'][-4:] == str(port): + public_url = existing[0].public_url + print(f'ngrok has already been connected to localhost:{port}! 
URL: {public_url}\n' + 'You can use this link after the launch is complete.') + return + try: if account is None: public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url From 539a69860bafe1c922029722a50462f3ef0db5e4 Mon Sep 17 00:00:00 2001 From: hitomi Date: Sat, 25 Mar 2023 13:58:53 -0700 Subject: [PATCH 048/113] fix `--realesrgan-models-path` not working --- modules/realesrgan_model.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index aad4a629..d6079433 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -9,7 +9,7 @@ from realesrgan import RealESRGANer from modules.upscaler import Upscaler, UpscalerData from modules.shared import cmd_opts, opts - +from modules import modelloader class UpscalerRealESRGAN(Upscaler): def __init__(self, path): @@ -23,7 +23,15 @@ class UpscalerRealESRGAN(Upscaler): self.enable = True self.scalers = [] scalers = self.load_models(path) + + local_model_paths = self.find_models(ext_filter=[".pth"]) for scaler in scalers: + if scaler.local_data_path.startswith("http"): + filename = modelloader.friendly_name(scaler.local_data_path) + local = next(iter([local_model for local_model in local_model_paths if local_model.endswith(filename + '.pth')]), None) + if local: + scaler.local_data_path = local + if scaler.name in opts.realesrgan_enabled_models: self.scalers.append(scaler) @@ -64,7 +72,9 @@ class UpscalerRealESRGAN(Upscaler): print(f"Unable to find model info: {path}") return None - info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True) + if info.local_data_path.startswith("http"): + info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True) + return info except Exception as e: print(f"Error making Real-ESRGAN models list: {e}", file=sys.stderr) From 2ba42bfbd2682c647df07e3baf0d013fbf516329 Mon Sep 17 00:00:00 2001 From: hitomi Date: Sat, 25 Mar 2023 14:02:29 -0700 Subject: [PATCH 049/113] fix `--ldsr-models-path` not working --- extensions-builtin/LDSR/scripts/ldsr_model.py | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index b8cff29b..da19cff1 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -25,22 +25,28 @@ class UpscalerLDSR(Upscaler): yaml_path = os.path.join(self.model_path, "project.yaml") old_model_path = os.path.join(self.model_path, "model.pth") new_model_path = os.path.join(self.model_path, "model.ckpt") - safetensors_model_path = os.path.join(self.model_path, "model.safetensors") + + local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"]) + local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None) + local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None) + local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None) + if os.path.exists(yaml_path): statinfo = os.stat(yaml_path) if statinfo.st_size >= 10485760: print("Removing invalid LDSR YAML file.") os.remove(yaml_path) + if os.path.exists(old_model_path): print("Renaming model from model.pth to model.ckpt") os.rename(old_model_path, 
new_model_path) - if os.path.exists(safetensors_model_path): - model = safetensors_model_path + + if local_safetensors_path is not None and os.path.exists(local_safetensors_path): + model = local_safetensors_path else: - model = load_file_from_url(url=self.model_url, model_dir=self.model_path, - file_name="model.ckpt", progress=True) - yaml = load_file_from_url(url=self.yaml_url, model_dir=self.model_path, - file_name="project.yaml", progress=True) + model = local_ckpt_path if local_ckpt_path is not None else load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="model.ckpt", progress=True) + + yaml = local_yaml_path if local_yaml_path is not None else load_file_from_url(url=self.yaml_url, model_dir=self.model_path, file_name="project.yaml", progress=True) try: return LDSR(model, yaml) From 80752f43b22acd85bf6ab54b2e4788f144a0c813 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 4 Apr 2023 17:27:27 -0400 Subject: [PATCH 050/113] revert xformers --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index c5ae9092..37c8b516 100644 --- a/launch.py +++ b/launch.py @@ -228,7 +228,7 @@ def prepare_environment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.18') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") From c01dc1cb30f7cd87e1df6458580da99c702ee513 Mon Sep 17 00:00:00 2001 From: pangbo13 <373108669@qq.com> Date: Wed, 5 Apr 2023 19:22:51 +0800 Subject: [PATCH 051/113] add dropdown for X/Y/Z plot --- scripts/xyz_grid.py | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795..774fa2c7 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -374,16 +374,19 @@ class Script(scripts.Script): with gr.Row(): x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type")) x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values")) + x_values_dropdown = gr.Dropdown(label="X values",visible=False,multiselect=True,interactive=True) fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False) with gr.Row(): y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type")) y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values")) + y_values_dropdown = gr.Dropdown(label="Y values",visible=False,multiselect=True,interactive=True) fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False) with gr.Row(): z_type = gr.Dropdown(label="Z type", choices=[x.label for x in 
self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type")) z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values")) + z_values_dropdown = gr.Dropdown(label="Z values",visible=False,multiselect=True,interactive=True) fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False) with gr.Row(variant="compact", elem_id="axis_options"): @@ -413,18 +416,20 @@ class Script(scripts.Script): def fill(x_type): axis = self.current_axis_options[x_type] - return ", ".join(axis.choices()) if axis.choices else gr.update() + return axis.choices() if axis.choices else gr.update() - fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values]) - fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values]) - fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values]) + fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values_dropdown]) + fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown]) + fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown]) def select_axis(x_type): - return gr.Button.update(visible=self.current_axis_options[x_type].choices is not None) + choices = self.current_axis_options[x_type].choices + has_choices = choices is not None + return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices() if has_choices else None,visible=has_choices,value=[]) - x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button]) - y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button]) - z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button]) + x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button,x_values,x_values_dropdown]) + y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button,y_values,y_values_dropdown]) + z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button,z_values,z_values_dropdown]) self.infotext_fields = ( (x_type, "X Type"), @@ -435,20 +440,23 @@ class Script(scripts.Script): (z_values, "Z Values"), ) - return [x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] + return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] - def run(self, p, x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size): + def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size): if not no_fixed_seeds: modules.processing.fix_seed(p) if not opts.return_grid: p.batch_size = 1 - def process_axis(opt, vals): + def process_axis(opt, vals, vals_dropdown): if opt.label == 'Nothing': return [0] - valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] + if opt.choices is not None: + valslist = vals_dropdown + else: + valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] if opt.type == int: valslist_ext = [] @@ -506,13 +514,13 @@ class Script(scripts.Script): return valslist x_opt = self.current_axis_options[x_type] - xs = process_axis(x_opt, x_values) + xs = process_axis(x_opt, x_values, 
x_values_dropdown) y_opt = self.current_axis_options[y_type] - ys = process_axis(y_opt, y_values) + ys = process_axis(y_opt, y_values, y_values_dropdown) z_opt = self.current_axis_options[z_type] - zs = process_axis(z_opt, z_values) + zs = process_axis(z_opt, z_values, z_values_dropdown) # this could be moved to common code, but unlikely to be ever triggered anywhere else Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes From 3ac5f9c471e4cfb5b664f9f0a7f7e7b171b1cee1 Mon Sep 17 00:00:00 2001 From: pangbo13 <373108669@qq.com> Date: Wed, 5 Apr 2023 21:43:27 +0800 Subject: [PATCH 052/113] fix axis swap and infotxt --- scripts/xyz_grid.py | 43 ++++++++++++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 774fa2c7..52ae1c6e 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -404,14 +404,14 @@ class Script(scripts.Script): swap_yz_axes_button = gr.Button(value="Swap Y/Z axes", elem_id="yz_grid_swap_axes_button") swap_xz_axes_button = gr.Button(value="Swap X/Z axes", elem_id="xz_grid_swap_axes_button") - def swap_axes(axis1_type, axis1_values, axis2_type, axis2_values): - return self.current_axis_options[axis2_type].label, axis2_values, self.current_axis_options[axis1_type].label, axis1_values + def swap_axes(axis1_type, axis1_values, axis1_values_dropdown, axis2_type, axis2_values, axis2_values_dropdown): + return self.current_axis_options[axis2_type].label, axis2_values, axis2_values_dropdown, self.current_axis_options[axis1_type].label, axis1_values, axis1_values_dropdown - xy_swap_args = [x_type, x_values, y_type, y_values] + xy_swap_args = [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown] swap_xy_axes_button.click(swap_axes, inputs=xy_swap_args, outputs=xy_swap_args) - yz_swap_args = [y_type, y_values, z_type, z_values] + yz_swap_args = [y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown] swap_yz_axes_button.click(swap_axes, inputs=yz_swap_args, outputs=yz_swap_args) - xz_swap_args = [x_type, x_values, z_type, z_values] + xz_swap_args = [x_type, x_values, x_values_dropdown, z_type, z_values, z_values_dropdown] swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args) def fill(x_type): @@ -422,22 +422,37 @@ class Script(scripts.Script): fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown]) fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown]) - def select_axis(x_type): - choices = self.current_axis_options[x_type].choices + def select_axis(axis_type,axis_values_dropdown): + choices = self.current_axis_options[axis_type].choices has_choices = choices is not None - return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices() if has_choices else None,visible=has_choices,value=[]) + current_values = axis_values_dropdown + if has_choices: + choices = choices() + if isinstance(current_values,str): + current_values = current_values.split(",") + current_values = list(filter(lambda x: x in choices, current_values)) + return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices if has_choices else None,visible=has_choices,value=current_values) - x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button,x_values,x_values_dropdown]) - y_type.change(fn=select_axis, inputs=[y_type], 
outputs=[fill_y_button,y_values,y_values_dropdown]) - z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button,z_values,z_values_dropdown]) + x_type.change(fn=select_axis, inputs=[x_type,x_values_dropdown], outputs=[fill_x_button,x_values,x_values_dropdown]) + y_type.change(fn=select_axis, inputs=[y_type,y_values_dropdown], outputs=[fill_y_button,y_values,y_values_dropdown]) + z_type.change(fn=select_axis, inputs=[z_type,z_values_dropdown], outputs=[fill_z_button,z_values,z_values_dropdown]) + + def get_dropdown_update_from_params(axis,params): + val_key = axis + " Values" + vals = params.get(val_key,"") + valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] + return gr.update(value = valslist) self.infotext_fields = ( (x_type, "X Type"), (x_values, "X Values"), + (x_values_dropdown, lambda params:get_dropdown_update_from_params("X",params)), (y_type, "Y Type"), (y_values, "Y Values"), + (y_values_dropdown, lambda params:get_dropdown_update_from_params("Y",params)), (z_type, "Z Type"), (z_values, "Z Values"), + (z_values_dropdown, lambda params:get_dropdown_update_from_params("Z",params)), ) return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] @@ -514,12 +529,18 @@ class Script(scripts.Script): return valslist x_opt = self.current_axis_options[x_type] + if x_opt.choices is not None: + x_values = ",".join(x_values_dropdown) xs = process_axis(x_opt, x_values, x_values_dropdown) y_opt = self.current_axis_options[y_type] + if y_opt.choices is not None: + y_values = ",".join(y_values_dropdown) ys = process_axis(y_opt, y_values, y_values_dropdown) z_opt = self.current_axis_options[z_type] + if z_opt.choices is not None: + z_values = ",".join(z_values_dropdown) zs = process_axis(z_opt, z_values, z_values_dropdown) # this could be moved to common code, but unlikely to be ever triggered anywhere else From 52a8f286ef99bb5004bea2b099a7bcbb073b638f Mon Sep 17 00:00:00 2001 From: Andre Ubuntu Date: Wed, 5 Apr 2023 20:28:00 -0300 Subject: [PATCH 053/113] fix preprocess orientation --- modules/textual_inversion/preprocess.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 2239cb84..9cb98694 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -161,7 +161,25 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre params.subindex = 0 filename = os.path.join(src, imagefile) try: - img = Image.open(filename).convert("RGB") + img = Image.open(filename) + # make sure to rotate the image according to EXIF data of the original image + # ImageOps.exif_transpose(img) # doesn't work for some reason + EXIF = img._getexif() + # rotate the image according to the EXIF data + try: + if EXIF[274] == 3: + # print("Rotating image by 180 degrees") + img = img.rotate(180, expand=True) + elif EXIF[274] == 6: + # print("Rotating image by 270 degrees") + img = img.rotate(270, expand=True) + elif EXIF[274] == 8: + # print("Rotating image by 90 degrees") + img = img.rotate(90, expand=True) + except: + pass + # print("No EXIF data found for image: " + filename) + img = img.convert("RGB") except Exception: continue From 3a5b47e26e8cb1cd640fedfe7cb5263a588d7088 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Thu, 6 Apr 2023 01:36:27 +0200 
Subject: [PATCH 054/113] Forcing PyTorch version for AMD GPUs automatic install The old code tries to install the newest versions of pytorch, wich is currently 2.0. Forcing it to 1.13.1 --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 8cdad22d..7d9a8538 100755 --- a/webui.sh +++ b/webui.sh @@ -118,7 +118,7 @@ case "$gpu_info" in esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then - export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2" + export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --extra-index-url https://download.pytorch.org/whl/rocm5.2" fi for preq in "${GIT}" "${python_cmd}" From 48c06af8dc718abf0bf9355ea5548c6a66e0b1e6 Mon Sep 17 00:00:00 2001 From: Andre Ubuntu Date: Wed, 5 Apr 2023 20:51:29 -0300 Subject: [PATCH 055/113] Pythonic way to achieve it --- modules/textual_inversion/preprocess.py | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 9cb98694..de1ddb59 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -162,23 +162,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre filename = os.path.join(src, imagefile) try: img = Image.open(filename) - # make sure to rotate the image according to EXIF data of the original image - # ImageOps.exif_transpose(img) # doesn't work for some reason - EXIF = img._getexif() - # rotate the image according to the EXIF data - try: - if EXIF[274] == 3: - # print("Rotating image by 180 degrees") - img = img.rotate(180, expand=True) - elif EXIF[274] == 6: - # print("Rotating image by 270 degrees") - img = img.rotate(270, expand=True) - elif EXIF[274] == 8: - # print("Rotating image by 90 degrees") - img = img.rotate(90, expand=True) - except: - pass - # print("No EXIF data found for image: " + filename) + img = ImageOps.exif_transpose(img) img = img.convert("RGB") except Exception: continue From b3593d0997bfdcca7f8aa01663e81720db50e494 Mon Sep 17 00:00:00 2001 From: For Sure Date: Thu, 6 Apr 2023 19:42:26 +0300 Subject: [PATCH 056/113] Add support for saving init images in img2img --- modules/processing.py | 8 ++++++++ modules/shared.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..5556afc5 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -3,6 +3,7 @@ import math import os import sys import warnings +import hashlib import torch import numpy as np @@ -476,6 +477,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Init image hash": getattr(p, 'init_img_hash', None) } generation_params.update(p.extra_generation_params) @@ -1007,6 +1009,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.color_corrections = [] imgs = [] for img in self.init_images: + + # Save init image + if opts.save_init_img: + self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest() + images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, 
save_to_dirs=False) + image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..69c2b21e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -39,6 +39,7 @@ restricted_opts = { "outdir_grids", "outdir_txt2img_grids", "outdir_save", + "outdir_init_images" } ui_reorder_categories = [ @@ -253,6 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), + "save_init_img": OptionInfo(True, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), @@ -268,6 +270,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), { "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), })) options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { From 63a6f9b4d98a192bb359910cb284cf00582baabf Mon Sep 17 00:00:00 2001 From: forsurefr <67145502+forsurefr@users.noreply.github.com> Date: Fri, 7 Apr 2023 12:13:51 +0300 Subject: [PATCH 057/113] Do not save init image by default --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 69c2b21e..c5a1b5ad 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -254,7 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), - "save_init_img": OptionInfo(True, "Save init images when using img2img"), + "save_init_img": OptionInfo(False, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), From d609f6030ec464b371a899ced366c62bbd9a4a91 Mon Sep 17 00:00:00 2001 From: gk Date: Fri, 7 Apr 2023 21:04:46 +0900 Subject: [PATCH 058/113] Add [batch_number] and [generation_number] filename patterns --- modules/images.py | 6 ++++++ modules/processing.py | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index b3535070..5c0cf1d8 100644 --- a/modules/images.py +++ b/modules/images.py @@ -352,6 +352,8 @@ class FilenameGenerator: 'prompt_no_styles': lambda self: self.prompt_no_style(), 'prompt_spaces': lambda self: 
sanitize_filename_part(self.prompt, replace_spaces=False), 'prompt_words': lambda self: self.prompt_words(), + 'batch_number': lambda self: self.p.batch_index + 1, + 'generation_number': lambda self: self.p.iteration * self.p.batch_size + self.p.batch_index + 1, } default_time_format = '%Y%m%d%H%M%S' @@ -403,6 +405,10 @@ class FilenameGenerator: for m in re_pattern.finditer(x): text, pattern = m.groups() + + if pattern is not None and (pattern.lower() == 'batch_number' and self.p.batch_size == 1 or pattern.lower() == 'generation_number' and self.p.n_iter == 1 and self.p.batch_size == 1): + continue + res += text if pattern is None: diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..0e6a60ba 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -670,6 +670,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n) for i, x_sample in enumerate(x_samples_ddim): + p.batch_index = i + x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) @@ -718,7 +720,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.return_mask: output_images.append(image_mask) - + if opts.return_mask_composite: output_images.append(image_mask_composite) From 27b9ec60e4ede748ec23615fecddb70e48daa623 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Sat, 8 Apr 2023 15:58:00 -0400 Subject: [PATCH 059/113] sort embeddings by name (case insensitive) --- modules/textual_inversion/textual_inversion.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index d2e62e58..7c50839f 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -2,7 +2,7 @@ import os import sys import traceback import inspect -from collections import namedtuple +from collections import namedtuple, OrderedDict import torch import tqdm @@ -108,7 +108,7 @@ class DirWithTextualInversionEmbeddings: class EmbeddingDatabase: def __init__(self): self.ids_lookup = {} - self.word_embeddings = {} + self.word_embeddings = OrderedDict() self.skipped_embeddings = {} self.expected_shape = -1 self.embedding_dirs = {} @@ -233,6 +233,9 @@ class EmbeddingDatabase: self.load_from_dir(embdir) embdir.update() + # re-sort word_embeddings because load_from_dir may not load in alphabetic order. 
+ self.word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys())) if self.previously_displayed_embeddings != displayed_embeddings: self.previously_displayed_embeddings = displayed_embeddings From 1aba8d82cbb816a755d012c5c729d8bafeb1b8ed Mon Sep 17 00:00:00 2001 From: yike5460 Date: Sun, 9 Apr 2023 22:22:43 +0800 Subject: [PATCH 060/113] feat: add branch support for extension installation --- modules/ui_extensions.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda2..d9487f83 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -129,7 +129,7 @@ def normalize_git_url(url): return url -def install_extension_from_url(dirname, url): +def install_extension_from_url(dirname, branch_name, url): check_access() assert url, 'No URL specified' @@ -150,7 +150,7 @@ def install_extension_from_url(dirname, url): try: shutil.rmtree(tmpdir, True) - with git.Repo.clone_from(url, tmpdir) as repo: + with git.Repo.clone_from(url, tmpdir, branch=branch_name if branch_name else '') as repo: repo.remote().fetch() for submodule in repo.submodules: submodule.update() @@ -376,13 +376,14 @@ def create_ui(): with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") + install_branch = gr.Text(label="Branch name for extension's git repository", placeholder="Leave empty for default branch") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") install_result = gr.HTML(elem_id="extension_install_result") install_button.click( fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]), - inputs=[install_dirname, install_url], + inputs=[install_dirname, install_branch, install_url], outputs=[extensions_table, install_result], ) From c19618f37059b425b1e53429ad8def2caa78cdec Mon Sep 17 00:00:00 2001 From: Ilya Khadykin Date: Sun, 9 Apr 2023 21:33:09 +0200 Subject: [PATCH 061/113] fix(extras): fix batch image processing on 'Extras\Batch Process' tab This change fixes an issue where an incorrect type was passed to the PIL.Image.open() function that caused the whole process to fail. Scope of this change is limited to only batch image processing, and it shouldn't affect other functionality. 
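The fix that follows swaps the upload widget to gr.Files and resolves each upload to its temporary file path before handing it to PIL. A minimal sketch of the pattern, assuming (as the diff below does) that Gradio's file components yield tempfile-like objects exposing .name (the on-disk path of the temporary file) and .orig_name (the uploaded filename); the load_uploaded_images helper is illustrative only and not part of the patch:

import os
from PIL import Image

def load_uploaded_images(uploads):
    """Open each Gradio upload via its temporary file path, keeping the original filename stem."""
    images, names = [], []
    for upload in uploads:
        # .name points at the temporary file Gradio wrote to disk, not the original filename
        images.append(Image.open(os.path.abspath(upload.name)))
        names.append(os.path.splitext(upload.orig_name)[0])
    return images, names

The key point is that PIL.Image.open() expects a path or file object, so the handler must dereference the temporary file rather than pass the upload wrapper itself.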
--- modules/postprocessing.py | 6 ++++-- modules/ui_postprocessing.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 09d8e605..c27ad8db 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -1,4 +1,6 @@ import os +import tempfile +from typing import List from PIL import Image @@ -6,7 +8,7 @@ from modules import shared, images, devices, scripts, scripts_postprocessing, ui from modules.shared import opts -def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): +def run_postprocessing(extras_mode, image, image_folder: List[tempfile.NamedTemporaryFile], input_dir, output_dir, show_extras_results, *args, save_output: bool = True): devices.torch_gc() shared.state.begin() @@ -18,7 +20,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: - image = Image.open(img) + image = Image.open(os.path.abspath(img.name)) image_data.append(image) image_names.append(os.path.splitext(img.orig_name)[0]) elif extras_mode == 2: diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index b418d955..d278e1b6 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -13,7 +13,7 @@ def create_ui(): extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab") as tab_batch: - image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") + image_batch = gr.Files(label="Batch Process", interactive=True, elem_id="extras_image_batch") with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab") as tab_batch_dir: extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") From c84118d70d7c3dd2f741f9e89b9a8a4643f27f98 Mon Sep 17 00:00:00 2001 From: bluelovers Date: Tue, 4 Apr 2023 23:42:39 +0800 Subject: [PATCH 062/113] feat(xyz): try sort Checkpoint name values --- scripts/xyz_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795..8aaee244 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -211,7 +211,7 @@ axis_options = [ AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), - AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: list(sd_models.checkpoints_list)), + AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)), AxisOption("Sigma Churn", float, apply_field("s_churn")), AxisOption("Sigma min", float, apply_field("s_tmin")), AxisOption("Sigma max", float, apply_field("s_tmax")), From 7c62bb2788d9cec10bab9d0154bd24f3658f7a83 Mon Sep 17 
00:00:00 2001 From: yike5460 Date: Mon, 10 Apr 2023 09:38:26 +0800 Subject: [PATCH 063/113] fix: support for default branch --- modules/ui_extensions.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index d9487f83..b402bc8b 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -150,10 +150,17 @@ def install_extension_from_url(dirname, branch_name, url): try: shutil.rmtree(tmpdir, True) - with git.Repo.clone_from(url, tmpdir, branch=branch_name if branch_name else '') as repo: - repo.remote().fetch() - for submodule in repo.submodules: - submodule.update() + if branch_name == '': + # if no branch is specified, use the default branch + with git.Repo.clone_from(url, tmpdir) as repo: + repo.remote().fetch() + for submodule in repo.submodules: + submodule.update() + else: + with git.Repo.clone_from(url, tmpdir, branch=branch_name) as repo: + repo.remote().fetch() + for submodule in repo.submodules: + submodule.update() try: os.rename(tmpdir, target_dir) except OSError as err: @@ -376,7 +383,7 @@ def create_ui(): with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") - install_branch = gr.Text(label="Branch name for extension's git repository", placeholder="Leave empty for default branch") + install_branch = gr.Text(label="Specific branch name", placeholder="Leave empty for default main branch") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") install_result = gr.HTML(elem_id="extension_install_result") From 9edd4b6e516ec327e15cc00a3933c681fc4b2f75 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Tue, 11 Apr 2023 11:22:28 +0200 Subject: [PATCH 064/113] Using --index-url instead of --extra-index-url following new PyTorch install command --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 7d9a8538..3b92e184 100755 --- a/webui.sh +++ b/webui.sh @@ -118,7 +118,7 @@ case "$gpu_info" in esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then - export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --extra-index-url https://download.pytorch.org/whl/rocm5.2" + export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --index-url https://download.pytorch.org/whl/rocm5.2" fi for preq in "${GIT}" "${python_cmd}" From 8af4b3bbe46dcb7f701d3650b6e4d31d6dd268a7 Mon Sep 17 00:00:00 2001 From: gk Date: Thu, 13 Apr 2023 08:46:59 +0900 Subject: [PATCH 065/113] Try using TCMalloc on Linux by default --- README.md | 1 + webui-user.sh | 3 +++ webui.sh | 21 ++++++++++++++++++--- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b67e2296..20f74531 100644 --- a/README.md +++ b/README.md @@ -120,6 +120,7 @@ sudo pacman -S wget git python3 bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) ``` 3. Run `webui.sh`. +4. Check `webui-user.sh` for options. ### Installation on Apple Silicon Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). 
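The TCMalloc change in this patch (see the webui-user.sh and webui.sh hunks that follow) boils down to: look up a libtcmalloc shared object with ldconfig and export it through LD_PRELOAD before the Python process is launched, warning when the library cannot be found. A rough Python rendering of that detection step, purely for illustration — the actual change is the prepare_tcmalloc shell function added below, and the find_tcmalloc helper here is an assumption, not part of the patch:

import os
import re
import subprocess

def find_tcmalloc():
    """Return the first libtcmalloc shared object reported by ldconfig, or None."""
    try:
        listing = subprocess.run(["ldconfig", "-p"], capture_output=True, text=True, check=True).stdout
    except (OSError, subprocess.CalledProcessError):
        return None
    match = re.search(r"libtcmalloc\.so\.\d+", listing)
    return match.group(0) if match else None

if "LD_PRELOAD" not in os.environ:
    tcmalloc = find_tcmalloc()
    if tcmalloc:
        # Only affects processes launched after this point; the web UI sets it before exec'ing Python.
        os.environ["LD_PRELOAD"] = tcmalloc
    else:
        print("Cannot locate TCMalloc (improves CPU memory usage)")

Preloading the allocator has to happen before the target interpreter starts, which is why the real implementation lives in the launcher script rather than in Python itself.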
diff --git a/webui-user.sh b/webui-user.sh index bfa53cb7..49a426ff 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -43,4 +43,7 @@ # Uncomment to enable accelerated launch #export ACCELERATE="True" +# Uncomment to disable TCMalloc +#export NO_TCMALLOC="True" + ########################################### diff --git a/webui.sh b/webui.sh index 8cdad22d..1122b965 100755 --- a/webui.sh +++ b/webui.sh @@ -113,13 +113,13 @@ case "$gpu_info" in printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half" printf "\n%s\n" "${delimiter}" ;; - *) + *) ;; esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2" -fi +fi for preq in "${GIT}" "${python_cmd}" do @@ -172,15 +172,30 @@ else exit 1 fi +# Try using TCMalloc on Linux +prepare_tcmalloc() { + if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then + TCMALLOC="$(ldconfig -p | grep -Po "libtcmalloc.so.\d" | head -n 1)" + if [[ ! -z "${TCMALLOC}" ]]; then + echo "Using TCMalloc: ${TCMALLOC}" + export LD_PRELOAD="${TCMALLOC}" + else + printf "\e[1m\e[31mCannot locate TCMalloc (improves CPU memory usage)\e[0m\n" + fi + fi +} + if [[ ! -z "${ACCELERATE}" ]] && [ ${ACCELERATE}="True" ] && [ -x "$(command -v accelerate)" ] then printf "\n%s\n" "${delimiter}" printf "Accelerating launch.py..." printf "\n%s\n" "${delimiter}" + prepare_tcmalloc exec accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@" else printf "\n%s\n" "${delimiter}" printf "Launching launch.py..." - printf "\n%s\n" "${delimiter}" + printf "\n%s\n" "${delimiter}" + prepare_tcmalloc exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@" fi From 7fb72edaffd3d4f2336d2478a424fc455f2376a6 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Thu, 13 Apr 2023 06:47:48 -0400 Subject: [PATCH 066/113] change index url --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 37c8b516..4256ef25 100644 --- a/launch.py +++ b/launch.py @@ -225,7 +225,7 @@ def run_extensions_installers(settings_file): def prepare_environment(): global skip_install - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') From fcc194afad8acd68c3fe3fd43e0bd3bac0371199 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 13 Apr 2023 22:42:20 +0300 Subject: [PATCH 067/113] prompt-bracket-checker: Simplify + improve error reporting --- .../javascript/prompt-bracket-checker.js | 119 +++++------------- 1 file changed, 29 insertions(+), 90 deletions(-) diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js index f0918e26..5c7a836a 100644 --- a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js +++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js @@ -1,103 +1,42 @@ // Stable Diffusion WebUI - Bracket checker -// Version 1.0 -// By Hingashi no Florin/Bwin4L +// By Hingashi no 
Florin/Bwin4L & @akx // Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs. // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong. -function checkBrackets(evt, textArea, counterElt) { - errorStringParen = '(...) - Different number of opening and closing parentheses detected.\n'; - errorStringSquare = '[...] - Different number of opening and closing square brackets detected.\n'; - errorStringCurly = '{...} - Different number of opening and closing curly brackets detected.\n'; +function checkBrackets(textArea, counterElt) { + var counts = {}; + (textArea.value.match(/[(){}\[\]]/g) || []).forEach(bracket => { + counts[bracket] = (counts[bracket] || 0) + 1; + }); + var errors = []; - openBracketRegExp = /\(/g; - closeBracketRegExp = /\)/g; - - openSquareBracketRegExp = /\[/g; - closeSquareBracketRegExp = /\]/g; - - openCurlyBracketRegExp = /\{/g; - closeCurlyBracketRegExp = /\}/g; - - totalOpenBracketMatches = 0; - totalCloseBracketMatches = 0; - totalOpenSquareBracketMatches = 0; - totalCloseSquareBracketMatches = 0; - totalOpenCurlyBracketMatches = 0; - totalCloseCurlyBracketMatches = 0; - - openBracketMatches = textArea.value.match(openBracketRegExp); - if(openBracketMatches) { - totalOpenBracketMatches = openBracketMatches.length; - } - - closeBracketMatches = textArea.value.match(closeBracketRegExp); - if(closeBracketMatches) { - totalCloseBracketMatches = closeBracketMatches.length; - } - - openSquareBracketMatches = textArea.value.match(openSquareBracketRegExp); - if(openSquareBracketMatches) { - totalOpenSquareBracketMatches = openSquareBracketMatches.length; - } - - closeSquareBracketMatches = textArea.value.match(closeSquareBracketRegExp); - if(closeSquareBracketMatches) { - totalCloseSquareBracketMatches = closeSquareBracketMatches.length; - } - - openCurlyBracketMatches = textArea.value.match(openCurlyBracketRegExp); - if(openCurlyBracketMatches) { - totalOpenCurlyBracketMatches = openCurlyBracketMatches.length; - } - - closeCurlyBracketMatches = textArea.value.match(closeCurlyBracketRegExp); - if(closeCurlyBracketMatches) { - totalCloseCurlyBracketMatches = closeCurlyBracketMatches.length; - } - - if(totalOpenBracketMatches != totalCloseBracketMatches) { - if(!counterElt.title.includes(errorStringParen)) { - counterElt.title += errorStringParen; + function checkPair(open, close, kind) { + if (counts[open] !== counts[close]) { + errors.push( + `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.` + ); } - } else { - counterElt.title = counterElt.title.replace(errorStringParen, ''); } - if(totalOpenSquareBracketMatches != totalCloseSquareBracketMatches) { - if(!counterElt.title.includes(errorStringSquare)) { - counterElt.title += errorStringSquare; - } - } else { - counterElt.title = counterElt.title.replace(errorStringSquare, ''); - } + checkPair('(', ')', 'round brackets'); + checkPair('[', ']', 'square brackets'); + checkPair('{', '}', 'curly brackets'); + counterElt.title = errors.join('\n'); + counterElt.classList.toggle('error', errors.length !== 0); +} - if(totalOpenCurlyBracketMatches != totalCloseCurlyBracketMatches) { - if(!counterElt.title.includes(errorStringCurly)) { - counterElt.title += errorStringCurly; - } - } else { - counterElt.title = counterElt.title.replace(errorStringCurly, ''); - } +function setupBracketChecking(id_prompt, id_counter) { + var textarea = 
gradioApp().querySelector("#" + id_prompt + " > label > textarea"); + var counter = gradioApp().getElementById(id_counter) - if(counterElt.title != '') { - counterElt.classList.add('error'); - } else { - counterElt.classList.remove('error'); + if (textarea && counter) { + textarea.addEventListener("input", () => checkBrackets(textarea, counter)); } } -function setupBracketChecking(id_prompt, id_counter){ - var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); - var counter = gradioApp().getElementById(id_counter) - - textarea.addEventListener("input", function(evt){ - checkBrackets(evt, textarea, counter) - }); -} - -onUiLoaded(function(){ - setupBracketChecking('txt2img_prompt', 'txt2img_token_counter') - setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter') - setupBracketChecking('img2img_prompt', 'img2img_token_counter') - setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter') -}) \ No newline at end of file +onUiLoaded(function () { + setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); + setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); + setupBracketChecking('img2img_prompt', 'img2img_token_counter'); + setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter'); +}); From dab5002c59ce1f68deae5e6e0c03e5e2c27155db Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Thu, 13 Apr 2023 23:12:33 -0400 Subject: [PATCH 068/113] sort self.word_embeddings without instantiating it a new dict --- modules/textual_inversion/textual_inversion.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 7c50839f..379df243 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -2,7 +2,7 @@ import os import sys import traceback import inspect -from collections import namedtuple, OrderedDict +from collections import namedtuple import torch import tqdm @@ -108,7 +108,7 @@ class DirWithTextualInversionEmbeddings: class EmbeddingDatabase: def __init__(self): self.ids_lookup = {} - self.word_embeddings = OrderedDict() + self.word_embeddings = {} self.skipped_embeddings = {} self.expected_shape = -1 self.embedding_dirs = {} @@ -234,7 +234,10 @@ class EmbeddingDatabase: embdir.update() # re-sort word_embeddings because load_from_dir may not load in alphabetic order. - self.word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + # using a temporary copy so we don't reinitialize self.word_embeddings in case other objects have a reference to it. 
+ sorted_word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + self.word_embeddings.clear() + self.word_embeddings.update(sorted_word_embeddings) displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys())) if self.previously_displayed_embeddings != displayed_embeddings: From 3af152d488db0c521f6058676e1a65c7157ccc14 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:17:14 -0400 Subject: [PATCH 069/113] Fix image mask composite for weird resolutions --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..f49992d9 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -708,7 +708,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: image_mask = p.mask_for_overlay.convert('RGB') - image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA') + image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') if opts.save_mask: images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask") From fbab3fc6d122fb4e6648dd82291a70fc348c0b4a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:24:55 -0400 Subject: [PATCH 070/113] Only handle image mask if any option enabled --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index f49992d9..5c6edc60 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -706,7 +706,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: image.info["parameters"] = text output_images.append(image) - if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: + if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): image_mask = p.mask_for_overlay.convert('RGB') image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') From 02e351880796422eac3bbaf7aa86332b588651ce Mon Sep 17 00:00:00 2001 From: tqwuliao Date: Sat, 15 Apr 2023 23:20:08 +0800 Subject: [PATCH 071/113] Add new FilenameGenerator [hasprompt..] --- javascript/hints.js | 4 ++-- modules/images.py | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 7f4101b2..730ce7bd 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -67,8 +67,8 @@ titles = { "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.", - "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime], [datetime