From ba70a220e3176153ba2a559acb9e5aa692dce7ca Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 5 Jun 2023 22:20:29 +0300 Subject: [PATCH 01/40] Remove a bunch of unused/vestigial code As found by Vulture and some eyes --- modules/api/api.py | 7 ------ modules/api/models.py | 4 --- modules/codeformer_model.py | 4 --- modules/devices.py | 7 ------ modules/generation_parameters_copypaste.py | 29 ---------------------- modules/hypernetworks/hypernetwork.py | 24 ------------------ modules/paths.py | 14 ----------- 7 files changed, 89 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 2e49526e..41cd7eca 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -32,13 +32,6 @@ import piexif import piexif.helper -def upscaler_to_index(name: str): - try: - return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) - except Exception as e: - raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") from e - - def script_name_to_index(name, scripts): try: return [script.title().lower() for script in scripts].index(name.lower()) diff --git a/modules/api/models.py b/modules/api/models.py index b3a745f0..b5683071 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -274,10 +274,6 @@ class PromptStyleItem(BaseModel): prompt: Optional[str] = Field(title="Prompt") negative_prompt: Optional[str] = Field(title="Negative Prompt") -class ArtistItem(BaseModel): - name: str = Field(title="Name") - score: float = Field(title="Score") - category: str = Field(title="Category") class EmbeddingItem(BaseModel): step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available") diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 4260b016..a01fe63d 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -15,7 +15,6 @@ model_dir = "Codeformer" model_path = os.path.join(models_path, model_dir) model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth' -have_codeformer = False codeformer = None @@ -125,9 +124,6 @@ def setup_model(dirname): return restored_img - global have_codeformer - have_codeformer = True - global codeformer codeformer = FaceRestorerCodeFormer(dirname) shared.face_restorers.append(codeformer) diff --git a/modules/devices.py b/modules/devices.py index 1ed6ffdc..620ed1a6 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -15,13 +15,6 @@ def has_mps() -> bool: else: return mac_specific.has_mps -def extract_device_id(args, name): - for x in range(len(args)): - if name in args[x]: - return args[x + 1] - - return None - def get_cuda_device_string(): from modules import shared diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 1d02ffae..699b1a81 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -174,31 +174,6 @@ def send_image_and_dimensions(x): return img, w, h - -def find_hypernetwork_key(hypernet_name, hypernet_hash=None): - """Determines the config parameter name to use for the hypernet based on the parameters in the infotext. - - Example: an infotext provides "Hypernet: ke-ta" and "Hypernet hash: 1234abcd". For the "Hypernet" config - parameter this means there should be an entry that looks like "ke-ta-10000(1234abcd)" to set it to. 
- - If the infotext has no hash, then a hypernet with the same name will be selected instead. - """ - hypernet_name = hypernet_name.lower() - if hypernet_hash is not None: - # Try to match the hash in the name - for hypernet_key in shared.hypernetworks.keys(): - result = re_hypernet_hash.search(hypernet_key) - if result is not None and result[1] == hypernet_hash: - return hypernet_key - else: - # Fall back to a hypernet with the same name - for hypernet_key in shared.hypernetworks.keys(): - if hypernet_key.lower().startswith(hypernet_name): - return hypernet_key - - return None - - def restore_old_hires_fix_params(res): """for infotexts that specify old First pass size parameter, convert it into width, height, and hr scale""" @@ -329,10 +304,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model return res -settings_map = {} - - - infotext_to_setting_name_mapping = [ ('Clip skip', 'CLIP_stop_at_last_layers', ), ('Conditional mask weight', 'inpainting_mask_weight'), diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 5d12b449..51941c11 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -353,17 +353,6 @@ def load_hypernetworks(names, multipliers=None): shared.loaded_hypernetworks.append(hypernetwork) -def find_closest_hypernetwork_name(search: str): - if not search: - return None - search = search.lower() - applicable = [name for name in shared.hypernetworks if search in name.lower()] - if not applicable: - return None - applicable = sorted(applicable, key=lambda name: len(name)) - return applicable[0] - - def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None): hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None) @@ -446,18 +435,6 @@ def statistics(data): return total_information, recent_information -def report_statistics(loss_info:dict): - keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x])) - for key in keys: - try: - print("Loss statistics for file " + key) - info, recent = statistics(list(loss_info[key])) - print(info) - print(recent) - except Exception as e: - print(e) - - def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): # Remove illegal characters from name. name = "".join( x for x in name if (x.isalnum() or x in "._- ")) @@ -770,7 +747,6 @@ Last saved image: {html.escape(last_saved_image)}
pbar.leave = False pbar.close() hypernetwork.eval() - #report_statistics(loss_dict) sd_hijack_checkpoint.remove() diff --git a/modules/paths.py b/modules/paths.py index 5171df4f..bada804e 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -38,17 +38,3 @@ for d, must_exist, what, options in path_dirs: else: sys.path.append(d) paths[what] = d - - -class Prioritize: - def __init__(self, name): - self.name = name - self.path = None - - def __enter__(self): - self.path = sys.path.copy() - sys.path = [paths[self.name]] + sys.path - - def __exit__(self, exc_type, exc_val, exc_tb): - sys.path = self.path - self.path = None From 8ca34ad6d8cc2502403b3b96bb811366bc13c076 Mon Sep 17 00:00:00 2001 From: Su Wei Date: Fri, 9 Jun 2023 13:14:20 +0800 Subject: [PATCH 02/40] add model exists status check to modeuls/api/api.py , /sdapi/v1/options [POST] --- modules/api/api.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index eee99bbb..56b7858d 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -22,7 +22,7 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_ from modules.textual_inversion.preprocess import preprocess from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork from PIL import PngImagePlugin,Image -from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights +from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights,checkpoint_alisases from modules.sd_models_config import find_checkpoint_config_near_filename from modules.realesrgan_model import get_realesrgan_models from modules import devices @@ -515,6 +515,11 @@ class Api: def set_config(self, req: Dict[str, Any]): for k, v in req.items(): + if k == "sd_model_checkpoint": + checkpoint_info = checkpoint_alisases.get(v, None) + if checkpoint_info is None: + print(f"model [{v}] not founded, skip config saving process") + return shared.opts.set(k, v) shared.opts.save(shared.config_filename) From 5576a7232207ea4df7c27f8ccd040d1697434532 Mon Sep 17 00:00:00 2001 From: arch-fan Date: Fri, 9 Jun 2023 19:59:27 +0000 Subject: [PATCH 03/40] fixed typos --- modules/shared.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 271a062d..e19f1576 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -416,9 +416,9 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP nrtwork; 1 ignores none, 2 ignores one layer"), + "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", 
"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different vidocard vendors"), + "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"), })) options_templates.update(options_section(('optimizations', "Optimizations"), { From 9142be0a0d8cea37cf1ae86c17fc7dcb161d9a42 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 10 Jun 2023 23:36:34 +0900 Subject: [PATCH 04/40] quit restart --- modules/api/api.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 2e49526e..317c809f 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -14,7 +14,7 @@ from fastapi.encoders import jsonable_encoder from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors +from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart from modules.api import models from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images @@ -208,6 +208,8 @@ class Api: self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) + self.add_api_route("/sdapi/v1/quit-webui", self.quit_webui, methods=["POST"]) + self.add_api_route("/sdapi/v1/restart-webui", self.restart_webui, methods=["POST"]) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] @@ -715,3 +717,10 @@ class Api: def launch(self, server_name, port): self.app.include_router(self.router) uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=0) + + def quit_webui(self): + restart.stop_program() + + def restart_webui(self): + if restart.is_restartable(): + restart.restart_program() From 7e2d39a2d158d1426321686b05d31a3ea694a99e Mon Sep 17 00:00:00 2001 From: Su Wei Date: Mon, 12 Jun 2023 15:22:49 +0800 Subject: [PATCH 05/40] update model checkpoint switch code --- modules/api/api.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 56b7858d..7d7dfe9a 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -514,12 +514,11 @@ class Api: return options def set_config(self, req: Dict[str, Any]): + checkpoint_key="sd_model_checkpoint" + if checkpoint_key in req and str(req[checkpoint_key]) not in checkpoint_alisases: + raise RuntimeError(f"model {v!r} not found") + for k, v in req.items(): - if k == "sd_model_checkpoint": - checkpoint_info = checkpoint_alisases.get(v, None) - if checkpoint_info is None: - print(f"model [{v}] not founded, skip config saving process") - return shared.opts.set(k, v) shared.opts.save(shared.config_filename) From 
b9664ab6154818680ee25920e229b808a3cdec68 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 12 Jun 2023 18:15:27 +0900 Subject: [PATCH 06/40] move _stop route to api --- modules/api/api.py | 11 +++++++++-- webui.py | 7 ------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 317c809f..cb1cde78 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -208,8 +208,11 @@ class Api: self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) - self.add_api_route("/sdapi/v1/quit-webui", self.quit_webui, methods=["POST"]) - self.add_api_route("/sdapi/v1/restart-webui", self.restart_webui, methods=["POST"]) + + if shared.cmd_opts.add_stop_route: + self.add_api_route("/sdapi/v1/quit-webui", self.quit_webui, methods=["POST"]) + self.add_api_route("/sdapi/v1/restart-webui", self.restart_webui, methods=["POST"]) + self.add_api_route("/_stop", self.stop_route, methods=["POST"]) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] @@ -724,3 +727,7 @@ class Api: def restart_webui(self): if restart.is_restartable(): restart.restart_program() + + def stop_route(request): + shared.state.server_command = "stop" + return Response("Stopping.") diff --git a/webui.py b/webui.py index 136d036d..ae6dc9fb 100644 --- a/webui.py +++ b/webui.py @@ -362,11 +362,6 @@ def api_only(): api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861) -def stop_route(request): - shared.state.server_command = "stop" - return Response("Stopping.") - - def webui(): launch_api = cmd_opts.api initialize() @@ -404,8 +399,6 @@ def webui(): "redoc_url": "/redoc", }, ) - if cmd_opts.add_stop_route: - app.add_route("/_stop", stop_route, methods=["POST"]) # after initial launch, disable --autolaunch for subsequent restarts cmd_opts.autolaunch = False From d80962681ae0f3456b1c2776f68c5c838d782786 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 12 Jun 2023 18:21:01 +0900 Subject: [PATCH 07/40] remove fastapi.Response --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index ae6dc9fb..bad29f28 100644 --- a/webui.py +++ b/webui.py @@ -11,7 +11,7 @@ import json from threading import Thread from typing import Iterable -from fastapi import FastAPI, Response +from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware from packaging import version From ee029a8cad762238f94706d2386ec70f7854339d Mon Sep 17 00:00:00 2001 From: Danil Boldyrev Date: Mon, 12 Jun 2023 22:19:22 +0300 Subject: [PATCH 08/40] Improved error output, improved settings menu --- .../canvas-zoom-and-pan/javascript/zoom.js | 175 ++++++++++++++---- .../scripts/hotkey_config.py | 8 +- 2 files changed, 145 insertions(+), 38 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 4ecb3d36..604a9c1e 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -4,12 +4,12 @@ onUiLoaded(async() => { inpaint: "#img2maskimg", 
inpaintSketch: "#inpaint_sketch", rangeGroup: "#img2img_column_size", - sketch: "#img2img_sketch", + sketch: "#img2img_sketch" }; const tabNameToElementId = { "Inpaint sketch": elementIDs.inpaintSketch, "Inpaint": elementIDs.inpaint, - "Sketch": elementIDs.sketch, + "Sketch": elementIDs.sketch }; // Helper functions @@ -42,37 +42,124 @@ onUiLoaded(async() => { } } - // Check is hotkey valid - function isSingleLetter(value) { + // Function for defining the "Ctrl", "Shift" and "Alt" keys + function isModifierKey(event, key) { + switch (key) { + case "Ctrl": + return event.ctrlKey; + case "Shift": + return event.shiftKey; + case "Alt": + return event.altKey; + default: + return false; + } + } + + // Check if hotkey is valid + function isValidHotkey(value) { + const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"]; return ( - typeof value === "string" && value.length === 1 && /[a-z]/i.test(value) + (typeof value === "string" && + value.length === 1 && + /[a-z]/i.test(value)) || + specialKeys.includes(value) ); } // Create hotkeyConfig from opts function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) { const result = {}; - const usedKeys = new Set(); + const usedSingleKeys = new Set(); + const usedSpecialKeys = new Set(); + + // Normalize hotkey + function normalizeHotkey(hotkey) { + return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey; + } + + // Format hotkey for display + function formatHotkeyForDisplay(hotkey) { + return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey; + } + + // Check if canvas_hotkey_adjust and canvas_hotkey_zoom are the same + if ( + hotkeysConfigOpts.canvas_hotkey_adjust !== "Disable" && + hotkeysConfigOpts.canvas_hotkey_zoom !== "Disable" && + normalizeHotkey(hotkeysConfigOpts.canvas_hotkey_adjust) === + normalizeHotkey(hotkeysConfigOpts.canvas_hotkey_zoom) + ) { + console.error( + `Hotkey: ${formatHotkeyForDisplay( + hotkeysConfigOpts.canvas_hotkey_zoom + )} for canvas_hotkey_zoom conflicts with canvas_hotkey_adjust. The default hotkey is used: ${formatHotkeyForDisplay( + defaultHotkeysConfig.canvas_hotkey_zoom + )}` + ); + hotkeysConfigOpts.canvas_hotkey_zoom = + defaultHotkeysConfig.canvas_hotkey_zoom; + } for (const key in defaultHotkeysConfig) { if (typeof hotkeysConfigOpts[key] === "boolean") { result[key] = hotkeysConfigOpts[key]; continue; } + if (hotkeysConfigOpts[key] === "Disable") { + result[key] = hotkeysConfigOpts[key]; + continue; + } if ( hotkeysConfigOpts[key] && - isSingleLetter(hotkeysConfigOpts[key]) && - !usedKeys.has(hotkeysConfigOpts[key].toUpperCase()) + isValidHotkey(hotkeysConfigOpts[key]) ) { - // If the property passed the test and has not yet been used, add 'Key' before it and save it - result[key] = "Key" + hotkeysConfigOpts[key].toUpperCase(); - usedKeys.add(hotkeysConfigOpts[key].toUpperCase()); + const hotkey = normalizeHotkey(hotkeysConfigOpts[key]); + const isSpecialKey = hotkey.length > 1; + + if ( + (!isSpecialKey && !usedSingleKeys.has(hotkey)) || + (isSpecialKey && !usedSpecialKeys.has(hotkey)) + ) { + result[key] = hotkey; + + if (isSpecialKey) { + usedSpecialKeys.add(hotkey); + } else { + usedSingleKeys.add(hotkey); + } + } else { + console.error( + `Hotkey: ${formatHotkeyForDisplay( + hotkeysConfigOpts[key] + )} for ${key} is repeated and conflicts with another hotkey. 
The default hotkey is used: ${formatHotkeyForDisplay( + defaultHotkeysConfig[key] + )}` + ); + result[key] = defaultHotkeysConfig[key]; + + if (isSpecialKey) { + usedSpecialKeys.add(defaultHotkeysConfig[key]); + } else { + usedSingleKeys.add(defaultHotkeysConfig[key]); + } + } } else { - // If the property does not pass the test or has already been used, we keep the default value console.error( - `Hotkey: ${hotkeysConfigOpts[key]} for ${key} is repeated and conflicts with another hotkey or is not 1 letter. The default hotkey is used: ${defaultHotkeysConfig[key][3]}` + `Hotkey: ${formatHotkeyForDisplay( + hotkeysConfigOpts[key] + )} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay( + defaultHotkeysConfig[key] + )}` ); result[key] = defaultHotkeysConfig[key]; + + const isSpecialKey = defaultHotkeysConfig[key].length > 1; + if (isSpecialKey) { + usedSpecialKeys.add(defaultHotkeysConfig[key]); + } else { + usedSingleKeys.add(defaultHotkeysConfig[key]); + } } } @@ -100,7 +187,9 @@ onUiLoaded(async() => { imageARPreview.style.transform = ""; if (parseFloat(mainTab.style.width) > 865) { const transformString = mainTab.style.transform; - const scaleMatch = transformString.match(/scale\(([-+]?[0-9]*\.?[0-9]+)\)/); + const scaleMatch = transformString.match( + /scale\(([-+]?[0-9]*\.?[0-9]+)\)/ + ); let zoom = 1; // default zoom if (scaleMatch && scaleMatch[1]) { @@ -124,12 +213,13 @@ onUiLoaded(async() => { // Default config const defaultHotkeysConfig = { + canvas_hotkey_zoom: "Alt", + canvas_hotkey_adjust: "Ctrl", canvas_hotkey_reset: "KeyR", canvas_hotkey_fullscreen: "KeyS", canvas_hotkey_move: "KeyF", canvas_hotkey_overlap: "KeyO", - canvas_show_tooltip: true, - canvas_swap_controls: false + canvas_show_tooltip: true }; // swap the actions for ctr + wheel and shift + wheel const hotkeysConfig = createHotkeyConfig( @@ -137,18 +227,23 @@ onUiLoaded(async() => { hotkeysConfigOpts ); + console.log(hotkeysConfig); + let isMoving = false; let mouseX, mouseY; let activeElement; - const elements = Object.fromEntries(Object.keys(elementIDs).map((id) => [ - id, - gradioApp().querySelector(elementIDs[id]), - ])); + const elements = Object.fromEntries( + Object.keys(elementIDs).map(id => [ + id, + gradioApp().querySelector(elementIDs[id]) + ]) + ); const elemData = {}; // Apply functionality to the range inputs. Restore redmask and correct for long images. - const rangeInputs = elements.rangeGroup ? Array.from(elements.rangeGroup.querySelectorAll("input")) : + const rangeInputs = elements.rangeGroup ? + Array.from(elements.rangeGroup.querySelectorAll("input")) : [ gradioApp().querySelector("#img2img_width input[type='range']"), gradioApp().querySelector("#img2img_height input[type='range']") @@ -192,26 +287,36 @@ onUiLoaded(async() => { tooltipContent.className = "tooltip-content"; // Add info about hotkeys - const zoomKey = hotkeysConfig.canvas_swap_controls ? "Ctrl" : "Shift"; - const adjustKey = hotkeysConfig.canvas_swap_controls ? 
"Shift" : "Ctrl"; + const zoomKey = hotkeysConfig.canvas_hotkey_zoom; + const adjustKey = hotkeysConfig.canvas_hotkey_adjust; const hotkeys = [ {key: `${zoomKey} + wheel`, action: "Zoom canvas"}, {key: `${adjustKey} + wheel`, action: "Adjust brush size"}, { - key: hotkeysConfig.canvas_hotkey_reset.charAt(hotkeysConfig.canvas_hotkey_reset.length - 1), + key: hotkeysConfig.canvas_hotkey_reset.charAt( + hotkeysConfig.canvas_hotkey_reset.length - 1 + ), action: "Reset zoom" }, { - key: hotkeysConfig.canvas_hotkey_fullscreen.charAt(hotkeysConfig.canvas_hotkey_fullscreen.length - 1), + key: hotkeysConfig.canvas_hotkey_fullscreen.charAt( + hotkeysConfig.canvas_hotkey_fullscreen.length - 1 + ), action: "Fullscreen mode" }, { - key: hotkeysConfig.canvas_hotkey_move.charAt(hotkeysConfig.canvas_hotkey_move.length - 1), + key: hotkeysConfig.canvas_hotkey_move.charAt( + hotkeysConfig.canvas_hotkey_move.length - 1 + ), action: "Move canvas" } ]; for (const hotkey of hotkeys) { + if (hotkey.key === "Disable + wheel") { + continue; + } + const p = document.createElement("p"); p.innerHTML = `${hotkey.key} - ${hotkey.action}`; tooltipContent.appendChild(p); @@ -346,10 +451,7 @@ onUiLoaded(async() => { // Change the zoom level based on user interaction function changeZoomLevel(operation, e) { - if ( - (!hotkeysConfig.canvas_swap_controls && e.shiftKey) || - (hotkeysConfig.canvas_swap_controls && e.ctrlKey) - ) { + if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) { e.preventDefault(); let zoomPosX, zoomPosY; @@ -514,6 +616,13 @@ onUiLoaded(async() => { event.preventDefault(); action(event); } + + if ( + isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) || + isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust) + ) { + event.preventDefault(); + } } // Get Mouse position @@ -564,11 +673,7 @@ onUiLoaded(async() => { changeZoomLevel(operation, e); // Handle brush size adjustment with ctrl key pressed - if ( - (hotkeysConfig.canvas_swap_controls && e.shiftKey) || - (!hotkeysConfig.canvas_swap_controls && - (e.ctrlKey || e.metaKey)) - ) { + if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) { e.preventDefault(); // Increase or decrease brush size based on scroll direction diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py index d83e14da..be8b43f5 100644 --- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py +++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py @@ -1,10 +1,12 @@ +import gradio as gr from modules import shared shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), { - "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas"), + "canvas_hotkey_zoom": shared.OptionInfo("Shift", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt","Disable"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"), + "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt","Disable"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"), + "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"), "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to 
its full width "), "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"), - "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap ( Technical button, neededs for testing )"), + "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"), "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), - "canvas_swap_controls": shared.OptionInfo(False, "Swap hotkey combinations for Zoom and Adjust brush resize"), })) From 9a2da597c5e329cf04eeef3005f4465de10f6832 Mon Sep 17 00:00:00 2001 From: Danil Boldyrev Date: Mon, 12 Jun 2023 22:21:42 +0300 Subject: [PATCH 09/40] remove console.log --- extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 604a9c1e..7c785e29 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -227,8 +227,6 @@ onUiLoaded(async() => { hotkeysConfigOpts ); - console.log(hotkeysConfig); - let isMoving = false; let mouseX, mouseY; let activeElement; From 165ab44f03cc17dc3e4c35b3e5976f3d646c7ac7 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 29 May 2023 10:18:15 +0300 Subject: [PATCH 10/40] Use os.makedirs(..., exist_ok=True) --- modules/codeformer_model.py | 4 +--- modules/extensions.py | 3 +-- modules/gfpgan_model.py | 5 +---- modules/modelloader.py | 3 +-- modules/sd_models.py | 3 +-- modules/textual_inversion/autocrop.py | 3 +-- 6 files changed, 6 insertions(+), 15 deletions(-) diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 4260b016..d974e4b8 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -20,9 +20,7 @@ codeformer = None def setup_model(dirname): - global model_path - if not os.path.exists(model_path): - os.makedirs(model_path) + os.makedirs(model_path, exist_ok=True) path = modules.paths.paths.get("CodeFormer", None) if path is None: diff --git a/modules/extensions.py b/modules/extensions.py index 8608584b..abc6e2b1 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -7,8 +7,7 @@ from modules.paths_internal import extensions_dir, extensions_builtin_dir, scrip extensions = [] -if not os.path.exists(extensions_dir): - os.makedirs(extensions_dir) +os.makedirs(extensions_dir, exist_ok=True) def active(): diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index e239a09d..6ecd295c 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -70,11 +70,8 @@ gfpgan_constructor = None def setup_model(dirname): - global model_path - if not os.path.exists(model_path): - os.makedirs(model_path) - try: + os.makedirs(model_path, exist_ok=True) from gfpgan import GFPGANer from facexlib import detection, parsing # noqa: F401 global user_path diff --git a/modules/modelloader.py b/modules/modelloader.py index be23071a..75f01247 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -95,8 +95,7 @@ def cleanup_models(): def move_files(src_path: str, dest_path: str, ext_filter: str = None): try: - if not os.path.exists(dest_path): - os.makedirs(dest_path) + os.makedirs(dest_path, exist_ok=True) if os.path.exists(src_path): for file in os.listdir(src_path): fullpath = os.path.join(src_path, file) diff --git a/modules/sd_models.py b/modules/sd_models.py index 918f6fd6..6ff5d17d 100644 --- a/modules/sd_models.py +++ 
b/modules/sd_models.py @@ -95,8 +95,7 @@ except Exception: def setup_model(): - if not os.path.exists(model_path): - os.makedirs(model_path) + os.makedirs(model_path, exist_ok=True) enable_midas_autodownload() diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 75705459..1675e39a 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -298,8 +298,7 @@ def download_and_cache_models(dirname): download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true' model_file_name = 'face_detection_yunet.onnx' - if not os.path.exists(dirname): - os.makedirs(dirname) + os.makedirs(dirname, exist_ok=True) cache_file = os.path.join(dirname, model_file_name) if not os.path.exists(cache_file): From 89352a2f52c6be51318192cedd86c8a342966a49 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 29 May 2023 09:34:26 +0300 Subject: [PATCH 11/40] Move `load_file_from_url` to modelloader --- extensions-builtin/LDSR/scripts/ldsr_model.py | 7 ++--- .../ScuNET/scripts/scunet_model.py | 5 ++-- .../SwinIR/scripts/swinir_model.py | 8 +++-- modules/esrgan_model.py | 4 +-- modules/modelloader.py | 29 +++++++++++++++++-- modules/realesrgan_model.py | 4 +-- 6 files changed, 39 insertions(+), 18 deletions(-) diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index dbd6d331..bf9b6de2 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -1,7 +1,6 @@ import os -from basicsr.utils.download_util import load_file_from_url - +from modules.modelloader import load_file_from_url from modules.upscaler import Upscaler, UpscalerData from ldsr_model_arch import LDSR from modules import shared, script_callbacks, errors @@ -43,9 +42,9 @@ class UpscalerLDSR(Upscaler): if local_safetensors_path is not None and os.path.exists(local_safetensors_path): model = local_safetensors_path else: - model = local_ckpt_path if local_ckpt_path is not None else load_file_from_url(url=self.model_url, model_dir=self.model_download_path, file_name="model.ckpt", progress=True) + model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt") - yaml = local_yaml_path if local_yaml_path is not None else load_file_from_url(url=self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml", progress=True) + yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml") try: return LDSR(model, yaml) diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index 85b4505f..2785b551 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -6,12 +6,11 @@ import numpy as np import torch from tqdm import tqdm -from basicsr.utils.download_util import load_file_from_url - import modules.upscaler from modules import devices, modelloader, script_callbacks, errors from scunet_model_arch import SCUNet as net +from modules.modelloader import load_file_from_url from modules.shared import opts @@ -120,7 +119,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): def load_model(self, path: str): device = devices.get_device_for('scunet') if "http" in path: - filename = load_file_from_url(url=self.model_url, 
model_dir=self.model_download_path, file_name="%s.pth" % self.name, progress=True) + filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth") else: filename = path if not os.path.exists(os.path.join(self.model_path, filename)) or filename is None: diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index 1c7bf325..a5b0e2eb 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -3,7 +3,6 @@ import os import numpy as np import torch from PIL import Image -from basicsr.utils.download_util import load_file_from_url from tqdm import tqdm from modules import modelloader, devices, script_callbacks, shared @@ -50,8 +49,11 @@ class UpscalerSwinIR(Upscaler): def load_model(self, path, scale=4): if "http" in path: - dl_name = "%s%s" % (self.model_name.replace(" ", "_"), ".pth") - filename = load_file_from_url(url=path, model_dir=self.model_download_path, file_name=dl_name, progress=True) + filename = modelloader.load_file_from_url( + url=path, + model_dir=self.model_download_path, + file_name=f"{self.model_name.replace(' ', '_')}.pth", + ) else: filename = path if filename is None or not os.path.exists(filename): diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 2fced999..f1a98c07 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -3,7 +3,6 @@ import os import numpy as np import torch from PIL import Image -from basicsr.utils.download_util import load_file_from_url import modules.esrgan_model_arch as arch from modules import modelloader, images, devices @@ -152,11 +151,10 @@ class UpscalerESRGAN(Upscaler): def load_model(self, path: str): if "http" in path: - filename = load_file_from_url( + filename = modelloader.load_file_from_url( url=self.model_url, model_dir=self.model_download_path, file_name=f"{self.model_name}.pth", - progress=True, ) else: filename = path diff --git a/modules/modelloader.py b/modules/modelloader.py index be23071a..a69c8a4f 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import shutil import importlib @@ -8,6 +10,29 @@ from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, Upscale from modules.paths import script_path, models_path +def load_file_from_url( + url: str, + *, + model_dir: str, + progress: bool = True, + file_name: str | None = None, +) -> str: + """Download a file from `url` into `model_dir`, using the file present if possible. + + Returns the path to the downloaded file. + """ + os.makedirs(model_dir, exist_ok=True) + if not file_name: + parts = urlparse(url) + file_name = os.path.basename(parts.path) + cached_file = os.path.abspath(os.path.join(model_dir, file_name)) + if not os.path.exists(cached_file): + print(f'Downloading: "{url}" to {cached_file}\n') + from torch.hub import download_url_to_file + download_url_to_file(url, cached_file, progress=progress) + return cached_file + + def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list: """ A one-and done loader to try finding the desired models in specified directories. 
@@ -46,9 +71,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None if model_url is not None and len(output) == 0: if download_name is not None: - from basicsr.utils.download_util import load_file_from_url - dl = load_file_from_url(model_url, places[0], True, download_name) - output.append(dl) + output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name)) else: output.append(model_url) diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index 2d27b321..0d9c2e48 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -2,7 +2,6 @@ import os import numpy as np from PIL import Image -from basicsr.utils.download_util import load_file_from_url from realesrgan import RealESRGANer from modules.upscaler import Upscaler, UpscalerData @@ -10,6 +9,7 @@ from modules.shared import cmd_opts, opts from modules import modelloader, errors + class UpscalerRealESRGAN(Upscaler): def __init__(self, path): self.name = "RealESRGAN" @@ -71,7 +71,7 @@ class UpscalerRealESRGAN(Upscaler): return None if info.local_data_path.startswith("http"): - info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_download_path, progress=True) + info.local_data_path = modelloader.load_file_from_url(info.data_path, model_dir=self.model_download_path) return info except Exception: From 0afbc0c2355ead3a0ce7149a6d678f1f2e2fbfee Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 29 May 2023 09:41:36 +0300 Subject: [PATCH 12/40] Fix up `if "http" in ...:` to be more sensible startswiths --- extensions-builtin/ScuNET/scripts/scunet_model.py | 4 ++-- extensions-builtin/SwinIR/scripts/swinir_model.py | 4 ++-- modules/esrgan_model.py | 4 ++-- modules/gfpgan_model.py | 2 +- modules/modelloader.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index 2785b551..64f50829 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -27,7 +27,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): scalers = [] add_model2 = True for file in model_paths: - if "http" in file: + if file.startswith("http"): name = self.model_name else: name = modelloader.friendly_name(file) @@ -118,7 +118,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): def load_model(self, path: str): device = devices.get_device_for('scunet') - if "http" in path: + if path.startswith("http"): filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth") else: filename = path diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index a5b0e2eb..4551761d 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -27,7 +27,7 @@ class UpscalerSwinIR(Upscaler): scalers = [] model_files = self.find_models(ext_filter=[".pt", ".pth"]) for model in model_files: - if "http" in model: + if model.startswith("http"): name = self.model_name else: name = modelloader.friendly_name(model) @@ -48,7 +48,7 @@ class UpscalerSwinIR(Upscaler): return img def load_model(self, path, scale=4): - if "http" in path: + if path.startswith("http"): filename = modelloader.load_file_from_url( url=path, model_dir=self.model_download_path, diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index f1a98c07..0666a2c2 100644 
--- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -133,7 +133,7 @@ class UpscalerESRGAN(Upscaler): scaler_data = UpscalerData(self.model_name, self.model_url, self, 4) scalers.append(scaler_data) for file in model_paths: - if "http" in file: + if file.startswith("http"): name = self.model_name else: name = modelloader.friendly_name(file) @@ -150,7 +150,7 @@ class UpscalerESRGAN(Upscaler): return img def load_model(self, path: str): - if "http" in path: + if path.startswith("http"): filename = modelloader.load_file_from_url( url=self.model_url, model_dir=self.model_download_path, diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index e239a09d..804fb53d 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -25,7 +25,7 @@ def gfpgann(): return None models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN") - if len(models) == 1 and "http" in models[0]: + if len(models) == 1 and models[0].startswith("http"): model_file = models[0] elif len(models) != 0: latest_file = max(models, key=os.path.getctime) diff --git a/modules/modelloader.py b/modules/modelloader.py index a69c8a4f..b2f0bb71 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -82,7 +82,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None def friendly_name(file: str): - if "http" in file: + if file.startswith("http"): file = urlparse(file).path file = os.path.basename(file) From e3a973a68df3cfe13039dae33d19cf2c02a741e0 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 29 May 2023 09:45:07 +0300 Subject: [PATCH 13/40] Add TODO comments to sus model loads --- extensions-builtin/ScuNET/scripts/scunet_model.py | 1 + modules/esrgan_model.py | 1 + 2 files changed, 2 insertions(+) diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index 64f50829..da74a829 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -119,6 +119,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): def load_model(self, path: str): device = devices.get_device_for('scunet') if path.startswith("http"): + # TODO: this doesn't use `path` at all? filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth") else: filename = path diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 0666a2c2..a20e8d91 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -151,6 +151,7 @@ class UpscalerESRGAN(Upscaler): def load_model(self, path: str): if path.startswith("http"): + # TODO: this doesn't use `path` at all? 
filename = modelloader.load_file_from_url( url=self.model_url, model_dir=self.model_download_path, From bf67a5dcf44c3dbd88d1913478d4e02477915f33 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 29 May 2023 10:38:51 +0300 Subject: [PATCH 14/40] Upscaler.load_model: don't return None, just use exceptions --- extensions-builtin/LDSR/scripts/ldsr_model.py | 13 +++--- .../ScuNET/scripts/scunet_model.py | 16 +++----- .../SwinIR/scripts/swinir_model.py | 40 +++++++++---------- modules/esrgan_model.py | 14 +++---- modules/realesrgan_model.py | 33 +++++++-------- 5 files changed, 52 insertions(+), 64 deletions(-) diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index bf9b6de2..bd78dece 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -46,16 +46,13 @@ class UpscalerLDSR(Upscaler): yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml") - try: - return LDSR(model, yaml) - except Exception: - errors.report("Error importing LDSR", exc_info=True) - return None + return LDSR(model, yaml) def do_upscale(self, img, path): - ldsr = self.load_model(path) - if ldsr is None: - print("NO LDSR!") + try: + ldsr = self.load_model(path) + except Exception: + errors.report(f"Failed loading LDSR model {path}", exc_info=True) return img ddim_steps = shared.opts.ldsr_steps return ldsr.super_resolution(img, ddim_steps, self.scale) diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index da74a829..ffef26b2 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -1,4 +1,3 @@ -import os.path import sys import PIL.Image @@ -8,7 +7,7 @@ from tqdm import tqdm import modules.upscaler from modules import devices, modelloader, script_callbacks, errors -from scunet_model_arch import SCUNet as net +from scunet_model_arch import SCUNet from modules.modelloader import load_file_from_url from modules.shared import opts @@ -88,9 +87,10 @@ class UpscalerScuNET(modules.upscaler.Upscaler): torch.cuda.empty_cache() - model = self.load_model(selected_file) - if model is None: - print(f"ScuNET: Unable to load model from {selected_file}", file=sys.stderr) + try: + model = self.load_model(selected_file) + except Exception as e: + print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr) return img device = devices.get_device_for('scunet') @@ -123,11 +123,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth") else: filename = path - if not os.path.exists(os.path.join(self.model_path, filename)) or filename is None: - print(f"ScuNET: Unable to load model from {filename}", file=sys.stderr) - return None - - model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64) + model = SCUNet(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64) model.load_state_dict(torch.load(filename), strict=True) model.eval() for _, v in model.named_parameters(): diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index 4551761d..3ce622d9 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -1,4 +1,4 @@ -import os +import sys import numpy as np import torch @@ -7,8 +7,8 @@ from tqdm import 
tqdm from modules import modelloader, devices, script_callbacks, shared from modules.shared import opts, state -from swinir_model_arch import SwinIR as net -from swinir_model_arch_v2 import Swin2SR as net2 +from swinir_model_arch import SwinIR +from swinir_model_arch_v2 import Swin2SR from modules.upscaler import Upscaler, UpscalerData @@ -36,8 +36,10 @@ class UpscalerSwinIR(Upscaler): self.scalers = scalers def do_upscale(self, img, model_file): - model = self.load_model(model_file) - if model is None: + try: + model = self.load_model(model_file) + except Exception as e: + print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr) return img model = model.to(device_swinir, dtype=devices.dtype) img = upscale(img, model) @@ -56,25 +58,23 @@ class UpscalerSwinIR(Upscaler): ) else: filename = path - if filename is None or not os.path.exists(filename): - return None if filename.endswith(".v2.pth"): - model = net2( - upscale=scale, - in_chans=3, - img_size=64, - window_size=8, - img_range=1.0, - depths=[6, 6, 6, 6, 6, 6], - embed_dim=180, - num_heads=[6, 6, 6, 6, 6, 6], - mlp_ratio=2, - upsampler="nearest+conv", - resi_connection="1conv", + model = Swin2SR( + upscale=scale, + in_chans=3, + img_size=64, + window_size=8, + img_range=1.0, + depths=[6, 6, 6, 6, 6, 6], + embed_dim=180, + num_heads=[6, 6, 6, 6, 6, 6], + mlp_ratio=2, + upsampler="nearest+conv", + resi_connection="1conv", ) params = None else: - model = net( + model = SwinIR( upscale=scale, in_chans=3, img_size=64, diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index a20e8d91..02a1727d 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -1,4 +1,4 @@ -import os +import sys import numpy as np import torch @@ -6,9 +6,8 @@ from PIL import Image import modules.esrgan_model_arch as arch from modules import modelloader, images, devices -from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts - +from modules.upscaler import Upscaler, UpscalerData def mod2normal(state_dict): @@ -142,8 +141,10 @@ class UpscalerESRGAN(Upscaler): self.scalers.append(scaler_data) def do_upscale(self, img, selected_model): - model = self.load_model(selected_model) - if model is None: + try: + model = self.load_model(selected_model) + except Exception as e: + print(f"Unable to load ESRGAN model {selected_model}: {e}", file=sys.stderr) return img model.to(devices.device_esrgan) img = esrgan_upscale(model, img) @@ -159,9 +160,6 @@ class UpscalerESRGAN(Upscaler): ) else: filename = path - if not os.path.exists(filename) or filename is None: - print(f"Unable to load {self.model_path} from {filename}") - return None state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None) diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index 0d9c2e48..0700b853 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -9,7 +9,6 @@ from modules.shared import cmd_opts, opts from modules import modelloader, errors - class UpscalerRealESRGAN(Upscaler): def __init__(self, path): self.name = "RealESRGAN" @@ -43,9 +42,10 @@ class UpscalerRealESRGAN(Upscaler): if not self.enable: return img - info = self.load_model(path) - if not os.path.exists(info.local_data_path): - print(f"Unable to load RealESRGAN model: {info.name}") + try: + info = self.load_model(path) + except Exception: + errors.report(f"Unable to load RealESRGAN model {path}", exc_info=True) return img upsampler = RealESRGANer( @@ -63,20 +63,17 @@ class 
UpscalerRealESRGAN(Upscaler): return image def load_model(self, path): - try: - info = next(iter([scaler for scaler in self.scalers if scaler.data_path == path]), None) - - if info is None: - print(f"Unable to find model info: {path}") - return None - - if info.local_data_path.startswith("http"): - info.local_data_path = modelloader.load_file_from_url(info.data_path, model_dir=self.model_download_path) - - return info - except Exception: - errors.report("Error making Real-ESRGAN models list", exc_info=True) - return None + for scaler in self.scalers: + if scaler.data_path == path: + if scaler.local_data_path.startswith("http"): + scaler.local_data_path = modelloader.load_file_from_url( + scaler.data_path, + model_dir=self.model_download_path, + ) + if not os.path.exists(scaler.local_data_path): + raise FileNotFoundError(f"RealESRGAN data missing: {scaler.local_data_path}") + return scaler + raise ValueError(f"Unable to find model info: {path}") def load_models(self, _): return get_realesrgan_models(self) From 2667f47ffbf7c641a7e77abbdddf5e81bf144199 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Tue, 13 Jun 2023 13:00:05 +0300 Subject: [PATCH 15/40] Remove stray space from SwinIR model URL --- extensions-builtin/SwinIR/scripts/swinir_model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index 3ce622d9..c6bc53a8 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -11,6 +11,7 @@ from swinir_model_arch import SwinIR from swinir_model_arch_v2 import Swin2SR from modules.upscaler import Upscaler, UpscalerData +SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth" device_swinir = devices.get_device_for('swinir') @@ -18,9 +19,7 @@ device_swinir = devices.get_device_for('swinir') class UpscalerSwinIR(Upscaler): def __init__(self, dirname): self.name = "SwinIR" - self.model_url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0" \ - "/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \ - "-L_x4_GAN.pth " + self.model_url = SWINIR_MODEL_URL self.model_name = "SwinIR 4x" self.user_path = dirname super().__init__() From 8ce9b36e0fe51002e72f90ec4dbdc53b564c8fad Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Tue, 13 Jun 2023 13:07:06 +0300 Subject: [PATCH 16/40] Upgrade ruff to 272 --- .github/workflows/on_pull_request.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml index 7b7219fd..8ebf5918 100644 --- a/.github/workflows/on_pull_request.yaml +++ b/.github/workflows/on_pull_request.yaml @@ -18,7 +18,7 @@ jobs: # not to have GHA download an (at the time of writing) 4 GB cache # of PyTorch and other dependencies. - name: Install Ruff - run: pip install ruff==0.0.265 + run: pip install ruff==0.0.272 - name: Run Ruff run: ruff . 
lint-js: From d8071647760a2213aaf33a533addb4d84ba86816 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Tue, 13 Jun 2023 13:07:39 +0300 Subject: [PATCH 17/40] textual_inversion/logging.py: clean up duplicate key from sets (and sort them) (Ruff B033) --- modules/textual_inversion/logging.py | 48 +++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/modules/textual_inversion/logging.py b/modules/textual_inversion/logging.py index 734a4b6f..45823eb1 100644 --- a/modules/textual_inversion/logging.py +++ b/modules/textual_inversion/logging.py @@ -2,11 +2,51 @@ import datetime import json import os -saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "clip_grad_mode", "clip_grad_value", "gradient_step", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file", "gradient_step", "latent_sampling_method"} -saved_params_ti = {"embedding_name", "num_vectors_per_token", "save_embedding_every", "save_image_with_stored_embedding"} -saved_params_hypernet = {"hypernetwork_name", "layer_structure", "activation_func", "weight_init", "add_layer_norm", "use_dropout", "save_hypernetwork_every"} +saved_params_shared = { + "batch_size", + "clip_grad_mode", + "clip_grad_value", + "create_image_every", + "data_root", + "gradient_step", + "initial_step", + "latent_sampling_method", + "learn_rate", + "log_directory", + "model_hash", + "model_name", + "num_of_dataset_images", + "steps", + "template_file", + "training_height", + "training_width", +} +saved_params_ti = { + "embedding_name", + "num_vectors_per_token", + "save_embedding_every", + "save_image_with_stored_embedding", +} +saved_params_hypernet = { + "activation_func", + "add_layer_norm", + "hypernetwork_name", + "layer_structure", + "save_hypernetwork_every", + "use_dropout", + "weight_init", +} saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet -saved_params_previews = {"preview_prompt", "preview_negative_prompt", "preview_steps", "preview_sampler_index", "preview_cfg_scale", "preview_seed", "preview_width", "preview_height"} +saved_params_previews = { + "preview_cfg_scale", + "preview_height", + "preview_negative_prompt", + "preview_prompt", + "preview_sampler_index", + "preview_seed", + "preview_steps", + "preview_width", +} def save_settings_to_file(log_directory, all_params): From 9b687f013d951c43f32bb03674c9af0b3b5b76e3 Mon Sep 17 00:00:00 2001 From: Danil Boldyrev Date: Wed, 14 Jun 2023 00:24:25 +0300 Subject: [PATCH 18/40] Reworked the disabling of functions, refactored part of the code --- .../canvas-zoom-and-pan/javascript/zoom.js | 239 ++++++++---------- .../scripts/hotkey_config.py | 5 +- .../canvas-zoom-and-pan/style.css | 10 +- 3 files changed, 120 insertions(+), 134 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 7c785e29..c5df4318 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -56,116 +56,87 @@ onUiLoaded(async() => { } } + // Check if hotkey is valid function isValidHotkey(value) { const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"]; return ( - (typeof value === "string" && - value.length === 1 && - /[a-z]/i.test(value)) || - specialKeys.includes(value) + (typeof value === "string" && value.length === 1 && /[a-z]/i.test(value)) || + 
specialKeys.includes(value) ); } - - // Create hotkeyConfig from opts + + // Normalize hotkey + function normalizeHotkey(hotkey) { + return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey; + } + + // Format hotkey for display + function formatHotkeyForDisplay(hotkey) { + return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey; + } + + // Create hotkey configuration with the provided options function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) { - const result = {}; - const usedSingleKeys = new Set(); - const usedSpecialKeys = new Set(); - - // Normalize hotkey - function normalizeHotkey(hotkey) { - return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey; - } - - // Format hotkey for display - function formatHotkeyForDisplay(hotkey) { - return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey; - } - - // Check if canvas_hotkey_adjust and canvas_hotkey_zoom are the same + const result = {}; // Resulting hotkey configuration + const usedKeys = new Set(); // Set of used hotkeys + + // Iterate through defaultHotkeysConfig keys + for (const key in defaultHotkeysConfig) { + const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value + const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value + + // Apply appropriate value for undefined, boolean, or object userValue if ( - hotkeysConfigOpts.canvas_hotkey_adjust !== "Disable" && - hotkeysConfigOpts.canvas_hotkey_zoom !== "Disable" && - normalizeHotkey(hotkeysConfigOpts.canvas_hotkey_adjust) === - normalizeHotkey(hotkeysConfigOpts.canvas_hotkey_zoom) + userValue === undefined || + typeof userValue === "boolean" || + typeof userValue === "object" || + userValue === "disable" ) { + result[key] = userValue === undefined ? defaultValue : userValue; + } else if (isValidHotkey(userValue)) { + const normalizedUserValue = normalizeHotkey(userValue); + + // Check for conflicting hotkeys + if (!usedKeys.has(normalizedUserValue)) { + usedKeys.add(normalizedUserValue); + result[key] = normalizedUserValue; + } else { console.error( `Hotkey: ${formatHotkeyForDisplay( - hotkeysConfigOpts.canvas_hotkey_zoom - )} for canvas_hotkey_zoom conflicts with canvas_hotkey_adjust. The default hotkey is used: ${formatHotkeyForDisplay( - defaultHotkeysConfig.canvas_hotkey_zoom + userValue + )} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay( + defaultValue )}` ); - hotkeysConfigOpts.canvas_hotkey_zoom = - defaultHotkeysConfig.canvas_hotkey_zoom; + result[key] = defaultValue; + } + } else { + console.error( + `Hotkey: ${formatHotkeyForDisplay( + userValue + )} for ${key} is not valid. 
The default hotkey is used: ${formatHotkeyForDisplay( + defaultValue + )}` + ); + result[key] = defaultValue; } - - for (const key in defaultHotkeysConfig) { - if (typeof hotkeysConfigOpts[key] === "boolean") { - result[key] = hotkeysConfigOpts[key]; - continue; - } - if (hotkeysConfigOpts[key] === "Disable") { - result[key] = hotkeysConfigOpts[key]; - continue; - } - if ( - hotkeysConfigOpts[key] && - isValidHotkey(hotkeysConfigOpts[key]) - ) { - const hotkey = normalizeHotkey(hotkeysConfigOpts[key]); - const isSpecialKey = hotkey.length > 1; - - if ( - (!isSpecialKey && !usedSingleKeys.has(hotkey)) || - (isSpecialKey && !usedSpecialKeys.has(hotkey)) - ) { - result[key] = hotkey; - - if (isSpecialKey) { - usedSpecialKeys.add(hotkey); - } else { - usedSingleKeys.add(hotkey); - } - } else { - console.error( - `Hotkey: ${formatHotkeyForDisplay( - hotkeysConfigOpts[key] - )} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay( - defaultHotkeysConfig[key] - )}` - ); - result[key] = defaultHotkeysConfig[key]; - - if (isSpecialKey) { - usedSpecialKeys.add(defaultHotkeysConfig[key]); - } else { - usedSingleKeys.add(defaultHotkeysConfig[key]); - } - } - } else { - console.error( - `Hotkey: ${formatHotkeyForDisplay( - hotkeysConfigOpts[key] - )} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay( - defaultHotkeysConfig[key] - )}` - ); - result[key] = defaultHotkeysConfig[key]; - - const isSpecialKey = defaultHotkeysConfig[key].length > 1; - if (isSpecialKey) { - usedSpecialKeys.add(defaultHotkeysConfig[key]); - } else { - usedSingleKeys.add(defaultHotkeysConfig[key]); - } - } } - + return result; } + function disableFunctions(config, disabledFunctions) { + disabledFunctions.forEach((funcName) => { + if (functionMap.hasOwnProperty(funcName)) { + const key = functionMap[funcName]; + config[key] = "disable"; + } + }); + + return config; + } + /** * The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio. * If the image display property is set to 'none', the mask breaks. 
To fix this, the function @@ -219,14 +190,31 @@ onUiLoaded(async() => { canvas_hotkey_fullscreen: "KeyS", canvas_hotkey_move: "KeyF", canvas_hotkey_overlap: "KeyO", + canvas_disabled_functions : [], canvas_show_tooltip: true }; - // swap the actions for ctr + wheel and shift + wheel - const hotkeysConfig = createHotkeyConfig( + + const functionMap = { + "Zoom": "canvas_hotkey_zoom", + "Adjust brush size": "canvas_hotkey_adjust", + "Moving canvas": "canvas_hotkey_move", + "Fullscreen": "canvas_hotkey_fullscreen", + "Reset Zoom": "canvas_hotkey_reset", + "Overlap": "canvas_hotkey_overlap", + }; + + // Loading the configuration from opts + const preHotkeysConfig = createHotkeyConfig( defaultHotkeysConfig, hotkeysConfigOpts ); + // Disable functions that are not needed by the user + const hotkeysConfig = disableFunctions( + preHotkeysConfig, + preHotkeysConfig.canvas_disabled_functions + ); + let isMoving = false; let mouseX, mouseY; let activeElement; @@ -273,52 +261,49 @@ onUiLoaded(async() => { const toolTipElemnt = targetElement.querySelector(".image-container"); const tooltip = document.createElement("div"); - tooltip.className = "tooltip"; + tooltip.className = "canvas-tooltip"; // Creating an item of information const info = document.createElement("i"); - info.className = "tooltip-info"; + info.className = "canvas-tooltip-info"; info.textContent = ""; // Create a container for the contents of the tooltip const tooltipContent = document.createElement("div"); - tooltipContent.className = "tooltip-content"; + tooltipContent.className = "canvas-tooltip-content"; - // Add info about hotkeys - const zoomKey = hotkeysConfig.canvas_hotkey_zoom; - const adjustKey = hotkeysConfig.canvas_hotkey_adjust; - - const hotkeys = [ - {key: `${zoomKey} + wheel`, action: "Zoom canvas"}, - {key: `${adjustKey} + wheel`, action: "Adjust brush size"}, - { - key: hotkeysConfig.canvas_hotkey_reset.charAt( - hotkeysConfig.canvas_hotkey_reset.length - 1 - ), - action: "Reset zoom" - }, - { - key: hotkeysConfig.canvas_hotkey_fullscreen.charAt( - hotkeysConfig.canvas_hotkey_fullscreen.length - 1 - ), - action: "Fullscreen mode" - }, - { - key: hotkeysConfig.canvas_hotkey_move.charAt( - hotkeysConfig.canvas_hotkey_move.length - 1 - ), - action: "Move canvas" - } + // Define an array with hotkey information and their actions + const hotkeysInfo = [ + { configKey: "canvas_hotkey_zoom", action: "Zoom canvas", keySuffix: " + wheel" }, + { configKey: "canvas_hotkey_adjust", action: "Adjust brush size", keySuffix: " + wheel" }, + { configKey: "canvas_hotkey_reset", action: "Reset zoom" }, + { configKey: "canvas_hotkey_fullscreen", action: "Fullscreen mode" }, + { configKey: "canvas_hotkey_move", action: "Move canvas" }, + { configKey: "canvas_hotkey_overlap", action: "Overlap" }, ]; - for (const hotkey of hotkeys) { - if (hotkey.key === "Disable + wheel") { - continue; + + // Create hotkeys array with disabled property based on the config values + const hotkeys = hotkeysInfo.map((info) => { + const configValue = hotkeysConfig[info.configKey]; + const key = info.keySuffix + ? 
`${configValue}${info.keySuffix}` + : configValue.charAt(configValue.length - 1); + return { + key, + action: info.action, + disabled: configValue === "disable", + }; + }); + + for (const hotkey of hotkeys) { + if (hotkey.disabled) { + continue; } - + const p = document.createElement("p"); p.innerHTML = `${hotkey.key} - ${hotkey.action}`; tooltipContent.appendChild(p); - } + } // Add information and content elements to the tooltip element tooltip.appendChild(info); diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py index be8b43f5..1b6683aa 100644 --- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py +++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py @@ -2,11 +2,12 @@ import gradio as gr from modules import shared shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), { - "canvas_hotkey_zoom": shared.OptionInfo("Shift", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt","Disable"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"), - "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt","Disable"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"), + "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"), + "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"), "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"), "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "), "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"), "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"), "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), + "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}), })) diff --git a/extensions-builtin/canvas-zoom-and-pan/style.css b/extensions-builtin/canvas-zoom-and-pan/style.css index 5b131d50..6bcc9570 100644 --- a/extensions-builtin/canvas-zoom-and-pan/style.css +++ b/extensions-builtin/canvas-zoom-and-pan/style.css @@ -1,4 +1,4 @@ -.tooltip-info { +.canvas-tooltip-info { position: absolute; top: 10px; left: 10px; @@ -15,7 +15,7 @@ z-index: 100; } -.tooltip-info::after { +.canvas-tooltip-info::after { content: ''; display: block; width: 2px; @@ -24,7 +24,7 @@ margin-top: 2px; } -.tooltip-info::before { +.canvas-tooltip-info::before { content: ''; display: block; width: 2px; @@ -32,7 +32,7 @@ background-color: white; } -.tooltip-content { +.canvas-tooltip-content { display: none; background-color: #f9f9f9; color: #333; @@ -50,7 +50,7 @@ z-index: 100; } -.tooltip:hover .tooltip-content { 
+.canvas-tooltip:hover .canvas-tooltip-content { display: block; animation: fadeIn 0.5s; opacity: 1; From 3a41d7c551b8441537efb4448250824b21891ab1 Mon Sep 17 00:00:00 2001 From: Danil Boldyrev Date: Wed, 14 Jun 2023 00:31:36 +0300 Subject: [PATCH 19/40] Formatting code with Prettier --- .../canvas-zoom-and-pan/javascript/zoom.js | 170 ++++++++++-------- 1 file changed, 95 insertions(+), 75 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index c5df4318..5ebd2073 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -56,86 +56,95 @@ onUiLoaded(async() => { } } - // Check if hotkey is valid function isValidHotkey(value) { const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"]; return ( - (typeof value === "string" && value.length === 1 && /[a-z]/i.test(value)) || - specialKeys.includes(value) + (typeof value === "string" && + value.length === 1 && + /[a-z]/i.test(value)) || + specialKeys.includes(value) ); } - + // Normalize hotkey function normalizeHotkey(hotkey) { return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey; } - + // Format hotkey for display function formatHotkeyForDisplay(hotkey) { return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey; } - + // Create hotkey configuration with the provided options function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) { const result = {}; // Resulting hotkey configuration const usedKeys = new Set(); // Set of used hotkeys - + // Iterate through defaultHotkeysConfig keys for (const key in defaultHotkeysConfig) { - const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value - const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value - - // Apply appropriate value for undefined, boolean, or object userValue - if ( - userValue === undefined || - typeof userValue === "boolean" || - typeof userValue === "object" || - userValue === "disable" - ) { - result[key] = userValue === undefined ? defaultValue : userValue; - } else if (isValidHotkey(userValue)) { - const normalizedUserValue = normalizeHotkey(userValue); - - // Check for conflicting hotkeys - if (!usedKeys.has(normalizedUserValue)) { - usedKeys.add(normalizedUserValue); - result[key] = normalizedUserValue; + const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value + const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value + + // Apply appropriate value for undefined, boolean, or object userValue + if ( + userValue === undefined || + typeof userValue === "boolean" || + typeof userValue === "object" || + userValue === "disable" + ) { + result[key] = + userValue === undefined ? defaultValue : userValue; + } else if (isValidHotkey(userValue)) { + const normalizedUserValue = normalizeHotkey(userValue); + + // Check for conflicting hotkeys + if (!usedKeys.has(normalizedUserValue)) { + usedKeys.add(normalizedUserValue); + result[key] = normalizedUserValue; + } else { + console.error( + `Hotkey: ${formatHotkeyForDisplay( + userValue + )} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay( + defaultValue + )}` + ); + result[key] = defaultValue; + } } else { - console.error( - `Hotkey: ${formatHotkeyForDisplay( - userValue - )} for ${key} is repeated and conflicts with another hotkey. 
The default hotkey is used: ${formatHotkeyForDisplay( - defaultValue - )}` - ); - result[key] = defaultValue; + console.error( + `Hotkey: ${formatHotkeyForDisplay( + userValue + )} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay( + defaultValue + )}` + ); + result[key] = defaultValue; } - } else { - console.error( - `Hotkey: ${formatHotkeyForDisplay( - userValue - )} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay( - defaultValue - )}` - ); - result[key] = defaultValue; } - } - + return result; } + // Disables functions in the config object based on the provided list of function names function disableFunctions(config, disabledFunctions) { - disabledFunctions.forEach((funcName) => { - if (functionMap.hasOwnProperty(funcName)) { - const key = functionMap[funcName]; - config[key] = "disable"; - } + // Bind the hasOwnProperty method to the functionMap object to avoid errors + const hasOwnProperty = + Object.prototype.hasOwnProperty.bind(functionMap); + + // Loop through the disabledFunctions array and disable the corresponding functions in the config object + disabledFunctions.forEach(funcName => { + if (hasOwnProperty(funcName)) { + const key = functionMap[funcName]; + config[key] = "disable"; + } }); - + + // Return the updated config object return config; - } + } /** * The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio. @@ -190,7 +199,7 @@ onUiLoaded(async() => { canvas_hotkey_fullscreen: "KeyS", canvas_hotkey_move: "KeyF", canvas_hotkey_overlap: "KeyO", - canvas_disabled_functions : [], + canvas_disabled_functions: [], canvas_show_tooltip: true }; @@ -200,9 +209,9 @@ onUiLoaded(async() => { "Moving canvas": "canvas_hotkey_move", "Fullscreen": "canvas_hotkey_fullscreen", "Reset Zoom": "canvas_hotkey_reset", - "Overlap": "canvas_hotkey_overlap", - }; - + "Overlap": "canvas_hotkey_overlap" + }; + // Loading the configuration from opts const preHotkeysConfig = createHotkeyConfig( defaultHotkeysConfig, @@ -213,7 +222,7 @@ onUiLoaded(async() => { const hotkeysConfig = disableFunctions( preHotkeysConfig, preHotkeysConfig.canvas_disabled_functions - ); + ); let isMoving = false; let mouseX, mouseY; @@ -274,36 +283,47 @@ onUiLoaded(async() => { // Define an array with hotkey information and their actions const hotkeysInfo = [ - { configKey: "canvas_hotkey_zoom", action: "Zoom canvas", keySuffix: " + wheel" }, - { configKey: "canvas_hotkey_adjust", action: "Adjust brush size", keySuffix: " + wheel" }, - { configKey: "canvas_hotkey_reset", action: "Reset zoom" }, - { configKey: "canvas_hotkey_fullscreen", action: "Fullscreen mode" }, - { configKey: "canvas_hotkey_move", action: "Move canvas" }, - { configKey: "canvas_hotkey_overlap", action: "Overlap" }, + { + configKey: "canvas_hotkey_zoom", + action: "Zoom canvas", + keySuffix: " + wheel" + }, + { + configKey: "canvas_hotkey_adjust", + action: "Adjust brush size", + keySuffix: " + wheel" + }, + {configKey: "canvas_hotkey_reset", action: "Reset zoom"}, + { + configKey: "canvas_hotkey_fullscreen", + action: "Fullscreen mode" + }, + {configKey: "canvas_hotkey_move", action: "Move canvas"}, + {configKey: "canvas_hotkey_overlap", action: "Overlap"} ]; - + // Create hotkeys array with disabled property based on the config values - const hotkeys = hotkeysInfo.map((info) => { + const hotkeys = hotkeysInfo.map(info => { const configValue = hotkeysConfig[info.configKey]; - const key = info.keySuffix - ? 
`${configValue}${info.keySuffix}` - : configValue.charAt(configValue.length - 1); + const key = info.keySuffix ? + `${configValue}${info.keySuffix}` : + configValue.charAt(configValue.length - 1); return { - key, - action: info.action, - disabled: configValue === "disable", + key, + action: info.action, + disabled: configValue === "disable" }; }); - - for (const hotkey of hotkeys) { + + for (const hotkey of hotkeys) { if (hotkey.disabled) { - continue; + continue; } - + const p = document.createElement("p"); p.innerHTML = `${hotkey.key} - ${hotkey.action}`; tooltipContent.appendChild(p); - } + } // Add information and content elements to the tooltip element tooltip.appendChild(info); From 5be6c026f55760039b3ebb284cc2ce85586be4ac Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 14 Jun 2023 18:51:47 +0900 Subject: [PATCH 20/40] rename routes --- modules/api/api.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index cb1cde78..5ea1d21c 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -210,9 +210,9 @@ class Api: self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) if shared.cmd_opts.add_stop_route: - self.add_api_route("/sdapi/v1/quit-webui", self.quit_webui, methods=["POST"]) - self.add_api_route("/sdapi/v1/restart-webui", self.restart_webui, methods=["POST"]) - self.add_api_route("/_stop", self.stop_route, methods=["POST"]) + self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"]) + self.add_api_route("/sdapi/v1/server-restart", self.restart_webui, methods=["POST"]) + self.add_api_route("/sdapi/v1/server-terminate", self.terminate_webui, methods=["POST"]) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] @@ -721,13 +721,13 @@ class Api: self.app.include_router(self.router) uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=0) - def quit_webui(self): + def kill_webui(self): restart.stop_program() def restart_webui(self): if restart.is_restartable(): restart.restart_program() - def stop_route(request): + def terminate_webui(request): shared.state.server_command = "stop" return Response("Stopping.") From 6387f0e85d207705e0a68178bbf71aa81ba82256 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 14 Jun 2023 18:51:54 +0900 Subject: [PATCH 21/40] update workflow kill test server --- .github/workflows/run_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index 226cf759..394811ac 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -50,7 +50,7 @@ jobs: python -m pytest -vv --junitxml=test/results.xml --cov . 
--cov-report=xml --verify-base-url test - name: Kill test server if: always() - run: curl -vv -XPOST http://127.0.0.1:7860/_stop && sleep 10 + run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-terminate && sleep 10 - name: Show coverage run: | python -m coverage combine .coverage* From 49fb2a337661d1b9a80de8ff35a640083fa98d2f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 14 Jun 2023 19:52:12 +0900 Subject: [PATCH 22/40] response 501 if not a able to restart --- modules/api/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/api/api.py b/modules/api/api.py index 5ea1d21c..4dc48a03 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -727,6 +727,7 @@ class Api: def restart_webui(self): if restart.is_restartable(): restart.restart_program() + return Response(status_code=501) def terminate_webui(request): shared.state.server_command = "stop" From 6091c4e4aa32b674f8ec755e6bd58989f09b08c5 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 14 Jun 2023 19:53:08 +0900 Subject: [PATCH 23/40] terminate -> stop --- .github/workflows/run_tests.yaml | 2 +- modules/api/api.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index 394811ac..96546011 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -50,7 +50,7 @@ jobs: python -m pytest -vv --junitxml=test/results.xml --cov . --cov-report=xml --verify-base-url test - name: Kill test server if: always() - run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-terminate && sleep 10 + run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10 - name: Show coverage run: | python -m coverage combine .coverage* diff --git a/modules/api/api.py b/modules/api/api.py index 4dc48a03..80d45ca7 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -212,7 +212,7 @@ class Api: if shared.cmd_opts.add_stop_route: self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"]) self.add_api_route("/sdapi/v1/server-restart", self.restart_webui, methods=["POST"]) - self.add_api_route("/sdapi/v1/server-terminate", self.terminate_webui, methods=["POST"]) + self.add_api_route("/sdapi/v1/server-stop", self.stop_webui, methods=["POST"]) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] @@ -729,6 +729,6 @@ class Api: restart.restart_program() return Response(status_code=501) - def terminate_webui(request): + def stop_webui(request): shared.state.server_command = "stop" return Response("Stopping.") From fa9d2ac2ff7cf6fbc73525190bd7fde724ec1fb3 Mon Sep 17 00:00:00 2001 From: Jared Deckard Date: Wed, 14 Jun 2023 13:53:13 -0500 Subject: [PATCH 24/40] Fix gradio special args in the call queue --- modules/call_queue.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/call_queue.py b/modules/call_queue.py index 447bb764..64ebf868 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -1,3 +1,4 @@ +from functools import wraps import html import sys import threading @@ -20,6 +21,7 @@ def wrap_queued_call(func): def wrap_gradio_gpu_call(func, extra_outputs=None): + @wraps(func) def f(*args, **kwargs): # if the first argument is a string that says "task(...)", it is treated as a job id @@ -47,6 +49,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): def wrap_gradio_call(func, extra_outputs=None, add_stats=False): + @wraps(func) def f(*args, 
extra_outputs_array=extra_outputs, **kwargs): run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats if run_memmon: From 376f793bded0e7df40eafcacfd086e4e4d218bc5 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 15 Jun 2023 04:23:52 +0900 Subject: [PATCH 25/40] git clone show progress --- modules/launch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 609a181e..97539e68 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -147,10 +147,10 @@ def git_clone(url, dir, name, commithash=None): return run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") + run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) return - run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") + run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True) if commithash is not None: run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") From d3c86e5178725b11a4679097f0aefb0a9fc90014 Mon Sep 17 00:00:00 2001 From: Jared Deckard Date: Wed, 14 Jun 2023 14:03:44 -0500 Subject: [PATCH 26/40] Note the Gradio user in the Exif data --- modules/img2img.py | 5 ++++- modules/processing.py | 3 +++ modules/txt2img.py | 6 ++++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index d704bf90..83bd7857 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -2,6 +2,7 @@ import os import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError +import gradio as gr from modules import sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict @@ -78,7 +79,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): processed_image.save(os.path.join(output_dir, filename)) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, 
n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -160,6 +161,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s p.scripts = modules.scripts.scripts_img2img p.script_args = args + p.user = request.username + if shared.cmd_opts.enable_console_prompts: print(f"\nimg2img: {prompt}", file=shared.progress_print_out) diff --git a/modules/processing.py b/modules/processing.py index d22b353f..3e8d153e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -180,6 +180,8 @@ class StableDiffusionProcessing: self.uc = None self.c = None + self.user = None + @property def sd_model(self): return shared.sd_model @@ -578,6 +580,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, + "User": p.user, } generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) diff --git a/modules/txt2img.py b/modules/txt2img.py index 2e7d202d..6aa79f23 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -4,10 +4,10 @@ from modules.generation_parameters_copypaste import create_override_settings_dic from modules.shared import opts, cmd_opts import modules.shared as shared from modules.ui import plaintext_to_html +import gradio as gr - -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args): +def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -48,6 +48,8 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step p.scripts = modules.scripts.scripts_txt2img p.script_args = args + p.user = request.username + if 
cmd_opts.enable_console_prompts: print(f"\ntxt2img: {prompt}", file=shared.progress_print_out) From 8f18e672439fa1926717df2c938e7089149f3a8b Mon Sep 17 00:00:00 2001 From: Jared Deckard Date: Thu, 15 Jun 2023 10:53:16 -0500 Subject: [PATCH 27/40] Add a user pattern to the filename generator --- modules/images.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/images.py b/modules/images.py index 40efc96c..92b924ef 100644 --- a/modules/images.py +++ b/modules/images.py @@ -359,6 +359,7 @@ class FilenameGenerator: 'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt..] 'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"], 'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT, + 'user': lambda self: self.p.user, } default_time_format = '%Y%m%d%H%M%S' From f603275d84301b5ee952683e951dd1aad72ba615 Mon Sep 17 00:00:00 2001 From: Jared Deckard Date: Thu, 15 Jun 2023 10:55:53 -0500 Subject: [PATCH 28/40] Add an opt-in infotext user name setting --- modules/processing.py | 2 +- modules/shared.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 3e8d153e..a0cc8db2 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -580,7 +580,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, - "User": p.user, + "User": p.user if opts.add_user_name_to_info else None, } generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) diff --git a/modules/shared.py b/modules/shared.py index 271a062d..4c639a21 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -496,6 +496,7 @@ options_templates.update(options_section(('ui', "User interface"), { options_templates.update(options_section(('infotext', "Infotext"), { "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), + "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), })) From 41363e0d27bbaa0c84eebe3c7c8451075390ec4e Mon Sep 17 00:00:00 2001 From: dhwz Date: Fri, 16 Jun 2023 18:10:15 +0200 Subject: [PATCH 29/40] fix very slow loading speed of .safetensors files --- modules/sd_models.py | 7 +++++-- modules/shared.py | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 918f6fd6..d9ac675b 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -247,8 +247,11 @@ def read_metadata_from_safetensors(filename): def read_state_dict(checkpoint_file, print_global_state=False, map_location=None): _, extension = os.path.splitext(checkpoint_file) if extension.lower() == ".safetensors": - device = map_location or shared.weight_load_location or devices.get_optimal_device_name() - pl_sd = safetensors.torch.load_file(checkpoint_file, device=device) + if not 
shared.opts.disable_mmap_load_safetensors: + device = map_location or shared.weight_load_location or devices.get_optimal_device_name() + pl_sd = safetensors.torch.load_file(checkpoint_file, device=device) + else: + pl_sd = safetensors.torch.load(open(checkpoint_file, 'rb').read()) else: pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location) diff --git a/modules/shared.py b/modules/shared.py index 91c31d55..6b0ccac1 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -376,6 +376,7 @@ options_templates.update(options_section(('system', "System"), { "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""), + "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files (fixes very slow loading speed in some cases)."), })) options_templates.update(options_section(('training', "Training"), { From 373ff5a217eca33607abb692b9ebfa38abb7fe33 Mon Sep 17 00:00:00 2001 From: huchenlei Date: Fri, 16 Jun 2023 15:17:17 -0400 Subject: [PATCH 30/40] :bug: Allow Script to have metaclass --- modules/scripts.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/scripts.py b/modules/scripts.py index c902804b..52682fbf 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -2,6 +2,7 @@ import os import re import sys import traceback +import inspect from collections import namedtuple import gradio as gr @@ -238,7 +239,7 @@ def load_scripts(): def register_scripts_from_module(module): for script_class in module.__dict__.values(): - if type(script_class) != type: + if not inspect.isclass(script_class): continue if issubclass(script_class, Script): From 2e1710d88edc1e1a08b01c063fa386b50e5abc30 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 18 Jun 2023 14:07:41 +0900 Subject: [PATCH 31/40] update the description of --add-stop-rout --- modules/cmd_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index de905caa..624dcb4f 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -106,4 +106,4 @@ parser.add_argument("--skip-version-check", action='store_true', help="Do not ch parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') -parser.add_argument('--add-stop-route', action='store_true', help='add /_stop route to stop server') +parser.add_argument('--add-stop-route', action='store_true', help='enable server stop/restart/kill via api') From f7ae0e68c9c91cd95e28552ef930299286026cd7 Mon Sep 17 00:00:00 2001 From: zhtttylz Date: Sun, 18 Jun 2023 15:46:08 +0800 Subject: [PATCH 32/40] Fix Typo of hints.js --- javascript/hints.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 46f342cb..a2f222e5 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -15,7 +15,7 @@ var titles = { "CFG 
Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results", "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result", "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time", - "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed", + "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomized", "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.", "\u{1f4c2}": "Open images output directory", "\u{1f4be}": "Save style", @@ -112,7 +112,7 @@ var titles = { "Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders.", "Multiplier for extra networks": "When adding extra network such as Hypernetwork or Lora to prompt, use this multiplier for it.", "Discard weights with matching name": "Regular expression; if weights's name matches it, the weights is not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.", - "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order lsited.", + "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order listed.", "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction." }; From d2ccdcdc97f8e8b0a1a63f2031716b3866c7b53b Mon Sep 17 00:00:00 2001 From: George Gu Date: Mon, 19 Jun 2023 10:16:18 +0800 Subject: [PATCH 33/40] fix: adding elem_id for img2img resize to and resize by tabs --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 361f596e..ce019f9c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -789,7 +789,7 @@ def create_ui(): selected_scale_tab = gr.State(value=0) with gr.Tabs(): - with gr.Tab(label="Resize to") as tab_scale_to: + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: with FormRow(): with gr.Column(elem_id="img2img_column_size", scale=4): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") @@ -798,7 +798,7 @@ def create_ui(): res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") - with gr.Tab(label="Resize by") as tab_scale_by: + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") with FormRow(): From 27e9e3f6fa41c10eb5256662ddf3643dee933810 Mon Sep 17 00:00:00 2001 From: stablegeniusdiffuser Date: Mon, 19 Jun 2023 20:36:44 +0200 Subject: [PATCH 34/40] Add use_main_prompt parameter to use proper metadata for batch run grids or individual images --- modules/processing.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 8da73884..1d97e95e 100644 --- a/modules/processing.py +++ 
b/modules/processing.py @@ -549,7 +549,7 @@ def program_version(): return res -def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0): +def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False): index = position_in_batch + iteration * p.batch_size clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) @@ -589,9 +589,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) + prompt_text = p.prompt if use_main_prompt else all_prompts[index] negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else "" - return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() + return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip() def process_images(p: StableDiffusionProcessing) -> Processed: @@ -663,8 +664,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))] - def infotext(iteration=0, position_in_batch=0): - return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch) + def infotext(iteration=0, position_in_batch=0, use_main_prompt=False): + return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch, use_main_prompt) if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings: model_hijack.embedding_db.load_textual_inversion_embeddings() @@ -824,7 +825,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: grid = images.image_grid(output_images, p.batch_size) if opts.return_grid: - text = infotext() + text = infotext(use_main_prompt=True) infotexts.insert(0, text) if opts.enable_pnginfo: grid.info["parameters"] = text @@ -832,7 +833,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: index_of_first_image = 1 if opts.grid_save: - images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True) + images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True) if not p.disable_extra_networks and p.extra_network_data: extra_networks.deactivate(p, p.extra_network_data) From d5a5f2f29fc1c2b5f7dda0af5b984264744478b5 Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Sun, 25 Jun 2023 01:31:02 +0800 Subject: [PATCH 35/40] Strip whitespaces from URL and dirname prior to extension installation This avoid some cryptic errors brought by accidental spaces around urls --- modules/ui_extensions.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 515ec262..6e5222ba 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -325,6 +325,11 @@ def normalize_git_url(url): def install_extension_from_url(dirname, url, branch_name=None): check_access() + if isinstance(dirname, str): + dirname = dirname.strip() + if isinstance(url, str): + url = url.strip() + assert url, 'No URL specified' if dirname is None or 
dirname == "": From dd268c48c9099c4cf308eb04590bd201c9b64253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mart=C3=ADn=20=28Netux=29=20Rodr=C3=ADguez?= Date: Sun, 25 Jun 2023 00:30:08 -0300 Subject: [PATCH 36/40] feat(extensions): add toggle all checkbox to Installed tab Small QoL addition. While there is the option to disable all extensions with the radio buttons at the top, that only acts as an added flag and doesn't really change the state of the extensions in the UI. An use case for this checkbox is to disable all extensions except for a few, which is important for debugging extensions. You could do that before, but you'd have to uncheck and recheck every extension one by one. --- javascript/extensions.js | 18 ++++++++++++++++++ modules/ui_extensions.py | 7 +++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/javascript/extensions.js b/javascript/extensions.js index efeaf3a5..1f7254c5 100644 --- a/javascript/extensions.js +++ b/javascript/extensions.js @@ -72,3 +72,21 @@ function config_state_confirm_restore(_, config_state_name, config_restore_type) } return [confirmed, config_state_name, config_restore_type]; } + +function toggle_all_extensions(event) { + gradioApp().querySelectorAll('#extensions .extension_toggle').forEach(function(checkbox_el) { + checkbox_el.checked = event.target.checked; + }); +} + +function toggle_extension() { + let all_extensions_toggled = true; + for (const checkbox_el of gradioApp().querySelectorAll('#extensions .extension_toggle')) { + if (!checkbox_el.checked) { + all_extensions_toggled = false; + break; + } + } + + gradioApp().querySelector('#extensions .all_extensions_toggle').checked = all_extensions_toggled; +} diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 4379a641..50955fab 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -138,7 +138,10 @@ def extension_table(): - + @@ -170,7 +173,7 @@ def extension_table(): code += f""" - + From 4bd490c28dd8f17b7df943eb3963c34d725084fc Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 27 Jun 2023 06:18:43 +0300 Subject: [PATCH 37/40] add missing infotext entry for the pad cond/uncond option --- modules/generation_parameters_copypaste.py | 1 + modules/sd_samplers_kdiffusion.py | 11 ++++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index a638f912..dd30a1b5 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -357,6 +357,7 @@ infotext_to_setting_name_mapping = [ ('Token merging ratio hr', 'token_merging_ratio_hr'), ('RNG', 'randn_source'), ('NGMS', 's_min_uncond'), + ('Pad conds', 'pad_cond_uncond'), ] diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index f8a0c7ba..71581b76 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -69,6 +69,7 @@ class CFGDenoiser(torch.nn.Module): self.init_latent = None self.step = 0 self.image_cfg_scale = None + self.padded_cond_uncond = False def combine_denoised(self, x_out, conds_list, uncond, cond_scale): denoised_uncond = x_out[-uncond.shape[0]:] @@ -133,15 +134,17 @@ class CFGDenoiser(torch.nn.Module): x_in = x_in[:-batch_size] sigma_in = sigma_in[:-batch_size] - # TODO add infotext entry + self.padded_cond_uncond = False if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: empty = shared.sd_model.cond_stage_model_empty_prompt num_repeats = 
(tensor.shape[1] - uncond.shape[1]) // empty.shape[1] if num_repeats < 0: tensor = torch.cat([tensor, empty.repeat((tensor.shape[0], -num_repeats, 1))], axis=1) + self.padded_cond_uncond = True elif num_repeats > 0: uncond = torch.cat([uncond, empty.repeat((uncond.shape[0], num_repeats, 1))], axis=1) + self.padded_cond_uncond = True if tensor.shape[1] == uncond.shape[1] or skip_uncond: if is_edit_model: @@ -405,6 +408,9 @@ class KDiffusionSampler: samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + return samples def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): @@ -438,5 +444,8 @@ class KDiffusionSampler: 's_min_uncond': self.s_min_uncond }, disable=False, callback=self.callback_state, **extra_params_kwargs)) + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + return samples From 9bb1fcfad43103778406ace17e6804c67fad9c17 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 27 Jun 2023 08:59:35 +0300 Subject: [PATCH 38/40] alternate fix for catch errors when retrieving extension index #11290 --- modules/ui_extensions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index f3db76f2..278bf5e4 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -571,9 +571,9 @@ def create_ui(): available_extensions_table = gr.HTML() refresh_available_extensions_button.click( - fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update()]), + fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update(), gr.update()]), inputs=[available_extensions_index, hide_tags, sort_column], - outputs=[available_extensions_index, available_extensions_table, hide_tags, install_result, search_extensions_text], + outputs=[available_extensions_index, available_extensions_table, hide_tags, search_extensions_text, install_result], ) install_extension_button.click( From 24129368f1b732be25ef486edb2cf5a6ace66737 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 27 Jun 2023 09:19:04 +0300 Subject: [PATCH 39/40] send tensors to the correct device when loading from safetensors file with memmap disabled for #11260 --- modules/sd_models.py | 4 +++- modules/shared.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 0391398a..f65f4e36 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -246,11 +246,13 @@ def read_metadata_from_safetensors(filename): def read_state_dict(checkpoint_file, print_global_state=False, map_location=None): _, extension = os.path.splitext(checkpoint_file) if extension.lower() == ".safetensors": + device = map_location or shared.weight_load_location or devices.get_optimal_device_name() + if not shared.opts.disable_mmap_load_safetensors: - device = map_location or shared.weight_load_location or devices.get_optimal_device_name() pl_sd = safetensors.torch.load_file(checkpoint_file, device=device) else: pl_sd = safetensors.torch.load(open(checkpoint_file, 'rb').read()) + pl_sd = {k: v.to(device) for k, v in pl_sd.items()} else: pl_sd = torch.load(checkpoint_file, map_location=map_location 
or shared.weight_load_location) diff --git a/modules/shared.py b/modules/shared.py index 4743a428..203ee1b9 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -376,7 +376,7 @@ options_templates.update(options_section(('system', "System"), { "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""), - "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files (fixes very slow loading speed in some cases)."), + "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), })) options_templates.update(options_section(('training', "Training"), { From d06af4e517865277d0521642c2c5513af9afd76f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 27 Jun 2023 09:26:18 +0300 Subject: [PATCH 40/40] fix and rework #11113 --- modules/api/api.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index f96056b6..1d4aeff5 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -522,9 +522,9 @@ class Api: return options def set_config(self, req: Dict[str, Any]): - checkpoint_key="sd_model_checkpoint" - if checkpoint_key in req and str(req[checkpoint_key]) not in checkpoint_alisases: - raise RuntimeError(f"model {v!r} not found") + checkpoint_name = req.get("sd_model_checkpoint", None) + if checkpoint_name is not None and checkpoint_name not in checkpoint_alisases: + raise RuntimeError(f"model {checkpoint_name!r} not found") for k, v in req.items(): shared.opts.set(k, v)
[Displaced table markup from PATCH 36/40 (modules/ui_extensions.py): the header row gains an all_extensions_toggle checkbox with onchange="toggle_all_extensions(event)" ahead of the Extension / URL / Branch / Version columns, and each extension row's enable checkbox gains the extension_toggle class with onchange="toggle_extension(event)" beside the {html.escape(ext.name)} link ({remote}), {ext.branch} and {version_link} cells.]
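
For reference, the renamed server-control routes from the API patches above can be exercised as follows. This is a minimal illustrative sketch, not part of the patch series: it assumes the WebUI was launched locally on the default port with the API and --add-stop-route enabled, and it uses the third-party "requests" package.

    import requests

    BASE_URL = "http://127.0.0.1:7860"  # assumed local default, matching the CI workflow above

    # Gracefully stop the server (the route formerly exposed as /_stop).
    resp = requests.post(f"{BASE_URL}/sdapi/v1/server-stop")
    print(resp.status_code, resp.text)  # the handler answers with "Stopping."

    # Ask for an in-place restart instead; per the patches above this returns
    # HTTP 501 when the server was not launched in a restartable way.
    resp = requests.post(f"{BASE_URL}/sdapi/v1/server-restart")
    if resp.status_code == 501:
        print("This instance cannot restart itself.")

    # /sdapi/v1/server-kill (formerly /sdapi/v1/quit-webui) terminates the process outright.

Relatedly, the functools.wraps addition in the call_queue patch matters because Gradio decides which special arguments to supply (such as the gr.Request that later patches use to record p.user) by inspecting the handler's parameters, and a generic *args wrapper hides them. A small self-contained illustration, with a hypothetical handler name used purely for demonstration:

    import functools
    import inspect

    def wrap_gradio_like(func):
        @functools.wraps(func)  # copies __name__/__doc__ and sets __wrapped__
        def f(*args, **kwargs):
            return func(*args, **kwargs)
        return f

    def handler(id_task: str, prompt: str, request=None):
        """Stand-in for a WebUI handler that wants the Gradio request object."""
        return prompt

    print(inspect.signature(wrap_gradio_like(handler)))
    # -> (id_task: str, prompt: str, request=None)
    # Without @wraps the wrapper's signature is just (*args, **kwargs), hiding the
    # parameters that Gradio inspects when deciding which special arguments to pass.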