diff --git a/.gitignore b/.gitignore
index 0b1d17ca..7328401f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,4 +32,5 @@ notification.mp3
 /extensions
 /test/stdout.txt
 /test/stderr.txt
-/cache.json
+/cache.json*
+/config_states/
diff --git a/README.md b/README.md
index b67e2296..67a1a83a 100644
--- a/README.md
+++ b/README.md
@@ -100,7 +100,7 @@ Alternatively, use online services (like Google Colab):
 - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
 
 ### Automatic Installation on Windows
-1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH".
+1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (newer versions of Python do not support torch), checking "Add Python to PATH".
 2. Install [git](https://git-scm.com/download/win).
 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`.
 4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
@@ -115,11 +115,12 @@ sudo dnf install wget git python3
 # Arch-based:
 sudo pacman -S wget git python3
 ```
-2. To install in `/home/$(whoami)/stable-diffusion-webui/`, run:
+2. Navigate to the directory you would like the webui to be installed in and execute the following command:
 ```bash
 bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
 ```
 3. Run `webui.sh`.
+4. Check `webui-user.sh` for options.
 
 ### Installation on Apple Silicon
 Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon).
@@ -158,4 +159,4 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
 - Security advice - RyotaK
 - UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
 - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
-- (You)
\ No newline at end of file
+- (You)
diff --git a/environment-wsl2.yaml b/environment-wsl2.yaml
index f8872750..0c4ae680 100644
--- a/environment-wsl2.yaml
+++ b/environment-wsl2.yaml
@@ -4,8 +4,8 @@ channels:
   - defaults
 dependencies:
   - python=3.10
-  - pip=22.2.2
-  - cudatoolkit=11.3
-  - pytorch=1.12.1
-  - torchvision=0.13.1
-  - numpy=1.23.1
\ No newline at end of file
+  - pip=23.0
+  - cudatoolkit=11.8
+  - pytorch=2.0
+  - torchvision=0.15
+  - numpy=1.23
diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py
index b8cff29b..da19cff1 100644
--- a/extensions-builtin/LDSR/scripts/ldsr_model.py
+++ b/extensions-builtin/LDSR/scripts/ldsr_model.py
@@ -25,22 +25,28 @@ class UpscalerLDSR(Upscaler):
         yaml_path = os.path.join(self.model_path, "project.yaml")
         old_model_path = os.path.join(self.model_path, "model.pth")
         new_model_path = os.path.join(self.model_path, "model.ckpt")
-        safetensors_model_path = os.path.join(self.model_path, "model.safetensors")
+
+        local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"])
+        local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None)
+        local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None)
+        local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None)
+
         if os.path.exists(yaml_path):
             statinfo = os.stat(yaml_path)
             if statinfo.st_size >= 10485760:
                 print("Removing invalid LDSR YAML file.")
                 os.remove(yaml_path)
+
         if os.path.exists(old_model_path):
             print("Renaming model from model.pth to model.ckpt")
             os.rename(old_model_path, new_model_path)
-        if os.path.exists(safetensors_model_path):
-            model = safetensors_model_path
+
+        if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
+            model = local_safetensors_path
         else:
-            model = load_file_from_url(url=self.model_url, model_dir=self.model_path,
-                                       file_name="model.ckpt", progress=True)
-            yaml = load_file_from_url(url=self.yaml_url, model_dir=self.model_path,
-                                      file_name="project.yaml", progress=True)
+            model = local_ckpt_path if local_ckpt_path is not None else load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="model.ckpt", progress=True)
+
+        yaml = local_yaml_path if local_yaml_path is not None else load_file_from_url(url=self.yaml_url, model_dir=self.model_path, file_name="project.yaml", progress=True)
 
         try:
             return LDSR(model, yaml)
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index 6be6ef73..45f899fc 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -8,7 +8,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
     def activate(self, p, params_list):
         additional = shared.opts.sd_lora
 
-        if additional != "" and additional in lora.available_loras and len([x for x in params_list if x.items[0] == additional]) == 0:
+        if additional != "None" and additional in lora.available_loras and len([x for x in params_list if x.items[0] == additional]) == 0:
             p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
             params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
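A minimal sketch of the activation rule touched in `extra_networks_lora.py` above: the default Lora from settings is appended only when it is a real selection (not the `"None"` sentinel the dropdown now uses, see `lora_script.py` below) and the prompt has not already requested it. The helper and its names are illustrative, not the actual webui API:

```python
def inject_default_lora(default_name, available, requested):
    """Return the Lora names to activate, adding the settings default when appropriate."""
    already_requested = any(name == default_name for name in requested)
    if default_name != "None" and default_name in available and not already_requested:
        return requested + [default_name]
    return requested


if __name__ == "__main__":
    print(inject_default_lora("add_detail", {"add_detail", "style_a"}, []))   # ['add_detail']
    print(inject_default_lora("None", {"add_detail"}, []))                    # []
    print(inject_default_lora("add_detail", {"add_detail"}, ["add_detail"]))  # ['add_detail']
```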
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 0adab225..3fc38ab9 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -52,5 +52,5 @@ script_callbacks.on_before_ui(before_ui)
 
 
 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
-    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": [""] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
 }))
diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py
index e0fbf3a3..c7fd5739 100644
--- a/extensions-builtin/ScuNET/scripts/scunet_model.py
+++ b/extensions-builtin/ScuNET/scripts/scunet_model.py
@@ -5,11 +5,15 @@ import traceback
 import PIL.Image
 import numpy as np
 import torch
+from tqdm import tqdm
+
 from basicsr.utils.download_util import load_file_from_url
 
 import modules.upscaler
 from modules import devices, modelloader
 from scunet_model_arch import SCUNet as net
+from modules.shared import opts
+from modules import images
 
 
 class UpscalerScuNET(modules.upscaler.Upscaler):
@@ -42,28 +46,78 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
             scalers.append(scaler_data2)
         self.scalers = scalers
 
-    def do_upscale(self, img: PIL.Image, selected_file):
+    @staticmethod
+    @torch.no_grad()
+    def tiled_inference(img, model):
+        # test the image tile by tile
+        h, w = img.shape[2:]
+        tile = opts.SCUNET_tile
+        tile_overlap = opts.SCUNET_tile_overlap
+        if tile == 0:
+            return model(img)
+
+        device = devices.get_device_for('scunet')
+        assert tile % 8 == 0, "tile size should be a multiple of window_size"
+        sf = 1
+
+        stride = tile - tile_overlap
+        h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
+        w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
+        E = torch.zeros(1, 3, h * sf, w * sf, dtype=img.dtype, device=device)
+        W = torch.zeros_like(E, dtype=devices.dtype, device=device)
+
+        with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="ScuNET tiles") as pbar:
+            for h_idx in h_idx_list:
+
+                for w_idx in w_idx_list:
+
+                    in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
+
+                    out_patch = model(in_patch)
+                    out_patch_mask = torch.ones_like(out_patch)
+
+                    E[
+                        ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
+                    ].add_(out_patch)
+                    W[
+                        ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
+                    ].add_(out_patch_mask)
+                    pbar.update(1)
+        output = E.div_(W)
+
+        return output
+
+    def do_upscale(self, img: PIL.Image.Image, selected_file):
+        torch.cuda.empty_cache()
+
         model = self.load_model(selected_file)
         if model is None:
+            print(f"ScuNET: Unable to load model from {selected_file}", file=sys.stderr)
             return img
 
         device = devices.get_device_for('scunet')
-        img = np.array(img)
-        img = img[:, :, ::-1]
-        img = np.moveaxis(img, 2, 0) / 255
-        img = torch.from_numpy(img).float()
-        img = img.unsqueeze(0).to(device)
+        tile = opts.SCUNET_tile
+        h, w = img.height, img.width
+        np_img = np.array(img)
+        np_img = np_img[:, :, ::-1]  # RGB to BGR
+        np_img = np_img.transpose((2, 0, 1)) / 255  # HWC to CHW
+        torch_img = torch.from_numpy(np_img).float().unsqueeze(0).to(device)  # type: ignore
 
-        with torch.no_grad():
-            output = model(img)
-        output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
-        output = 255. * np.moveaxis(output, 0, 2)
-        output = output.astype(np.uint8)
-        output = output[:, :, ::-1]
+        if tile > h or tile > w:
+            _img = torch.zeros(1, 3, max(h, tile), max(w, tile), dtype=torch_img.dtype, device=torch_img.device)
+            _img[:, :, :h, :w] = torch_img # pad image
+            torch_img = _img
+
+        torch_output = self.tiled_inference(torch_img, model).squeeze(0)
+        torch_output = torch_output[:, :h * 1, :w * 1] # remove padding, if any
+        np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy()
+        del torch_img, torch_output
         torch.cuda.empty_cache()
-        return PIL.Image.fromarray(output, 'RGB')
+
+        output = np_output.transpose((1, 2, 0))  # CHW to HWC
+        output = output[:, :, ::-1]  # BGR to RGB
+        return PIL.Image.fromarray((output * 255).astype(np.uint8))
 
     def load_model(self, path: str):
         device = devices.get_device_for('scunet')
@@ -84,4 +138,3 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
         model = model.to(device)
 
         return model
-
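The new `tiled_inference` above runs ScuNET over overlapping tiles and blends them by accumulating both the tile outputs (`E`) and a per-pixel hit count (`W`), then dividing. A self-contained NumPy sketch of that accumulate-and-normalise scheme, with a stand-in `model` callable instead of the real torch module and hard-coded defaults instead of the `SCUNET_tile`/`SCUNET_tile_overlap` options:

```python
import numpy as np

def tiled_apply(img, model, tile=256, overlap=8):
    """Apply `model` to overlapping tiles of a CHW image and average the overlaps."""
    c, h, w = img.shape
    if tile == 0 or tile >= h or tile >= w:
        return model(img)  # image fits in a single tile (the real code pads instead)

    stride = tile - overlap
    h_starts = list(range(0, h - tile, stride)) + [h - tile]
    w_starts = list(range(0, w - tile, stride)) + [w - tile]

    acc = np.zeros_like(img, dtype=np.float64)     # summed tile outputs (E)
    weight = np.zeros_like(img, dtype=np.float64)  # how many tiles covered each pixel (W)

    for hs in h_starts:
        for ws in w_starts:
            out = model(img[:, hs:hs + tile, ws:ws + tile])
            acc[:, hs:hs + tile, ws:ws + tile] += out
            weight[:, hs:hs + tile, ws:ws + tile] += 1.0

    return acc / weight


if __name__ == "__main__":
    image = np.random.default_rng(0).random((3, 300, 300))
    result = tiled_apply(image, lambda patch: patch, tile=128, overlap=16)
    print(np.allclose(result, image))  # True: an identity "model" reproduces the input
```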
diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
index f0918e26..5c7a836a 100644
--- a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
+++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
@@ -1,103 +1,42 @@
 // Stable Diffusion WebUI - Bracket checker
-// Version 1.0
-// By Hingashi no Florin/Bwin4L
+// By Hingashi no Florin/Bwin4L & @akx
 // Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs.
 // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.
 
-function checkBrackets(evt, textArea, counterElt) {
-  errorStringParen = '(...) - Different number of opening and closing parentheses detected.\n';
-  errorStringSquare = '[...] - Different number of opening and closing square brackets detected.\n';
-  errorStringCurly = '{...} - Different number of opening and closing curly brackets detected.\n';
+function checkBrackets(textArea, counterElt) {
+  var counts = {};
+  (textArea.value.match(/[(){}\[\]]/g) || []).forEach(bracket => {
+    counts[bracket] = (counts[bracket] || 0) + 1;
+  });
+  var errors = [];
 
-  openBracketRegExp = /\(/g;
-  closeBracketRegExp = /\)/g;
-
-  openSquareBracketRegExp = /\[/g;
-  closeSquareBracketRegExp = /\]/g;
-
-  openCurlyBracketRegExp = /\{/g;
-  closeCurlyBracketRegExp = /\}/g;
-
-  totalOpenBracketMatches = 0;
-  totalCloseBracketMatches = 0;
-  totalOpenSquareBracketMatches = 0;
-  totalCloseSquareBracketMatches = 0;
-  totalOpenCurlyBracketMatches = 0;
-  totalCloseCurlyBracketMatches = 0;
-
-  openBracketMatches = textArea.value.match(openBracketRegExp);
-  if(openBracketMatches) {
-    totalOpenBracketMatches = openBracketMatches.length;
-  }
-
-  closeBracketMatches = textArea.value.match(closeBracketRegExp);
-  if(closeBracketMatches) {
-    totalCloseBracketMatches = closeBracketMatches.length;
-  }
-
-  openSquareBracketMatches = textArea.value.match(openSquareBracketRegExp);
-  if(openSquareBracketMatches) {
-    totalOpenSquareBracketMatches = openSquareBracketMatches.length;
-  }
-
-  closeSquareBracketMatches = textArea.value.match(closeSquareBracketRegExp);
-  if(closeSquareBracketMatches) {
-    totalCloseSquareBracketMatches = closeSquareBracketMatches.length;
-  }
-
-  openCurlyBracketMatches = textArea.value.match(openCurlyBracketRegExp);
-  if(openCurlyBracketMatches) {
-    totalOpenCurlyBracketMatches = openCurlyBracketMatches.length;
-  }
-
-  closeCurlyBracketMatches = textArea.value.match(closeCurlyBracketRegExp);
-  if(closeCurlyBracketMatches) {
-    totalCloseCurlyBracketMatches = closeCurlyBracketMatches.length;
-  }
-
-  if(totalOpenBracketMatches != totalCloseBracketMatches) {
-    if(!counterElt.title.includes(errorStringParen)) {
-      counterElt.title += errorStringParen;
+  function checkPair(open, close, kind) {
+    if (counts[open] !== counts[close]) {
+      errors.push(
+        `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`
+      );
     }
-  } else {
-    counterElt.title = counterElt.title.replace(errorStringParen, '');
   }
 
-  if(totalOpenSquareBracketMatches != totalCloseSquareBracketMatches) {
-    if(!counterElt.title.includes(errorStringSquare)) {
-      counterElt.title += errorStringSquare;
-    }
-  } else {
-    counterElt.title = counterElt.title.replace(errorStringSquare, '');
-  }
+  checkPair('(', ')', 'round brackets');
+  checkPair('[', ']', 'square brackets');
+  checkPair('{', '}', 'curly brackets');
+  counterElt.title = errors.join('\n');
+  counterElt.classList.toggle('error', errors.length !== 0);
+}
 
-  if(totalOpenCurlyBracketMatches != totalCloseCurlyBracketMatches) {
-    if(!counterElt.title.includes(errorStringCurly)) {
-      counterElt.title += errorStringCurly;
-    }
-  } else {
-    counterElt.title = counterElt.title.replace(errorStringCurly, '');
-  }
+function setupBracketChecking(id_prompt, id_counter) {
+  var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
+  var counter = gradioApp().getElementById(id_counter)
 
-  if(counterElt.title != '') {
-    counterElt.classList.add('error');
-  } else {
-    counterElt.classList.remove('error');
+  if (textarea && counter) {
+    textarea.addEventListener("input", () => checkBrackets(textarea, counter));
   }
 }
 
-function setupBracketChecking(id_prompt, id_counter){
-    var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
" > label > textarea"); - var counter = gradioApp().getElementById(id_counter) - - textarea.addEventListener("input", function(evt){ - checkBrackets(evt, textarea, counter) - }); -} - -onUiLoaded(function(){ - setupBracketChecking('txt2img_prompt', 'txt2img_token_counter') - setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter') - setupBracketChecking('img2img_prompt', 'img2img_token_counter') - setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter') -}) \ No newline at end of file +onUiLoaded(function () { + setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); + setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); + setupBracketChecking('img2img_prompt', 'img2img_token_counter'); + setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter'); +}); diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js index 06f505b0..9468c107 100644 --- a/javascript/contextMenus.js +++ b/javascript/contextMenus.js @@ -161,14 +161,6 @@ addContextMenuEventListener = initResponse[2]; appendContextMenuOption('#img2img_interrupt','Cancel generate forever',cancelGenerateForever) appendContextMenuOption('#img2img_generate', 'Cancel generate forever',cancelGenerateForever) - appendContextMenuOption('#roll','Roll three', - function(){ - let rollbutton = get_uiCurrentTabContent().querySelector('#roll'); - setTimeout(function(){rollbutton.click()},100) - setTimeout(function(){rollbutton.click()},200) - setTimeout(function(){rollbutton.click()},300) - } - ) })(); //End example Context Menu Items diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 20a5aadf..588c7b77 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -17,7 +17,7 @@ function keyupEditAttention(event){ // Find opening parenthesis around current cursor const before = text.substring(0, selectionStart); let beforeParen = before.lastIndexOf(OPEN); - if (beforeParen == -1) return false; + if (beforeParen == -1) return false; let beforeParenClose = before.lastIndexOf(CLOSE); while (beforeParenClose !== -1 && beforeParenClose > beforeParen) { beforeParen = before.lastIndexOf(OPEN, beforeParen - 1); @@ -27,7 +27,7 @@ function keyupEditAttention(event){ // Find closing parenthesis around current cursor const after = text.substring(selectionStart); let afterParen = after.indexOf(CLOSE); - if (afterParen == -1) return false; + if (afterParen == -1) return false; let afterParenOpen = after.indexOf(OPEN); while (afterParenOpen !== -1 && afterParen > afterParenOpen) { afterParen = after.indexOf(CLOSE, afterParen + 1); @@ -43,10 +43,28 @@ function keyupEditAttention(event){ target.setSelectionRange(selectionStart, selectionEnd); return true; } + + function selectCurrentWord(){ + if (selectionStart !== selectionEnd) return false; + const delimiters = opts.keyedit_delimiters + " \r\n\t"; + + // seek backward until to find beggining + while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) { + selectionStart--; + } + + // seek forward to find end + while (!delimiters.includes(text[selectionEnd]) && selectionEnd < text.length) { + selectionEnd++; + } - // If the user hasn't selected anything, let's select their current parenthesis block - if(! 
-    if(! selectCurrentParenthesisBlock('<', '>')){
-        selectCurrentParenthesisBlock('(', ')')
+        target.setSelectionRange(selectionStart, selectionEnd);
+        return true;
+    }
+
+    // If the user hasn't selected anything, let's select their current parenthesis block or word
+    if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')')) {
+        selectCurrentWord();
     }
 
     event.preventDefault();
@@ -81,7 +99,13 @@ function keyupEditAttention(event){
         weight = parseFloat(weight.toPrecision(12));
         if(String(weight).length == 1) weight += ".0"
 
-        text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + 1 + end - 1);
+        if (closeCharacter == ')' && weight == 1) {
+            text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 5);
+            selectionStart--;
+            selectionEnd--;
+        } else {
+            text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + 1 + end - 1);
+        }
 
         target.focus();
         target.value = text;
@@ -93,4 +117,4 @@ function keyupEditAttention(event){
 
 addEventListener('keydown', (event) => {
     keyupEditAttention(event);
-});
\ No newline at end of file
+});
diff --git a/javascript/extensions.js b/javascript/extensions.js
index 72924a28..3c2f995a 100644
--- a/javascript/extensions.js
+++ b/javascript/extensions.js
@@ -47,3 +47,25 @@ function install_extension_from_index(button, url){
 
     gradioApp().querySelector('#install_extension_button').click()
 }
+
+function config_state_confirm_restore(_, config_state_name, config_restore_type) {
+    if (config_state_name == "Current") {
+        return [false, config_state_name, config_restore_type];
+    }
+    let restored = "";
+    if (config_restore_type == "extensions") {
+        restored = "all saved extension versions";
+    } else if (config_restore_type == "webui") {
+        restored = "the webui version";
+    } else {
+        restored = "the webui version and all saved extension versions";
+    }
+    let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + ".");
+    if (confirmed) {
+        restart_reload();
+        gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
+            x.innerHTML = "Loading..."
+        })
+    }
+    return [confirmed, config_state_name, config_restore_type];
+}
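The `edit-attention.js` change earlier in this patch adds one more rule: when Ctrl+Up/Down editing brings a `(text:weight)` group back to a weight of 1, the wrapper is removed instead of leaving `(text:1.0)` in the prompt. A rough Python sketch of that normalisation, assuming the webui `(text:weight)` attention syntax (the helper is illustrative, not part of the codebase):

```python
def format_attention(text, weight):
    """Wrap prompt text in (text:weight), or return it bare when the weight is neutral."""
    if weight == 1:
        return text  # a weight of 1 is the default, so the parentheses are dropped
    return f"({text}:{weight})"


if __name__ == "__main__":
    print(format_attention("blue sky", 1.1))  # (blue sky:1.1)
    print(format_attention("blue sky", 1))    # blue sky
```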
diff --git a/javascript/generationParams.js b/javascript/generationParams.js
index 95f05093..1266a266 100644
--- a/javascript/generationParams.js
+++ b/javascript/generationParams.js
@@ -16,9 +16,9 @@ onUiUpdate(function(){
 
 let modalObserver = new MutationObserver(function(mutations) {
     mutations.forEach(function(mutationRecord) {
-        let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText
-        if (mutationRecord.target.style.display === 'none' && selectedTab === 'txt2img' || selectedTab === 'img2img')
-            gradioApp().getElementById(selectedTab+"_generation_info_button").click()
+        let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText
+        if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img'))
+            gradioApp().getElementById(selectedTab+"_generation_info_button")?.click()
     });
 });
diff --git a/javascript/hints.js b/javascript/hints.js
index f48a0eb6..fa023585 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -65,8 +65,8 @@ titles = {
     "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",
 
-    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime], [datetime