Merge branch 'release_candidate'
commit 68f336bd99

CHANGELOG.md (25 lines changed)
@@ -1,3 +1,28 @@
+## 1.5.1
+
+### Minor:
+ * support parsing text encoder blocks in some new LoRAs
+ * delete scale checker script due to user demand
+
+### Extensions and API:
+ * add postprocess_batch_list script callback
+
+### Bug Fixes:
+ * fix TI training for SD1
+ * fix reload altclip model error
+ * prepend the pythonpath instead of overriding it
+ * fix typo in SD_WEBUI_RESTARTING
+ * if txt2img/img2img raises an exception, finally call state.end()
+ * fix composable diffusion weight parsing
+ * restyle Startup profile for black users
+ * fix webui not launching with --nowebui
+ * catch exception for non git extensions
+ * fix some options missing from /sdapi/v1/options
+ * fix for extension update status always saying "unknown"
+ * fix display of extra network cards that have `<>` in the name
+ * update lora extension to work with python 3.8
+
+
 ## 1.5.0
 
 ### Features:
@@ -1,3 +1,4 @@
+from __future__ import annotations
 import os
 from collections import namedtuple
 import enum
@@ -163,6 +163,11 @@ def load_network(name, network_on_disk):
             key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
             sd_module = shared.sd_model.network_layer_mapping.get(key, None)
 
+            # some SD1 Loras also have correct compvis keys
+            if sd_module is None:
+                key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
+                sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
         if sd_module is None:
             keys_failed_to_match[key_network] = key
             continue
@@ -1,108 +0,0 @@
-(function() {
-    var ignore = localStorage.getItem("bad-scale-ignore-it") == "ignore-it";
-
-    function getScale() {
-        var ratio = 0,
-            screen = window.screen,
-            ua = navigator.userAgent.toLowerCase();
-
-        if (window.devicePixelRatio !== undefined) {
-            ratio = window.devicePixelRatio;
-        } else if (~ua.indexOf('msie')) {
-            if (screen.deviceXDPI && screen.logicalXDPI) {
-                ratio = screen.deviceXDPI / screen.logicalXDPI;
-            }
-        } else if (window.outerWidth !== undefined && window.innerWidth !== undefined) {
-            ratio = window.outerWidth / window.innerWidth;
-        }
-
-        return ratio == 0 ? 0 : Math.round(ratio * 100);
-    }
-
-    var showing = false;
-
-    var div = document.createElement("div");
-    div.style.position = "fixed";
-    div.style.top = "0px";
-    div.style.left = "0px";
-    div.style.width = "100vw";
-    div.style.backgroundColor = "firebrick";
-    div.style.textAlign = "center";
-    div.style.zIndex = 99;
-
-    var b = document.createElement("b");
-    b.innerHTML = 'Bad Scale: ??% ';
-
-    div.appendChild(b);
-
-    var note1 = document.createElement("p");
-    note1.innerHTML = "Change your browser or your computer settings!";
-    note1.title = 'Just make sure "computer-scale" * "browser-scale" = 100% ,\n' +
-        "you can keep your computer-scale and only change this page's scale,\n" +
-        "for example: your computer-scale is 125%, just use [\"CTRL\"+\"-\"] to make your browser-scale of this page to 80%.";
-    div.appendChild(note1);
-
-    var note2 = document.createElement("p");
-    note2.innerHTML = " Otherwise, it will cause this page to not function properly!";
-    note2.title = "When you click \"Copy image to: [inpaint sketch]\" in some img2img's tab,\n" +
-        "if scale<100% the canvas will be invisible,\n" +
-        "else if scale>100% this page will take large amount of memory and CPU performance.";
-    div.appendChild(note2);
-
-    var btn = document.createElement("button");
-    btn.innerHTML = "Click here to ignore";
-
-    div.appendChild(btn);
-
-    function tryShowTopBar(scale) {
-        if (showing) return;
-
-        b.innerHTML = 'Bad Scale: ' + scale + '% ';
-
-        var updateScaleTimer = setInterval(function() {
-            var newScale = getScale();
-            b.innerHTML = 'Bad Scale: ' + newScale + '% ';
-            if (newScale == 100) {
-                var p = div.parentNode;
-                if (p != null) p.removeChild(div);
-                showing = false;
-                clearInterval(updateScaleTimer);
-                check();
-            }
-        }, 999);
-
-        btn.onclick = function() {
-            clearInterval(updateScaleTimer);
-            var p = div.parentNode;
-            if (p != null) p.removeChild(div);
-            ignore = true;
-            showing = false;
-            localStorage.setItem("bad-scale-ignore-it", "ignore-it");
-        };
-
-        document.body.appendChild(div);
-    }
-
-    function check() {
-        if (!ignore) {
-            var timer = setInterval(function() {
-                var scale = getScale();
-                if (scale != 100 && !ignore) {
-                    tryShowTopBar(scale);
-                    clearInterval(timer);
-                }
-                if (ignore) {
-                    clearInterval(timer);
-                }
-            }, 999);
-        }
-    }
-
-    if (document.readyState != "complete") {
-        document.onreadystatechange = function() {
-            if (document.readyState != "complete") check();
-        };
-    } else {
-        check();
-    }
-})();
@@ -333,14 +333,16 @@ class Api:
         p.outpath_grids = opts.outdir_txt2img_grids
         p.outpath_samples = opts.outdir_txt2img_samples
 
-        shared.state.begin(job="scripts_txt2img")
-        if selectable_scripts is not None:
-            p.script_args = script_args
-            processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
-        else:
-            p.script_args = tuple(script_args) # Need to pass args as tuple here
-            processed = process_images(p)
-        shared.state.end()
+        try:
+            shared.state.begin(job="scripts_txt2img")
+            if selectable_scripts is not None:
+                p.script_args = script_args
+                processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
+            else:
+                p.script_args = tuple(script_args) # Need to pass args as tuple here
+                processed = process_images(p)
+        finally:
+            shared.state.end()
 
         b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
 
@@ -390,14 +392,16 @@ class Api:
         p.outpath_grids = opts.outdir_img2img_grids
         p.outpath_samples = opts.outdir_img2img_samples
 
-        shared.state.begin(job="scripts_img2img")
-        if selectable_scripts is not None:
-            p.script_args = script_args
-            processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
-        else:
-            p.script_args = tuple(script_args) # Need to pass args as tuple here
-            processed = process_images(p)
-        shared.state.end()
+        try:
+            shared.state.begin(job="scripts_img2img")
+            if selectable_scripts is not None:
+                p.script_args = script_args
+                processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
+            else:
+                p.script_args = tuple(script_args) # Need to pass args as tuple here
+                processed = process_images(p)
+        finally:
+            shared.state.end()
 
         b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
 
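Aside (illustration only, not part of the diff): the point of the two hunks above is that shared.state.end() now runs even when a script raises, so the API job state is never left stuck on "busy". A minimal sketch of the pattern, with a stand-in JobState class instead of the webui's actual state object:

    # JobState stands in for shared.state; only the begin/end bookkeeping is shown.
    class JobState:
        def __init__(self):
            self.job = None

        def begin(self, job):
            self.job = job

        def end(self):
            self.job = None


    state = JobState()


    def run_job(fn):
        try:
            state.begin(job="scripts_txt2img")
            return fn()
        finally:
            state.end()  # runs even if fn() raises


    try:
        run_job(lambda: 1 / 0)
    except ZeroDivisionError:
        pass

    assert state.job is None  # the job state was cleaned up despite the exception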
@@ -720,9 +724,9 @@ class Api:
             cuda = {'error': f'{err}'}
         return models.MemoryResponse(ram=ram, cuda=cuda)
 
-    def launch(self, server_name, port):
+    def launch(self, server_name, port, root_path):
         self.app.include_router(self.router)
-        uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive)
+        uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path)
 
     def kill_webui(self):
         restart.stop_program()
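For illustration (not part of the diff): uvicorn's root_path is what makes the API usable behind a reverse proxy that strips a URL prefix. A minimal standalone sketch; the app and the "/proxied" prefix are made up:

    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()


    @app.get("/ping")
    def ping():
        return {"ok": True}


    if __name__ == "__main__":
        # root_path tells FastAPI/uvicorn that a proxy strips "/proxied",
        # so generated docs and URLs stay correct when reached via /proxied/ping.
        uvicorn.run(app, host="127.0.0.1", port=7861, root_path="/proxied")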
@@ -208,11 +208,9 @@ class PreprocessResponse(BaseModel):
 fields = {}
 for key, metadata in opts.data_labels.items():
     value = opts.data.get(key)
-    optType = opts.typemap.get(type(metadata.default), type(metadata.default))
+    optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any
 
-    if metadata.default is None:
-        pass
-    elif metadata is not None:
+    if metadata is not None:
         fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
     else:
         fields.update({key: (Optional[optType], Field())})
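Usage note (not part of the diff): the hunk above types an option from its default only when there is a usable default and falls back to Any otherwise, so options whose default is None still get a field in the generated model instead of being dropped from /sdapi/v1/options. A rough sketch of that idea with made-up option names (assumes pydantic is installed):

    from typing import Any, Optional

    from pydantic import Field, create_model

    # made-up options: one with no default, one with an int default
    data_labels = {
        "sd_model_checkpoint": None,
        "CLIP_stop_at_last_layers": 1,
    }

    fields = {}
    for key, default in data_labels.items():
        opt_type = type(default) if default else Any  # typing after NoneType would be useless
        if default is not None:
            fields[key] = (Optional[opt_type], Field(default=default))
        else:
            fields[key] = (Optional[opt_type], Field())

    OptionsModel = create_model("OptionsModel", **fields)
    opts = OptionsModel(sd_model_checkpoint="v1-5-pruned.safetensors")
    print(opts)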
@@ -56,10 +56,12 @@ class Extension:
                 self.do_read_info_from_repo()
 
                 return self.to_dict()
-
-        d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
-        self.from_dict(d)
-        self.status = 'unknown'
+        try:
+            d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
+            self.from_dict(d)
+        except FileNotFoundError:
+            pass
+        self.status = 'unknown' if self.status == '' else self.status
 
     def do_read_info_from_repo(self):
         repo = None
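Aside (generic sketch, not part of the diff): for an extension directory that is not a git checkout, reading the cached .git data can raise FileNotFoundError; the hunk above swallows that case and falls back to an "unknown" status. The helper names below are hypothetical stand-ins for the Extension method and cache helper:

    import os


    def read_remote_info(ext_path, cache_read):
        # stand-in for Extension.read_info_from_repo(); cache_read mimics the cache helper
        info = {"remote": None, "status": ""}
        try:
            info.update(cache_read(os.path.join(ext_path, ".git")))
        except FileNotFoundError:
            pass  # not a git checkout; keep the defaults
        info["status"] = info["status"] or "unknown"
        return info


    def fake_cache_read(path):
        raise FileNotFoundError(path)


    print(read_remote_info("/tmp/not-a-git-extension", fake_cache_read))
    # {'remote': None, 'status': 'unknown'}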
@@ -196,7 +196,7 @@ def run_extension_installer(extension_dir):
 
     try:
         env = os.environ.copy()
-        env['PYTHONPATH'] = os.path.abspath(".")
+        env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
 
         print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
     except Exception as e:
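A quick illustration of the difference (not part of the diff): prepending keeps whatever the user already had on PYTHONPATH instead of silently dropping it. Paths are illustrative:

    import os

    env = {"PYTHONPATH": "/opt/my-tools"}

    # old behaviour: override, losing the user's entries
    env_override = dict(env)
    env_override["PYTHONPATH"] = os.path.abspath(".")

    # new behaviour: prepend the webui root, keep the user's entries
    env_prepend = dict(env)
    env_prepend["PYTHONPATH"] = f"{os.path.abspath('.')}{os.pathsep}{env_prepend.get('PYTHONPATH', '')}"

    print(env_override["PYTHONPATH"])  # only the webui root
    print(env_prepend["PYTHONPATH"])   # webui root, then os.pathsep, then /opt/my-tools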
@@ -233,7 +233,7 @@ def run_extensions_installers(settings_file):
 re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
 
 
-def requrements_met(requirements_file):
+def requirements_met(requirements_file):
     """
     Does a simple parse of a requirements.txt file to determine if all rerqirements in it
     are already installed. Returns True if so, False if not installed or parsing fails.
@@ -293,7 +293,7 @@ def prepare_environment():
     try:
         # the existance of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
         os.remove(os.path.join(script_path, "tmp", "restart"))
-        os.environ.setdefault('SD_WEBUI_RESTARTING ', '1')
+        os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
     except OSError:
         pass
 
@@ -354,7 +354,7 @@ def prepare_environment():
     if not os.path.isfile(requirements_file):
         requirements_file = os.path.join(script_path, requirements_file)
 
-    if not requrements_met(requirements_file):
+    if not requirements_met(requirements_file):
         run_pip(f"install -r \"{requirements_file}\"", "requirements")
 
     run_extensions_installers(settings_file=args.ui_settings_file)
@@ -600,8 +600,12 @@ def program_version():
     return res
 
 
-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False):
-    index = position_in_batch + iteration * p.batch_size
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+    if index is None:
+        index = position_in_batch + iteration * p.batch_size
+
+    if all_negative_prompts is None:
+        all_negative_prompts = p.all_negative_prompts
 
     clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
     enable_hr = getattr(p, 'enable_hr', False)
@@ -617,12 +621,12 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Sampler": p.sampler_name,
         "CFG scale": p.cfg_scale,
         "Image CFG scale": getattr(p, 'image_cfg_scale', None),
-        "Seed": all_seeds[index],
+        "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
         "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
         "Size": f"{p.width}x{p.height}",
         "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
         "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
-        "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
+        "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
         "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
         "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
         "Denoising strength": getattr(p, 'denoising_strength', None),
@@ -642,7 +646,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
     generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
 
     prompt_text = p.prompt if use_main_prompt else all_prompts[index]
-    negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else ""
+    negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""
 
     return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
 
@@ -716,9 +720,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         else:
             p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
 
-    def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
-        return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
-
     if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
         model_hijack.embedding_db.load_textual_inversion_embeddings()
 
@@ -806,6 +807,16 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if p.scripts is not None:
                 p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)
 
+                p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+                p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+
+                batch_params = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
+                p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
+                x_samples_ddim = batch_params.images
+
+            def infotext(index=0, use_main_prompt=False):
+                return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)
+
             for i, x_sample in enumerate(x_samples_ddim):
                 p.batch_index = i
 
@@ -814,7 +825,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
 
                 if p.restore_faces:
                     if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
-                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
+                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")
 
                     devices.torch_gc()
 
@@ -831,15 +842,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                 if p.color_corrections is not None and i < len(p.color_corrections):
                     if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
                         image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
-                        images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
+                        images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
                     image = apply_color_correction(p.color_corrections[i], image)
 
                 image = apply_overlay(image, p.paste_to, i, p.overlay_images)
 
                 if opts.samples_save and not p.do_not_save_samples:
-                    images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p)
+                    images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)
 
-                text = infotext(n, i)
+                text = infotext(i)
                 infotexts.append(text)
                 if opts.enable_pnginfo:
                     image.info["parameters"] = text
@@ -850,10 +861,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                     image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
 
                     if opts.save_mask:
-                        images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
+                        images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
 
                     if opts.save_mask_composite:
-                        images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
+                        images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
 
                     if opts.return_mask:
                         output_images.append(image_mask)
@@ -894,7 +905,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         p,
         images_list=output_images,
         seed=p.all_seeds[0],
-        info=infotext(),
+        info=infotexts[0],
         comments="".join(f"{comment}\n" for comment in comments),
         subseed=p.all_subseeds[0],
         index_of_first_image=index_of_first_image,
@@ -178,7 +178,7 @@ def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps):
 
 
 re_AND = re.compile(r"\bAND\b")
-re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
+re_weight = re.compile(r"^((?:\s|.)*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
 
 
 def get_multicond_prompt_list(prompts: SdConditioning | list[str]):
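For illustration (not part of the diff): `.` does not match newlines by default, so the old pattern never matched a multi-line subprompt and its `:weight` suffix was effectively ignored, while `(?:\s|.)` accepts any character. A quick comparison:

    import re

    old = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
    new = re.compile(r"^((?:\s|.)*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")

    prompt = "a cat\nsitting on a mat :1.5"

    print(old.search(prompt))           # None -- '.' stops at the newline
    print(new.search(prompt).groups())  # ('a cat\nsitting on a mat', '1.5')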
@@ -16,6 +16,11 @@ class PostprocessImageArgs:
         self.image = image
 
 
+class PostprocessBatchListArgs:
+    def __init__(self, images):
+        self.images = images
+
+
 class Script:
     name = None
     """script's internal name derived from title"""
@@ -119,7 +124,7 @@ class Script:
 
     def after_extra_networks_activate(self, p, *args, **kwargs):
         """
-        Calledafter extra networks activation, before conds calculation
+        Called after extra networks activation, before conds calculation
         allow modification of the network after extra networks activation been applied
         won't be call if p.disable_extra_networks
 
@@ -156,6 +161,25 @@ class Script:
 
         pass
 
+    def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, *args, **kwargs):
+        """
+        Same as postprocess_batch(), but receives batch images as a list of 3D tensors instead of a 4D tensor.
+        This is useful when you want to update the entire batch instead of individual images.
+
+        You can modify the postprocessing object (pp) to update the images in the batch, remove images, add images, etc.
+        If the number of images is different from the batch size when returning,
+        then the script has the responsibility to also update the following attributes in the processing object (p):
+          - p.prompts
+          - p.negative_prompts
+          - p.seeds
+          - p.subseeds
+
+        **kwargs will have same items as process_batch, and also:
+          - batch_number - index of current batch, from 0 to number of batches-1
+        """
+
+        pass
+
     def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
         """
         Called for every image after it has been generated.
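Usage illustration (not part of the diff): an always-on extension script built on the conventions above might implement the new callback roughly as below. The script name and the duplication logic are made up; the bookkeeping lists on p are resized to match the grown batch, as the docstring requires.

    from modules import scripts


    class DuplicateBatchExample(scripts.Script):
        def title(self):
            return "Duplicate batch (example)"

        def show(self, is_img2img):
            return scripts.AlwaysVisible

        def postprocess_batch_list(self, p, pp: scripts.PostprocessBatchListArgs, *args, **kwargs):
            # pp.images is a list of 3D tensors; append a second copy of each one
            pp.images = list(pp.images) + list(pp.images)

            # the batch grew, so the per-image bookkeeping on p must grow with it
            p.prompts = p.prompts * 2
            p.negative_prompts = p.negative_prompts * 2
            p.seeds = p.seeds * 2
            p.subseeds = p.subseeds * 2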
@@ -536,6 +560,14 @@ class ScriptRunner:
             except Exception:
                 errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True)
 
+    def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, **kwargs):
+        for script in self.alwayson_scripts:
+            try:
+                script_args = p.script_args[script.args_from:script.args_to]
+                script.postprocess_batch_list(p, pp, *script_args, **kwargs)
+            except Exception:
+                errors.report(f"Error running postprocess_batch_list: {script.filename}", exc_info=True)
+
     def postprocess_image(self, p, pp: PostprocessImageArgs):
         for script in self.alwayson_scripts:
             try:
@@ -243,7 +243,7 @@ class StableDiffusionModelHijack:
         ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward
 
     def undo_hijack(self, m):
-        if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
+        if type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
             m.cond_stage_model = m.cond_stage_model.wrapped
 
         elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
@@ -270,12 +270,17 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
 
         z = self.encode_with_transformers(tokens)
 
+        pooled = getattr(z, 'pooled', None)
+
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
         batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
         original_mean = z.mean()
-        z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+        z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
-        z *= (original_mean / new_mean)
+        z = z * (original_mean / new_mean)
+
+        if pooled is not None:
+            z.pooled = pooled
+
         return z
 
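Aside (illustration only, not part of the diff): the reason pooled is grabbed before the multiplications and reattached afterwards is that the out-of-place `z = z * ...` form returns a new tensor object, and ad-hoc attributes do not carry over to it. A small demonstration (requires torch; the attribute name is just an example):

    import torch

    z = torch.ones(2, 3)
    z.pooled = torch.zeros(2)     # ad-hoc attribute attached to this tensor object

    z2 = z * 0.5                  # out-of-place op returns a brand new tensor
    print(hasattr(z2, "pooled"))  # False: the attribute stayed behind on z

    pooled = getattr(z, "pooled", None)
    z = z * 0.5
    if pooled is not None:
        z.pooled = pooled         # reattach, mirroring the hunk above
    print(hasattr(z, "pooled"))   # True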
@@ -253,7 +253,7 @@ class ExtraNetworksPage:
             "prompt": item.get("prompt", None),
             "tabname": quote_js(tabname),
             "local_preview": quote_js(item["local_preview"]),
-            "name": item["name"],
+            "name": html.escape(item["name"]),
             "description": (item.get("description") or "" if shared.opts.extra_networks_card_show_desc else ""),
             "card_clicked": onclick,
             "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {quote_js(tabname)}, {quote_js(item["local_preview"])})""") + '"',
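A quick illustration of the fix for card names containing `<>` (not part of the diff): escaping the name keeps it from being parsed as HTML when the card template is filled in. The example name is made up:

    import html

    name = "my<lora>card"
    print(name)               # my<lora>card   -> a browser would treat <lora> as a tag
    print(html.escape(name))  # my&lt;lora&gt;card -> renders literally on the card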
@@ -423,15 +423,16 @@ div#extras_scale_to_tab div.form{
 }
 
 table.popup-table{
-    background: white;
+    background: var(--body-background-fill);
+    color: var(--body-text-color);
     border-collapse: collapse;
     margin: 1em;
-    border: 4px solid white;
+    border: 4px solid var(--body-background-fill);
 }
 
 table.popup-table td{
     padding: 0.4em;
-    border: 1px solid #ccc;
+    border: 1px solid rgba(128, 128, 128, 0.5);
     max-width: 36em;
 }
 
webui.py (4 lines changed)
@@ -374,7 +374,7 @@ def api_only():
     api.launch(
         server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1",
         port=cmd_opts.port if cmd_opts.port else 7861,
-        root_path = f"/{cmd_opts.subpath}"
+        root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else ""
    )
 
 
@@ -407,7 +407,7 @@ def webui():
         ssl_verify=cmd_opts.disable_tls_verify,
         debug=cmd_opts.gradio_debug,
         auth=gradio_auth_creds,
-        inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING ') != '1',
+        inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1',
         prevent_thread_lock=True,
         allowed_paths=cmd_opts.gradio_allowed_path,
         app_kwargs={
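Aside (illustration only, not part of the diff): the typo was a trailing space inside the environment-variable name; os.environ treats 'SD_WEBUI_RESTARTING ' and 'SD_WEBUI_RESTARTING' as two different keys, so a value stored under one is invisible under the other. A minimal demonstration:

    import os

    # suppose the variable is set under the un-padded name
    os.environ["SD_WEBUI_RESTARTING"] = "1"

    print(os.getenv("SD_WEBUI_RESTARTING "))  # None -- note the trailing space in the key
    print(os.getenv("SD_WEBUI_RESTARTING"))   # '1'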