autofixes from ruff
Commit 762265eab5, parent a617d64882
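The hunks below are mechanical fixes. As a rough guide to the patterns being rewritten, here is a small illustrative sketch (not code from this repository; the function names are made up) of what ruff's autofixer, invoked with something like "ruff check . --fix", changes: f-strings without placeholders (F541), equality comparisons against None/False (E711/E712), "not x in y" membership tests (E713), and exception aliases that are bound but never used.

# Illustrative only -- these functions are not part of the webui codebase.

def before(value, items):
    label = f"Loading B"        # F541: f-string without placeholders
    if value == None:           # E711: comparison to None
        return label
    if not value in items:      # E713: membership test written as "not ... in"
        return None
    try:
        return items[value]
    except KeyError as e:       # exception alias bound but never used
        return None


def after(value, items):
    # the same function once the autofixes are applied
    label = "Loading B"
    if value is None:
        return label
    if value not in items:
        return None
    try:
        return items[value]
    except KeyError:
        return None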
@@ -110,7 +110,6 @@ class LDSR:
         diffusion_steps = int(steps)
         eta = 1.0

         down_sample_method = 'Lanczos'

         gc.collect()
         if torch.cuda.is_available:
@@ -165,7 +165,7 @@ class VQModel(pl.LightningModule):
     def validation_step(self, batch, batch_idx):
         log_dict = self._validation_step(batch, batch_idx)
         with self.ema_scope():
-            log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
+            self._validation_step(batch, batch_idx, suffix="_ema")
         return log_dict

     def _validation_step(self, batch, batch_idx, suffix=""):
@@ -60,7 +60,7 @@ def decode_base64_to_image(encoding):
     try:
         image = Image.open(BytesIO(base64.b64decode(encoding)))
         return image
-    except Exception as err:
+    except Exception:
         raise HTTPException(status_code=500, detail="Invalid encoded image")

 def encode_pil_to_base64(image):
@@ -264,11 +264,11 @@ class Api:
         if request.alwayson_scripts and (len(request.alwayson_scripts) > 0):
             for alwayson_script_name in request.alwayson_scripts.keys():
                 alwayson_script = self.get_script(alwayson_script_name, script_runner)
-                if alwayson_script == None:
+                if alwayson_script is None:
                     raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
                 # Selectable script in always on script param check
-                if alwayson_script.alwayson == False:
-                    raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params")
+                if alwayson_script.alwayson is False:
+                    raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
                 # always on script with no arg should always run so you don't really need to add them to the requests
                 if "args" in request.alwayson_scripts[alwayson_script_name]:
                     # min between arg length in scriptrunner and arg length in the request
@@ -310,7 +310,7 @@ class Api:
         p.outpath_samples = opts.outdir_txt2img_samples

         shared.state.begin()
-        if selectable_scripts != None:
+        if selectable_scripts is not None:
             p.script_args = script_args
             processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
         else:
@@ -367,7 +367,7 @@ class Api:
         p.outpath_samples = opts.outdir_img2img_samples

         shared.state.begin()
-        if selectable_scripts != None:
+        if selectable_scripts is not None:
             p.script_args = script_args
             processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
         else:
@@ -642,7 +642,7 @@ class Api:
             sd_hijack.apply_optimizations()
             shared.state.end()
             return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
-        except AssertionError as msg:
+        except AssertionError:
             shared.state.end()
             return TrainResponse(info=f"train embedding error: {error}")

@@ -136,14 +136,14 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
     result_is_instruct_pix2pix_model = False

     if theta_func2:
-        shared.state.textinfo = f"Loading B"
+        shared.state.textinfo = "Loading B"
         print(f"Loading {secondary_model_info.filename}...")
         theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
     else:
         theta_1 = None

     if theta_func1:
-        shared.state.textinfo = f"Loading C"
+        shared.state.textinfo = "Loading C"
         print(f"Loading {tertiary_model_info.filename}...")
         theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')

@@ -409,13 +409,13 @@ class FilenameGenerator:
         time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
         try:
             time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
-        except pytz.exceptions.UnknownTimeZoneError as _:
+        except pytz.exceptions.UnknownTimeZoneError:
             time_zone = None

         time_zone_time = time_datetime.astimezone(time_zone)
         try:
             formatted_time = time_zone_time.strftime(time_format)
-        except (ValueError, TypeError) as _:
+        except (ValueError, TypeError):
             formatted_time = time_zone_time.strftime(self.default_time_format)

         return sanitize_filename_part(formatted_time, replace_spaces=False)
@@ -59,7 +59,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
             # try to find corresponding mask for an image using simple filename matching
             mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
             # if not found use first one ("same mask for all images" use-case)
-            if not mask_image_path in inpaint_masks:
+            if mask_image_path not in inpaint_masks:
                 mask_image_path = inpaint_masks[0]
             mask_image = Image.open(mask_image_path)
             p.image_mask = mask_image
@@ -92,7 +92,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
     def get_schedule(prompt):
         try:
             tree = schedule_parser.parse(prompt)
-        except lark.exceptions.LarkError as e:
+        except lark.exceptions.LarkError:
             if 0:
                 import traceback
                 traceback.print_exc()
@@ -134,6 +134,6 @@ def get_realesrgan_models(scaler):
             ),
         ]
         return models
-    except Exception as e:
+    except Exception:
         print("Error making Real-ESRGAN models list:", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
@@ -61,7 +61,7 @@ class DisableInitialization:
                 if res is None:
                     res = original(url, *args, local_files_only=False, **kwargs)
                 return res
-            except Exception as e:
+            except Exception:
                 return original(url, *args, local_files_only=False, **kwargs)

         def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs):
@@ -118,7 +118,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs):
         try:
             #Delete temporary weights if appended
             del sd_model._custom_loss_weight
-        except AttributeError as e:
+        except AttributeError:
             pass

         #If we have an old loss function, reset the loss function to the original one
@@ -133,7 +133,7 @@ def apply_weighted_forward(sd_model):
 def undo_weighted_forward(sd_model):
     try:
         del sd_model.weighted_forward
-    except AttributeError as e:
+    except AttributeError:
         pass


@@ -10,4 +10,4 @@ def should_hijack_ip2p(checkpoint_info):
     ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
     cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()

-    return "pix2pix" in ckpt_basename and not "pix2pix" in cfg_basename
+    return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename
@@ -296,7 +296,6 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_
     if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
         # the big matmul fits into our memory limit; do everything in 1 chunk,
         # i.e. send it down the unchunked fast-path
         query_chunk_size = q_tokens
         kv_chunk_size = k_tokens

     with devices.without_autocast(disable=q.dtype == v.dtype):
@@ -239,7 +239,7 @@ def read_metadata_from_safetensors(filename):
             if isinstance(v, str) and v[0:1] == '{':
                 try:
                     res[k] = json.loads(v)
-                except Exception as e:
+                except Exception:
                     pass

     return res
@@ -467,7 +467,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
     try:
         with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
             sd_model = instantiate_from_config(sd_config.model)
-    except Exception as e:
+    except Exception:
         pass

     if sd_model is None:
@@ -544,7 +544,7 @@ def reload_model_weights(sd_model=None, info=None):

     try:
         load_model_weights(sd_model, checkpoint_info, state_dict, timer)
-    except Exception as e:
+    except Exception:
         print("Failed to load checkpoint, restoring previous")
         load_model_weights(sd_model, current_checkpoint_info, None, timer)
         raise
@@ -603,7 +603,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st

     try:
         vectorSize = list(data['string_to_param'].values())[0].shape[0]
-    except Exception as e:
+    except Exception:
         vectorSize = '?'

     checkpoint = sd_models.select_checkpoint()
@@ -246,7 +246,7 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
                 all_seeds = gen_info.get('all_seeds', [-1])
                 res = all_seeds[index if 0 <= index < len(all_seeds) else 0]

-        except json.decoder.JSONDecodeError as e:
+        except json.decoder.JSONDecodeError:
             if gen_info_string != '':
                 print("Error parsing JSON generation info:", file=sys.stderr)
                 print(gen_info_string, file=sys.stderr)
@@ -736,8 +736,8 @@ def create_ui():
                     with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
                         hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
                         gr.HTML(
-                            f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
-                            f"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+                            "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+                            "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
                             f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
                             f"{hidden}</p>"
                         )
@@ -746,7 +746,6 @@ def create_ui():
                         img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")

                     img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]
                     img2img_image_inputs = [init_img, sketch, init_img_with_mask, inpaint_color_sketch]

                     for i, tab in enumerate(img2img_tabs):
                         tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])
@@ -1290,8 +1289,8 @@ def create_ui():

                 with gr.Column(elem_id='ti_gallery_container'):
                     ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
-                    ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
-                    ti_progress = gr.HTML(elem_id="ti_progress", value="")
+                    gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
+                    gr.HTML(elem_id="ti_progress", value="")
                     ti_outcome = gr.HTML(elem_id="ti_error", value="")

         create_embedding.click(
@@ -1668,7 +1667,7 @@ def create_ui():
                     interface.render()

         if os.path.exists(os.path.join(script_path, "notification.mp3")):
-            audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
+            gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)

         footer = shared.html("footer.html")
         footer = footer.format(versions=versions_html())
@@ -490,7 +490,7 @@ def create_ui():
     config_states.list_config_states()

     with gr.Blocks(analytics_enabled=False) as ui:
-        with gr.Tabs(elem_id="tabs_extensions") as tabs:
+        with gr.Tabs(elem_id="tabs_extensions"):
            with gr.TabItem("Installed", id="installed"):

                with gr.Row(elem_id="extensions_installed_top"):
@@ -263,7 +263,7 @@ def create_ui(container, button, tabname):
     ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy())
     ui.tabname = tabname

-    with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs:
+    with gr.Tabs(elem_id=tabname+"_extra_tabs"):
         for page in ui.stored_extra_pages:
             page_id = page.title.lower().replace(" ", "_")

@@ -2,7 +2,9 @@

 ignore = [
     "E501",
-    "E731"
+    "E731",
+    "E402", # Module level import not at top of file
+    "F401" # Module imported but unused
 ]

 exclude = ["extensions"]
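For context, a hedged sketch of how a config like the one above is typically exercised; the helper below is hypothetical (not part of the repository) and assumes ruff is installed and discovers the project config from the working directory.

# Hypothetical helper, not from the repo: runs ruff against the checkout.
import subprocess

def run_ruff(fix: bool = False) -> int:
    # "ruff check ." respects the ignore list above (E501, E731, E402, F401);
    # passing --fix applies the same kind of autofixes shown in this commit.
    cmd = ["ruff", "check", "."]
    if fix:
        cmd.append("--fix")
    return subprocess.run(cmd, check=False).returncode

if __name__ == "__main__":
    raise SystemExit(run_ruff())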
@@ -72,7 +72,7 @@ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.0
     height = _np_src_image.shape[1]
     num_channels = _np_src_image.shape[2]

-    np_src_image = _np_src_image[:] * (1. - np_mask_rgb)
+    _np_src_image[:] * (1. - np_mask_rgb)
     np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.)
     img_mask = np_mask_grey > 1e-6
     ref_mask = np_mask_grey < 1e-3
@@ -98,13 +98,13 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
         assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}'

         upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
-        pp.info[f"Postprocess upscaler"] = upscaler1.name
+        pp.info["Postprocess upscaler"] = upscaler1.name

         if upscaler2 and upscaler_2_visibility > 0:
             second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
             upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility)

-            pp.info[f"Postprocess upscaler 2"] = upscaler2.name
+            pp.info["Postprocess upscaler 2"] = upscaler2.name

         pp.image = upscaled_image

@@ -134,4 +134,4 @@ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
         assert upscaler1, f'could not find upscaler named {upscaler_name}'

         pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
-        pp.info[f"Postprocess upscaler"] = upscaler1.name
+        pp.info["Postprocess upscaler"] = upscaler1.name
@@ -316,7 +316,7 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
         return Processed(p, [])

     z_count = len(zs)
     sub_grids = [None] * z_count

     for i in range(z_count):
         start_index = (i * len(xs) * len(ys)) + i
         end_index = start_index + len(xs) * len(ys)
webui.py
@@ -360,7 +360,7 @@ def webui():
         if cmd_opts.subpath:
             redirector = FastAPI()
             redirector.get("/")
-            mounted_app = gradio.mount_gradio_app(redirector, shared.demo, path=f"/{cmd_opts.subpath}")
+            gradio.mount_gradio_app(redirector, shared.demo, path=f"/{cmd_opts.subpath}")

         wait_on_server(shared.demo)
         print('Restarting UI...')