fixes for B007

Author: AUTOMATIC
Date: 2023-05-10 11:37:18 +03:00
Parent: 550256db1c
Commit: a5121e7a06

28 changed files with 57 additions and 62 deletions
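
For context: Ruff rule B007 ("Loop control variable not used within loop body") flags a for-loop variable that the body never references. The changes below apply the usual fixes: rename the unused variable to _ (or give it a _ prefix), iterate .values() or the dict itself instead of .items(), or drop enumerate() when the index is unused. A minimal sketch of the pattern, illustrative rather than taken from this commit:

    d = {"a": 1, "b": 2}

    # Flagged by B007: k is bound but never used in the loop body.
    for k, v in d.items():
        print(v)

    # Fix: iterate the values directly so no unused name is bound.
    for v in d.values():
        print(v)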

View File

@@ -88,7 +88,7 @@ class LDSR:
         x_t = None
         logs = None
-        for n in range(n_runs):
+        for _ in range(n_runs):
             if custom_shape is not None:
                 x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
                 x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])

View File

@@ -418,7 +418,7 @@ def infotext_pasted(infotext, params):
    added = []

-    for k, v in params.items():
+    for k in params:
        if not k.startswith("AddNet Model "):
            continue

View File

@@ -132,7 +132,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
        model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
        model.load_state_dict(torch.load(filename), strict=True)
        model.eval()
-        for k, v in model.named_parameters():
+        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)

View File

@@ -848,7 +848,7 @@ class SwinIR(nn.Module):
        H, W = self.patches_resolution
        flops += H * W * 3 * self.embed_dim * 9
        flops += self.patch_embed.flops()
-        for i, layer in enumerate(self.layers):
+        for layer in self.layers:
            flops += layer.flops()
        flops += H * W * 3 * self.embed_dim * self.embed_dim
        flops += self.upsample.flops()

View File

@@ -1001,7 +1001,7 @@ class Swin2SR(nn.Module):
        H, W = self.patches_resolution
        flops += H * W * 3 * self.embed_dim * 9
        flops += self.patch_embed.flops()
-        for i, layer in enumerate(self.layers):
+        for layer in self.layers:
            flops += layer.flops()
        flops += H * W * 3 * self.embed_dim * self.embed_dim
        flops += self.upsample.flops()

View File

@@ -94,7 +94,7 @@ def setup_model(dirname):
            self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
            self.face_helper.align_warp_face()

-            for idx, cropped_face in enumerate(self.face_helper.cropped_faces):
+            for cropped_face in self.face_helper.cropped_faces:
                cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
                normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
                cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer)

View File

@@ -16,9 +16,7 @@ def mod2normal(state_dict):
    # this code is copied from https://github.com/victorca25/iNNfer
    if 'conv_first.weight' in state_dict:
        crt_net = {}
-        items = []
-        for k, v in state_dict.items():
-            items.append(k)
+        items = list(state_dict)

        crt_net['model.0.weight'] = state_dict['conv_first.weight']
        crt_net['model.0.bias'] = state_dict['conv_first.bias']

@@ -52,9 +50,7 @@ def resrgan2normal(state_dict, nb=23):
    if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
        re8x = 0
        crt_net = {}
-        items = []
-        for k, v in state_dict.items():
-            items.append(k)
+        items = list(state_dict)

        crt_net['model.0.weight'] = state_dict['conv_first.weight']
        crt_net['model.0.bias'] = state_dict['conv_first.bias']

View File

@@ -91,7 +91,7 @@ def deactivate(p, extra_network_data):
    """call deactivate for extra networks in extra_network_data in specified order, then call
    deactivate for all remaining registered networks"""

-    for extra_network_name, extra_network_args in extra_network_data.items():
+    for extra_network_name in extra_network_data:
        extra_network = extra_network_registry.get(extra_network_name, None)
        if extra_network is None:
            continue

View File

@@ -247,7 +247,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
        lines.append(lastline)
        lastline = ''

-    for i, line in enumerate(lines):
+    for line in lines:
        line = line.strip()
        if line.startswith("Negative prompt:"):
            done_with_prompt = True

View File

@@ -177,34 +177,34 @@ class Hypernetwork:
    def weights(self):
        res = []

-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                res += layer.parameters()

        return res

    def train(self, mode=True):
-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                layer.train(mode=mode)
                for param in layer.parameters():
                    param.requires_grad = mode

    def to(self, device):
-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                layer.to(device)

        return self

    def set_multiplier(self, multiplier):
-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                layer.multiplier = multiplier

        return self

    def eval(self):
-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                layer.eval()
                for param in layer.parameters():

@@ -619,7 +619,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
    try:
        sd_hijack_checkpoint.add()

-        for i in range((steps-initial_step) * gradient_step):
+        for _ in range((steps-initial_step) * gradient_step):
            if scheduler.finished:
                break
            if shared.state.interrupted:

View File

@@ -149,7 +149,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
        return ImageFont.truetype(Roboto, fontsize)

    def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
-        for i, line in enumerate(lines):
+        for line in lines:
            fnt = initial_fnt
            fontsize = initial_fontsize
            while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:

View File

@@ -207,8 +207,8 @@ class InterrogateModels:
                image_features /= image_features.norm(dim=-1, keepdim=True)

-                for name, topn, items in self.categories():
-                    matches = self.rank(image_features, items, top_count=topn)
+                for cat in self.categories():
+                    matches = self.rank(image_features, cat.items, top_count=cat.topn)
                    for match, score in matches:
                        if shared.opts.interrogate_return_ranks:
                            res += f", ({match}:{score/100:.3f})"

View File

@@ -143,7 +143,7 @@ def get_learned_conditioning(model, prompts, steps):
        conds = model.get_learned_conditioning(texts)

        cond_schedule = []
-        for i, (end_at_step, text) in enumerate(prompt_schedule):
+        for i, (end_at_step, _) in enumerate(prompt_schedule):
            cond_schedule.append(ScheduledPromptConditioning(end_at_step, conds[i]))

        cache[prompt] = cond_schedule

@@ -219,8 +219,8 @@ def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_s
    res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
    for i, cond_schedule in enumerate(c):
        target_index = 0
-        for current, (end_at, cond) in enumerate(cond_schedule):
-            if current_step <= end_at:
+        for current, entry in enumerate(cond_schedule):
+            if current_step <= entry.end_at_step:
                target_index = current
                break

        res[i] = cond_schedule[target_index].cond

@@ -234,13 +234,13 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
    tensors = []
    conds_list = []

-    for batch_no, composable_prompts in enumerate(c.batch):
+    for composable_prompts in c.batch:
        conds_for_batch = []

-        for cond_index, composable_prompt in enumerate(composable_prompts):
+        for composable_prompt in composable_prompts:
            target_index = 0
-            for current, (end_at, cond) in enumerate(composable_prompt.schedules):
-                if current_step <= end_at:
+            for current, entry in enumerate(composable_prompt.schedules):
+                if current_step <= entry.end_at_step:
                    target_index = current
                    break

View File

@@ -95,11 +95,11 @@ def check_pt(filename, extra_handler):

    except zipfile.BadZipfile:

-        # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle
+        # if it's not a zip file, it's an old pytorch format, with five objects written to pickle
        with open(filename, "rb") as file:
            unpickler = RestrictedUnpickler(file)
            unpickler.extra_handler = extra_handler

-            for i in range(5):
+            for _ in range(5):
                unpickler.load()

View File

@@ -231,7 +231,7 @@ def load_scripts():
    syspath = sys.path

    def register_scripts_from_module(module):
-        for key, script_class in module.__dict__.items():
+        for script_class in module.__dict__.values():
            if type(script_class) != type:
                continue

@@ -295,9 +295,9 @@ class ScriptRunner:
        auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()

-        for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data:
-            script = script_class()
-            script.filename = path
+        for script_data in auto_processing_scripts + scripts_data:
+            script = script_data.script_class()
+            script.filename = script_data.path
            script.is_txt2img = not is_img2img
            script.is_img2img = is_img2img

@@ -492,7 +492,7 @@ class ScriptRunner:
            module = script_loading.load_module(script.filename)
            cache[filename] = module

-            for key, script_class in module.__dict__.items():
+            for script_class in module.__dict__.values():
                if type(script_class) == type and issubclass(script_class, Script):
                    self.scripts[si] = script_class()
                    self.scripts[si].filename = filename

View File

@@ -66,9 +66,9 @@ class ScriptPostprocessingRunner:
    def initialize_scripts(self, scripts_data):
        self.scripts = []

-        for script_class, path, basedir, script_module in scripts_data:
-            script: ScriptPostprocessing = script_class()
-            script.filename = path
+        for script_data in scripts_data:
+            script: ScriptPostprocessing = script_data.script_class()
+            script.filename = script_data.path

            if script.name == "Simple Upscale":
                continue

@@ -124,7 +124,7 @@ class ScriptPostprocessingRunner:
            script_args = args[script.args_from:script.args_to]

            process_args = {}
-            for (name, component), value in zip(script.controls.items(), script_args):
+            for (name, component), value in zip(script.controls.items(), script_args):  # noqa B007
                process_args[name] = value

            script.process(pp, **process_args)

View File

@@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
            self.hijack.fixes = [x.fixes for x in batch_chunk]

            for fixes in self.hijack.fixes:
-                for position, embedding in fixes:
+                for position, embedding in fixes:  # noqa: B007
                    used_embeddings[embedding.name] = embedding

            z = self.process_tokens(tokens, multipliers)

View File

@@ -211,7 +211,7 @@ class OptionInfo:

def options_section(section_identifier, options_dict):
-    for k, v in options_dict.items():
+    for v in options_dict.values():
        v.section = section_identifier

    return options_dict

@@ -579,7 +579,7 @@ class Options:
        section_ids = {}
        settings_items = self.data_labels.items()

-        for k, item in settings_items:
+        for _, item in settings_items:
            if item.section not in section_ids:
                section_ids[item.section] = len(section_ids)

@@ -740,7 +740,7 @@ def walk_files(path, allowed_extensions=None):
    if allowed_extensions is not None:
        allowed_extensions = set(allowed_extensions)

-    for root, dirs, files in os.walk(path):
+    for root, _, files in os.walk(path):
        for filename in files:
            if allowed_extensions is not None:
                _, ext = os.path.splitext(filename)

View File

@@ -12,7 +12,7 @@ class LearnScheduleIterator:
        self.it = 0
        self.maxit = 0
        try:
-            for i, pair in enumerate(pairs):
+            for pair in pairs:
                if not pair.strip():
                    continue
                tmp = pair.split(':')

View File

@@ -29,7 +29,7 @@ textual_inversion_templates = {}

def list_textual_inversion_templates():
    textual_inversion_templates.clear()

-    for root, dirs, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
+    for root, _, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
        for fn in fns:
            path = os.path.join(root, fn)

@@ -198,7 +198,7 @@ class EmbeddingDatabase:
        if not os.path.isdir(embdir.path):
            return

-        for root, dirs, fns in os.walk(embdir.path, followlinks=True):
+        for root, _, fns in os.walk(embdir.path, followlinks=True):
            for fn in fns:
                try:
                    fullfn = os.path.join(root, fn)

@@ -215,7 +215,7 @@ class EmbeddingDatabase:
    def load_textual_inversion_embeddings(self, force_reload=False):
        if not force_reload:
            need_reload = False

-            for path, embdir in self.embedding_dirs.items():
+            for embdir in self.embedding_dirs.values():
                if embdir.has_changed():
                    need_reload = True
                    break

@@ -228,7 +228,7 @@ class EmbeddingDatabase:
        self.skipped_embeddings.clear()
        self.expected_shape = self.get_expected_shape()

-        for path, embdir in self.embedding_dirs.items():
+        for embdir in self.embedding_dirs.values():
            self.load_from_dir(embdir)
            embdir.update()

@@ -469,7 +469,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
    try:
        sd_hijack_checkpoint.add()

-        for i in range((steps-initial_step) * gradient_step):
+        for _ in range((steps-initial_step) * gradient_step):
            if scheduler.finished:
                break
            if shared.state.interrupted:

View File

@@ -416,7 +416,7 @@ def create_sampler_and_steps_selection(choices, tabname):

def ordered_ui_categories():
    user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))}

-    for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
+    for _, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
        yield category

@@ -1646,7 +1646,7 @@ def create_ui():
    with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo:
        with gr.Row(elem_id="quicksettings", variant="compact"):
-            for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
+            for _i, k, _item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
                component = create_setting_component(k, is_quicksettings=True)
                component_dict[k] = component

@@ -1673,7 +1673,7 @@ def create_ui():
            outputs=[text_settings, result],
        )

-        for i, k, item in quicksettings_list:
+        for _i, k, _item in quicksettings_list:
            component = component_dict[k]
            info = opts.data_labels[k]

View File

@@ -90,7 +90,7 @@ class ExtraNetworksPage:
        subdirs = {}
        for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]:
-            for root, dirs, files in os.walk(parentdir):
+            for root, dirs, _ in os.walk(parentdir):
                for dirname in dirs:
                    x = os.path.join(root, dirname)

View File

@@ -72,7 +72,7 @@ def cleanup_tmpdr():
    if temp_dir == "" or not os.path.isdir(temp_dir):
        return

-    for root, dirs, files in os.walk(temp_dir, topdown=False):
+    for root, _, files in os.walk(temp_dir, topdown=False):
        for name in files:
            _, extension = os.path.splitext(name)
            if extension != ".png":

View File

@@ -55,7 +55,7 @@ class Upscaler:
        dest_w = int(img.width * scale)
        dest_h = int(img.height * scale)

-        for i in range(3):
+        for _ in range(3):
            shape = (img.width, img.height)
            img = self.do_upscale(img, selected_model)

View File

@@ -20,7 +20,6 @@ ignore = [
    "I001", # Import block is un-sorted or un-formatted
    "C901", # Function is too complex
    "C408", # Rewrite as a literal
-    "B007", # Loop control variable not used within loop body
]
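
With B007 removed from the ignore list, Ruff enforces the rule on every subsequent lint run. Assuming a Ruff version that supports rule selection on the command line, the rule can be checked in isolation with ruff check . --select B007, which should report no findings once the changes in this commit are applied.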

View File

@@ -156,7 +156,7 @@ class Script(scripts.Script):
        images = []
        all_prompts = []
        infotexts = []

-        for n, args in enumerate(jobs):
+        for args in jobs:
            state.job = f"{state.job_no + 1} out of {state.job_count}"
            copy_p = copy.copy(p)

View File

@@ -56,7 +56,7 @@ class Script(scripts.Script):
        work = []

-        for y, h, row in grid.tiles:
+        for _y, _h, row in grid.tiles:
            for tiledata in row:
                work.append(tiledata[2])

@@ -85,7 +85,7 @@ class Script(scripts.Script):
                work_results += processed.images

        image_index = 0
-        for y, h, row in grid.tiles:
+        for _y, _h, row in grid.tiles:
            for tiledata in row:
                tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
                image_index += 1

View File

@@ -704,7 +704,7 @@ class Script(scripts.Script):
        if not include_sub_grids:
            # Done with sub-grids, drop all related information:
-            for sg in range(z_count):
+            for _ in range(z_count):
                del processed.images[1]
                del processed.all_prompts[1]
                del processed.all_seeds[1]