Merge pull request #6700 from Shondoit/weighted-learning

Weighted learning of TIs and HNs

Commit e452facef4
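In short: training images for textual inversion embeddings (TIs) and hypernetworks (HNs) may now carry a per-pixel loss weight in their PNG alpha channel. Fully opaque pixels keep full weight; more transparent pixels contribute less to the loss. The sketch below is not part of the commit (the file names are hypothetical); it shows one way such a weighted training image could be authored with Pillow:

import PIL.Image

# Hypothetical paths: any RGB training image plus a grayscale mask of the same size.
image = PIL.Image.open("train/cat.png").convert("RGB")
mask = PIL.Image.open("train/cat_focus_mask.png").convert("L").resize(image.size)

# Bright mask pixels get the highest loss weight, dark pixels the lowest.
image.putalpha(mask)
image.save("train/cat_weighted.png")

The reconstructed diff follows.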
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -496,7 +496,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
     shared.reload_hypernetworks()


-def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
     from modules import images

@@ -554,7 +554,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi

     pin_memory = shared.opts.pin_memory

-    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
+    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)

     if shared.opts.save_training_settings_to_txt:
         saved_params = dict(
@@ -640,13 +640,19 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi

                 with devices.autocast():
                     x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+                    if use_weight:
+                        w = batch.weight.to(devices.device, non_blocking=pin_memory)
                     if tag_drop_out != 0 or shuffle_tags:
                         shared.sd_model.cond_stage_model.to(devices.device)
                         c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
                         shared.sd_model.cond_stage_model.to(devices.cpu)
                     else:
                         c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
-                    loss = shared.sd_model(x, c)[0] / gradient_step
+                    if use_weight:
+                        loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step
+                        del w
+                    else:
+                        loss = shared.sd_model.forward(x, c)[0] / gradient_step
                     del x
                     del c

--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -1,5 +1,6 @@
 import torch
 from torch.nn.functional import silu
+from types import MethodType

 import modules.textual_inversion.textual_inversion
 from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint
@@ -76,6 +77,54 @@ def fix_checkpoint():
     pass


+def weighted_loss(sd_model, pred, target, mean=True):
+    #Calculate the weight normally, but ignore the mean
+    loss = sd_model._old_get_loss(pred, target, mean=False)
+
+    #Check if we have weights available
+    weight = getattr(sd_model, '_custom_loss_weight', None)
+    if weight is not None:
+        loss *= weight
+
+    #Return the loss, as mean if specified
+    return loss.mean() if mean else loss
+
+def weighted_forward(sd_model, x, c, w, *args, **kwargs):
+    try:
+        #Temporarily append weights to a place accessible during loss calc
+        sd_model._custom_loss_weight = w
+
+        #Replace 'get_loss' with a weight-aware one. Otherwise we need to reimplement 'forward' completely
+        #Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set
+        if not hasattr(sd_model, '_old_get_loss'):
+            sd_model._old_get_loss = sd_model.get_loss
+        sd_model.get_loss = MethodType(weighted_loss, sd_model)
+
+        #Run the standard forward function, but with the patched 'get_loss'
+        return sd_model.forward(x, c, *args, **kwargs)
+    finally:
+        try:
+            #Delete temporary weights if appended
+            del sd_model._custom_loss_weight
+        except AttributeError as e:
+            pass
+
+        #If we have an old loss function, reset the loss function to the original one
+        if hasattr(sd_model, '_old_get_loss'):
+            sd_model.get_loss = sd_model._old_get_loss
+            del sd_model._old_get_loss
+
+def apply_weighted_forward(sd_model):
+    #Add new function 'weighted_forward' that can be called to calc weighted loss
+    sd_model.weighted_forward = MethodType(weighted_forward, sd_model)
+
+def undo_weighted_forward(sd_model):
+    try:
+        del sd_model.weighted_forward
+    except AttributeError as e:
+        pass
+
+
 class StableDiffusionModelHijack:
     fixes = None
     comments = []
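The hijack above temporarily swaps the model's get_loss method for a weight-aware one, so the stock forward pass never has to be reimplemented. A minimal, self-contained sketch of the same pattern on a toy model (not the webui's classes), showing that a weight map with mean 1 keeps the loss on a comparable scale while re-balancing where the error counts:

import torch
from types import MethodType

class ToyModel:
    def get_loss(self, pred, target, mean=True):
        loss = (pred - target) ** 2
        return loss.mean() if mean else loss

    def forward(self, x, c):
        return self.get_loss(x, c)

def weighted_loss(model, pred, target, mean=True):
    # Compute the unreduced loss with the saved original method...
    loss = model._old_get_loss(pred, target, mean=False)
    # ...scale it by the stashed per-element weights...
    weight = getattr(model, '_custom_loss_weight', None)
    if weight is not None:
        loss *= weight
    # ...and only then reduce.
    return loss.mean() if mean else loss

def weighted_forward(model, x, c, w):
    try:
        model._custom_loss_weight = w
        model._old_get_loss = model.get_loss
        model.get_loss = MethodType(weighted_loss, model)
        return model.forward(x, c)
    finally:
        del model._custom_loss_weight
        model.get_loss = model._old_get_loss
        del model._old_get_loss

model = ToyModel()
x = torch.zeros(4)
c = torch.tensor([1.0, 3.0, 1.0, 1.0])
w = torch.tensor([2.0, 0.0, 1.0, 1.0])  # mean 1.0, so the overall scale is preserved
print(model.forward(x, c))               # tensor(3.) -- unweighted
print(weighted_forward(model, x, c, w))  # tensor(1.) -- big error at index 1 masked out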
@@ -104,6 +153,7 @@ class StableDiffusionModelHijack:
             m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self)
             m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)

+        apply_weighted_forward(m)
         if m.cond_stage_key == "edit":
             sd_hijack_unet.hijack_ddpm_edit()

@@ -135,6 +185,7 @@ class StableDiffusionModelHijack:
             m.cond_stage_model = m.cond_stage_model.wrapped

         undo_optimizations()
+        undo_weighted_forward(m)

         self.apply_circular(False)
         self.layers = None
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -19,9 +19,10 @@ re_numbers_at_start = re.compile(r"^[-\d]+\s*")


 class DatasetEntry:
-    def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None):
+    def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None, weight=None):
         self.filename = filename
         self.filename_text = filename_text
+        self.weight = weight
         self.latent_dist = latent_dist
         self.latent_sample = latent_sample
         self.cond = cond
@@ -30,7 +31,7 @@ class DatasetEntry:


 class PersonalizedBase(Dataset):
-    def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False):
+    def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False, use_weight=False):
         re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None

         self.placeholder_token = placeholder_token
@@ -56,10 +57,16 @@ class PersonalizedBase(Dataset):

         print("Preparing dataset...")
         for path in tqdm.tqdm(self.image_paths):
+            alpha_channel = None
             if shared.state.interrupted:
                 raise Exception("interrupted")
             try:
-                image = Image.open(path).convert('RGB')
+                image = Image.open(path)
+                #Currently does not work for single color transparency
+                #We would need to read image.info['transparency'] for that
+                if use_weight and 'A' in image.getbands():
+                    alpha_channel = image.getchannel('A')
+                image = image.convert('RGB')
                 if not varsize:
                     image = image.resize((width, height), PIL.Image.BICUBIC)
             except Exception:
@@ -87,17 +94,35 @@ class PersonalizedBase(Dataset):
             with devices.autocast():
                 latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0))

-            if latent_sampling_method == "once" or (latent_sampling_method == "deterministic" and not isinstance(latent_dist, DiagonalGaussianDistribution)):
-                latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu)
-                latent_sampling_method = "once"
-                entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample)
-            elif latent_sampling_method == "deterministic":
-                # Works only for DiagonalGaussianDistribution
-                latent_dist.std = 0
-                latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu)
-                entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample)
-            elif latent_sampling_method == "random":
-                entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist)
+            #Perform latent sampling, even for random sampling.
+            #We need the sample dimensions for the weights
+            if latent_sampling_method == "deterministic":
+                if isinstance(latent_dist, DiagonalGaussianDistribution):
+                    # Works only for DiagonalGaussianDistribution
+                    latent_dist.std = 0
+                else:
+                    latent_sampling_method = "once"
+            latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu)
+
+            if use_weight and alpha_channel is not None:
+                channels, *latent_size = latent_sample.shape
+                weight_img = alpha_channel.resize(latent_size)
+                npweight = np.array(weight_img).astype(np.float32)
+                #Repeat for every channel in the latent sample
+                weight = torch.tensor([npweight] * channels).reshape([channels] + latent_size)
+                #Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default.
+                weight -= weight.min()
+                weight /= weight.mean()
+            elif use_weight:
+                #If an image does not have a alpha channel, add a ones weight map anyway so we can stack it later
+                weight = torch.ones([channels] + latent_size)
+            else:
+                weight = None
+
+            if latent_sampling_method == "random":
+                entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight)
+            else:
+                entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample, weight=weight)

             if not (self.tag_drop_out != 0 or self.shuffle_tags):
                 entry.cond_text = self.create_text(filename_text)
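The two in-place operations at the end of the weight branch normalize the map to a minimum of 0 and a mean of 1, so weighted and unweighted runs produce losses on a comparable scale. A small sketch of that arithmetic on a dummy alpha channel (the shapes are illustrative only; the real ones come from latent_sample.shape):

import numpy as np
import torch

# Dummy 8x8 "alpha channel" with values 0..255 and 4 latent channels.
channels = 4
npweight = np.linspace(0, 255, 64, dtype=np.float32).reshape(8, 8)

weight = torch.tensor(np.stack([npweight] * channels))
weight -= weight.min()   # shift so the most transparent pixels weigh 0
weight /= weight.mean()  # rescale so the average weight is exactly 1

print(weight.min().item(), weight.mean().item())  # 0.0 1.0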
@@ -110,6 +135,7 @@ class PersonalizedBase(Dataset):
             del torchdata
             del latent_dist
             del latent_sample
+            del weight

         self.length = len(self.dataset)
         self.groups = list(groups.values())
@@ -195,6 +221,10 @@ class BatchLoader:
         self.cond_text = [entry.cond_text for entry in data]
         self.cond = [entry.cond for entry in data]
         self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1)
+        if all(entry.weight is not None for entry in data):
+            self.weight = torch.stack([entry.weight for entry in data]).squeeze(1)
+        else:
+            self.weight = None
         #self.emb_index = [entry.emb_index for entry in data]
         #print(self.latent_sample.device)

--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -351,7 +351,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
     assert log_directory, "Log directory is empty"


-def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     save_embedding_every = save_embedding_every or 0
     create_image_every = create_image_every or 0
     template_file = textual_inversion_templates.get(template_filename, None)
@@ -410,7 +410,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st

     pin_memory = shared.opts.pin_memory

-    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
+    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)

     if shared.opts.save_training_settings_to_txt:
         save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
@@ -480,6 +480,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st

                 with devices.autocast():
                     x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+                    if use_weight:
+                        w = batch.weight.to(devices.device, non_blocking=pin_memory)
                     c = shared.sd_model.cond_stage_model(batch.cond_text)

                     if is_training_inpainting_model:
@@ -490,7 +492,11 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                     else:
                         cond = c

-                    loss = shared.sd_model(x, cond)[0] / gradient_step
+                    if use_weight:
+                        loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step
+                        del w
+                    else:
+                        loss = shared.sd_model.forward(x, cond)[0] / gradient_step
                     del x

                     _loss_step += loss.item()
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1191,6 +1191,8 @@ def create_ui():
                     create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
                     save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")

+                    use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")
+
                     save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
                     preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")

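Gradio passes component values to a callback positionally, which is why use_weight must sit at the same index in the two inputs lists below as it does in the train_embedding and train_hypernetwork signatures. A minimal sketch of that wiring pattern (toy callback, not the webui UI):

import gradio as gr

def train(latent_sampling_method, use_weight, create_image_every):
    # Stand-in for train_embedding/train_hypernetwork; arguments arrive in list order.
    return f"weighted training: {use_weight}"

with gr.Blocks() as demo:
    latent_sampling_method = gr.Dropdown(label="Latent sampling method", choices=["once", "deterministic", "random"], value="once")
    use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False)
    create_image_every = gr.Number(label="Save an image every N steps", value=500, precision=0)
    result = gr.Textbox()
    gr.Button("Train").click(fn=train, inputs=[latent_sampling_method, use_weight, create_image_every], outputs=[result])

demo.launch()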
@@ -1304,6 +1306,7 @@ def create_ui():
                 shuffle_tags,
                 tag_drop_out,
                 latent_sampling_method,
+                use_weight,
                 create_image_every,
                 save_embedding_every,
                 template_file,
@@ -1337,6 +1340,7 @@ def create_ui():
                 shuffle_tags,
                 tag_drop_out,
                 latent_sampling_method,
+                use_weight,
                 create_image_every,
                 save_embedding_every,
                 template_file,