Merge pull request #189 from bmaltais/LR-Free

v20.7.3
bmaltais 2023-02-17 19:18:39 -05:00 committed by GitHub
commit 48122347a3
7 changed files with 246 additions and 71 deletions

View File

@ -163,11 +163,14 @@ This will store a backup file with your currently installed pip packages
## Change History
* 2023/02/16 (v20.7.3)
- Noise offset is recorded in the metadata. Thanks to space-nuko!
- Show the moving average loss in `train_network.py` and `train_db.py` so the reported loss does not jump around. Thanks to shirayu!
* 2023/02/11 (v20.7.2):
- `lora_interrogator.py` is added in `networks` folder. See `python networks\lora_interrogator.py -h` for usage.
- For LoRAs where the activation word is unknown, this script compares the Text Encoder output with and without the LoRA applied to find out which tokens the LoRA affects; hopefully that lets you figure out the activation word (see the sketch after this list). LoRAs trained with captions do not seem to be interrogable.
- Batch size can be large (like 64 or 128).
- `train_textual_inversion.py` now supports multiple init words.
- The following feature has been reverted to its previous behavior. Sorry for the confusion:
> Now the number of items in each batch is limited to the number of actual (non-duplicated) images. Because a bucket may contain fewer actual images than the batch size, the batch may otherwise contain the same (duplicated) images.
- Added a new tool that sorts the images in a dataset into groups by aspect ratio and crops each group to its average aspect ratio
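As a rough illustration of the interrogator idea above (a minimal sketch, not the actual `networks/lora_interrogator.py`; `encode_tokens` is a hypothetical helper): embed each candidate token with and without the LoRA applied, then rank tokens by how far their embeddings move.

```python
import torch

def interrogate(encode_tokens, token_ids, top_k=10):
    """Rank token ids by how strongly a LoRA shifts their Text Encoder output.

    encode_tokens(token_ids, lora_enabled) -> (n_tokens, dim) tensor is a
    hypothetical helper that embeds each token id individually.
    """
    with torch.no_grad():
        base = encode_tokens(token_ids, lora_enabled=False)   # without LoRA
        tuned = encode_tokens(token_ids, lora_enabled=True)   # with LoRA
    shift = (tuned - base).norm(dim=-1)          # per-token shift magnitude
    scores, order = shift.sort(descending=True)  # most affected tokens first
    return [(int(token_ids[int(i)]), float(scores[n]))
            for n, i in enumerate(order[:top_k])]
```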

View File

@ -14,6 +14,9 @@ from diffusers import DDPMScheduler
import library.train_util as train_util
import torch.optim as optim
import dadaptation
def collate_fn(examples):
return examples[0]
@ -162,7 +165,9 @@ def train(args):
optimizer_class = torch.optim.AdamW
# beta and weight decay appear to use the default values in both diffusers DreamBooth and DreamBooth SD, so those options are omitted for now
# optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)
print('enable dadaptation.')
optimizer = dadaptation.DAdaptAdam(params_to_optimize, lr=1.0, decouple=True, weight_decay=0)
# prepare the dataloader
# num_workers=0 makes the DataLoader run in the main process
@ -176,8 +181,20 @@ def train(args):
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
# prepare the lr scheduler
# lr_scheduler = diffusers.optimization.get_scheduler(
# args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)
# For Adam
# lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
# lr_lambda=[lambda epoch: 1],
# last_epoch=-1,
# verbose=False)
# For SGD optim
lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
lr_lambda=[lambda epoch: 1],
last_epoch=-1,
verbose=True)
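For context, here is a minimal, self-contained sketch of the D-Adaptation pattern used above (and again in `train_textual_inversion.py` further down), assuming the `dadaptation` package; the toy model and loss are placeholders. The optimizer estimates the step size `d` itself, so `lr` is pinned at 1.0, the scheduler is a constant `LambdaLR`, and the effective learning rate reported as `dlr` in the logs below is `d * lr`.

```python
import torch
import torch.optim as optim
import dadaptation

model = torch.nn.Linear(16, 1)  # placeholder for the real trainable parameters

# lr=1.0 because D-Adaptation scales steps by its own estimate d;
# decouple=True selects AdamW-style decoupled weight decay
optimizer = dadaptation.DAdaptAdam(model.parameters(), lr=1.0,
                                   decouple=True, weight_decay=0)

# constant schedule: the scheduler must not fight the adaptive step size
lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda epoch: 1])

for step in range(3):
    loss = model(torch.randn(4, 16)).pow(2).mean()  # placeholder loss
    loss.backward()
    optimizer.step()
    lr_scheduler.step()
    optimizer.zero_grad()
    group = optimizer.param_groups[0]
    print(step, group['d'] * group['lr'])  # the "dlr" value logged below
```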
# experimental feature: train in fp16 including gradients; casts the entire model to fp16
if args.full_fp16:
@ -293,12 +310,16 @@ def train(args):
current_loss = loss.detach().item()  # this is already a mean, so batch size should not matter
if args.logging_dir is not None:
logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
# logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
# accelerator.log(logs, step=global_step)
logs = {"loss": current_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
accelerator.log(logs, step=global_step)
loss_total += current_loss
avr_loss = loss_total / (step+1)
logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
# logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
# progress_bar.set_postfix(**logs)
logs = {"avg_loss": avr_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']} # , "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
if global_step >= args.max_train_steps:

View File

@ -22,5 +22,7 @@ fairscale==0.4.13
tensorflow==2.10.1
huggingface-hub==0.12.0
xformers @ https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl
# for dadaptation
dadaptation
# for kohya_ss library
.

View File

@ -7,6 +7,7 @@
import os
import cv2
import argparse
import shutil
def aspect_ratio(img_path):
"""Return aspect ratio of an image"""
@ -19,7 +20,7 @@ def sort_images_by_aspect_ratio(path):
"""Sort all images in a folder by aspect ratio"""
images = []
for filename in os.listdir(path):
if filename.endswith(".jpg") or filename.endswith(".jpeg") or filename.endswith(".png"):
if filename.endswith(".jpg") or filename.endswith(".jpeg") or filename.endswith(".png") or filename.endswith(".webp"):
img_path = os.path.join(path, filename)
images.append((img_path, aspect_ratio(img_path)))
# sort the list of tuples based on the aspect ratio
@ -38,9 +39,22 @@ def average_aspect_ratio(group):
"""Calculate average aspect ratio for a group"""
aspect_ratios = [aspect_ratio for _, aspect_ratio in group]
avg_aspect_ratio = sum(aspect_ratios) / len(aspect_ratios)
print(f"Average aspect ratio for group: {avg_aspect_ratio}")
return avg_aspect_ratio
def center_crop_image(image, target_aspect_ratio):
"""Crop the input image to the target aspect ratio.
The function calculates the crop region for the input image based on its current aspect ratio and the target aspect ratio.
Args:
image: A numpy array representing the input image.
target_aspect_ratio: A float representing the target aspect ratio.
Returns:
A numpy array representing the cropped image.
"""
height, width = image.shape[:2]
current_aspect_ratio = float(width) / float(height)
@ -58,45 +72,111 @@ def center_crop_image(image, target_aspect_ratio):
return cropped_image
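The body of `center_crop_image` is elided by the hunk above; below is a hedged reconstruction of what such a center crop has to do (a sketch consistent with the docstring, not necessarily the file's exact code).

```python
import numpy as np  # the images are numpy arrays from cv2.imread

def center_crop_image_sketch(image: np.ndarray, target_aspect_ratio: float) -> np.ndarray:
    height, width = image.shape[:2]
    current_aspect_ratio = float(width) / float(height)
    if current_aspect_ratio > target_aspect_ratio:
        # too wide: keep full height, trim equally from left and right
        new_width = int(height * target_aspect_ratio)
        x0 = (width - new_width) // 2
        return image[:, x0:x0 + new_width]
    # too tall (or already matching): keep full width, trim top and bottom
    new_height = int(width / target_aspect_ratio)
    y0 = (height - new_height) // 2
    return image[y0:y0 + new_height]
```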
def copy_related_files(img_path, save_path):
"""
Copy all files in the same directory as the input image that have the same base name as the input image to the
output directory with the corresponding new filename.
:param img_path: Path to the input image.
:param save_path: Path to the output image.
"""
# Get the base filename and directory
img_dir, img_basename = os.path.split(img_path)
img_base, img_ext = os.path.splitext(img_basename)
save_dir, save_basename = os.path.split(save_path)
save_base, save_ext = os.path.splitext(save_basename)
# Create the output directory if it does not exist
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Loop over all files in the same directory as the input image
try:
for filename in os.listdir(img_dir):
# Skip files with the same name as the input image
if filename == img_basename:
continue
# Check if the file has the same base name as the input image
file_base, file_ext = os.path.splitext(filename)
if file_base == img_base:
# Build the new filename and copy the file
new_filename = os.path.join(save_dir, f"{save_base}{file_ext}")
shutil.copy2(os.path.join(img_dir, filename), new_filename)
except OSError as e:
print(f"Error: {e}") # Handle errors from os.listdir()
def save_resized_cropped_images(group, folder_name, group_number, avg_aspect_ratio, use_original_name=False):
"""Crop and resize all images in the input group to the smallest resolution, and save them to a folder.
Args:
group: A list of tuples, where each tuple contains the path to an image and its aspect ratio.
folder_name: A string representing the name of the folder to save the images to.
group_number: An integer representing the group number.
avg_aspect_ratio: A float representing the average aspect ratio of the images in the group.
use_original_name: A boolean indicating whether to save the images with their original file names.
"""
if not os.path.exists(folder_name):
os.makedirs(folder_name)
# get the smallest size of the images
small_height = 0
small_width = 0
smallest_res = float("inf")
for img_path, _ in group:
image = cv2.imread(img_path)
cropped_image = center_crop_image(image, avg_aspect_ratio)
height, width = cropped_image.shape[:2]
image_res = height * width
if image_res < smallest_res:
smallest_res = image_res
small_height, small_width = height, width
# resize all images to the smallest resolution of the images in the group
for i, (img_path, aspect_ratio) in enumerate(group):
image = cv2.imread(img_path)
cropped_image = center_crop_image(image, avg_aspect_ratio)
resized_image = cv2.resize(cropped_image, (small_width, small_height))
save_path = os.path.join(folder_name, "group_{}_{}.jpg".format(group_number, i))
if use_original_name:
save_name = os.path.basename(img_path)
else:
save_name = f"group_{group_number}_{i}.jpg"
save_path = os.path.join(folder_name, save_name)
cv2.imwrite(save_path, resized_image)
# Copy any related files that share img_path's base name to the save location
copy_related_files(img_path, save_path)
print(f"Saved {save_name} to {folder_name}")
def main():
parser = argparse.ArgumentParser(description='Sort images and crop them based on aspect ratio')
parser.add_argument('input_dir', type=str, help='Path to the directory containing images')
parser.add_argument('output_dir', type=str, help='Path to the directory to save the cropped images')
parser.add_argument('batch_size', type=int, help='Size of the batches to create')
parser.add_argument('--use_original_name', action='store_true', help='Whether to use original file names for the saved images')
args = parser.parse_args()
print(f"Sorting images by aspect ratio in {args.input_dir}...")
if not os.path.exists(args.input_dir):
print(f"Error: Input directory does not exist: {args.input_dir}")
return
if not os.path.exists(args.output_dir):
try:
os.makedirs(args.output_dir)
except OSError:
print(f"Error: Failed to create output directory: {args.output_dir}")
return
sorted_images = sort_images_by_aspect_ratio(args.input_dir)
total_images = len(sorted_images)
print(f'Total images: {total_images}')
if args.batch_size <= 0:
print("Error: Batch size must be greater than 0")
return
group_size = total_images // args.batch_size
@ -111,11 +191,18 @@ def main():
print('Creating groups...')
groups = create_groups(sorted_images, group_size)
print(f"Created {len(groups)} groups")
print('Saving cropped and resized images...')
for i, group in enumerate(groups):
avg_aspect_ratio = average_aspect_ratio(group)
print(f"Processing group {i+1} with {len(group)} images...")
try:
save_resized_cropped_images(group, args.output_dir, i+1, avg_aspect_ratio, args.use_original_name)
except Exception as e:
print(f"Error: Failed to save images in group {i+1}: {e}")
print('Done')
if __name__ == '__main__':
main()
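With the argparse change above, the tool is now invoked with positional arguments, e.g. `python group_images.py ./input ./output 4 --use_original_name` (the script's file name is not shown in this diff, so `group_images.py` is a placeholder; `4` is the `batch_size` argument).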

View File

@ -206,6 +206,8 @@ def train(args):
if accelerator.is_main_process:
accelerator.init_trackers("dreambooth")
loss_list = []
loss_total = 0.0
for epoch in range(num_train_epochs):
print(f"epoch {epoch+1}/{num_train_epochs}")
train_dataset.set_current_epoch(epoch + 1)
@ -216,7 +218,6 @@ def train(args):
if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
text_encoder.train()
for step, batch in enumerate(train_dataloader):
# stop training the Text Encoder at the specified step
if global_step == args.stop_text_encoder_training:
@ -233,10 +234,13 @@ def train(args):
else:
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
b_size = latents.shape[0]
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
# https://www.crosslabs.org//blog/diffusion-with-offset-noise
noise += args.noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)
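A shape note on the offset-noise line above (a minimal sketch; `0.1` is an arbitrary example value, not a recommended setting): the extra tensor has shape `(B, C, 1, 1)`, so each sample and channel gets a single constant offset broadcast across every spatial position.

```python
import torch

latents = torch.randn(2, 4, 64, 64)   # (B, C, H, W)
noise = torch.randn_like(latents)
noise_offset = 0.1                     # arbitrary example value
# one offset per (sample, channel), broadcast over H and W
noise += noise_offset * torch.randn(latents.shape[0], latents.shape[1], 1, 1)
```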
# Get the text embedding for conditioning
with torch.set_grad_enabled(global_step < args.stop_text_encoder_training):
@ -291,8 +295,13 @@ def train(args):
logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
accelerator.log(logs, step=global_step)
if epoch == 0:
loss_list.append(current_loss)
else:
loss_total -= loss_list[step]
loss_list[step] = current_loss
loss_total += current_loss
avr_loss = loss_total / len(loss_list)
logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
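The `loss_list` bookkeeping above (and the identical code in `train_network.py` below) implements the moving-average loss from the changelog. As a standalone sketch of just that logic: the first epoch fills the window, and every later epoch overwrites the slot for the same step index, so `loss_total` always sums the most recent `len(loss_list)` step losses.

```python
loss_list = []
loss_total = 0.0

def update_moving_average(epoch, step, current_loss):
    global loss_total
    if epoch == 0:
        loss_list.append(current_loss)       # epoch 0 fills the window
    else:
        loss_total -= loss_list[step]        # drop last epoch's loss for this step
        loss_list[step] = current_loss
    loss_total += current_loss
    return loss_total / len(loss_list)       # average over the window
```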
@ -300,7 +309,7 @@ def train(args):
break
if args.logging_dir is not None:
logs = {"epoch_loss": loss_total / len(train_dataloader)}
logs = {"loss/epoch": loss_total / len(loss_list)}
accelerator.log(logs, step=epoch+1)
accelerator.wait_for_everyone()

View File

@ -1,5 +1,7 @@
from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION
from torch.optim import Optimizer
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from typing import Optional, Union
import importlib
import argparse
@ -154,7 +156,9 @@ def train(args):
# load the models
text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype)
# unnecessary, but works on low-RAM devices
text_encoder.to("cuda")
unet.to("cuda")
# incorporate xformers or memory efficient attention into the model
train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
@ -258,17 +262,26 @@ def train(args):
unet.requires_grad_(False)
unet.to(accelerator.device, dtype=weight_dtype)
text_encoder.requires_grad_(False)
text_encoder.to(accelerator.device)
if args.gradient_checkpointing: # according to TI example in Diffusers, train is required
unet.train()
text_encoder.train()
# set requires_grad = True on the top-level parameters so gradient checkpointing works
if type(text_encoder) == DDP:
text_encoder.module.text_model.embeddings.requires_grad_(True)
else:
text_encoder.text_model.embeddings.requires_grad_(True)
else:
unet.eval()
text_encoder.eval()
# support DistributedDataParallel
if type(text_encoder) == DDP:
text_encoder = text_encoder.module
unet = unet.module
network = network.module
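The unwrapping above is the standard `DistributedDataParallel` pattern: DDP wraps the original module, so code that reaches into model internals (such as `text_encoder.text_model.embeddings` earlier in this hunk) has to go through `.module` while DDP is active.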
network.prepare_grad_etc(text_encoder, unet)
if not cache_latents:
@ -340,11 +353,13 @@ def train(args):
"ss_max_bucket_reso": train_dataset.max_bucket_reso,
"ss_seed": args.seed,
"ss_keep_tokens": args.keep_tokens,
"ss_noise_offset": args.noise_offset,
"ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info),
"ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info),
"ss_tag_frequency": json.dumps(train_dataset.tag_frequency),
"ss_bucket_info": json.dumps(train_dataset.bucket_info),
"ss_training_comment": args.training_comment # will not be updated after training
"ss_training_comment": args.training_comment, # will not be updated after training
"ss_sd_scripts_commit_hash": train_util.get_git_revision_hash()
}
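`train_util.get_git_revision_hash()` is not shown in this diff; a plausible minimal implementation of such a helper (an assumption, not the repo's actual code) just shells out to git so the metadata records the exact script version:

```python
import subprocess

def get_git_revision_hash() -> str:
    # record the current commit so a trained model can be traced back to
    # the exact version of the training scripts
    return subprocess.check_output(
        ["git", "rev-parse", "HEAD"], encoding="utf-8").strip()
```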
# uncomment if another network is added
@ -378,6 +393,8 @@ def train(args):
if accelerator.is_main_process:
accelerator.init_trackers("network_train")
loss_list = []
loss_total = 0.0
for epoch in range(num_train_epochs):
print(f"epoch {epoch+1}/{num_train_epochs}")
train_dataset.set_current_epoch(epoch + 1)
@ -386,7 +403,6 @@ def train(args):
network.on_epoch_start(text_encoder, unet)
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(network):
with torch.no_grad():
@ -405,6 +421,9 @@ def train(args):
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
# https://www.crosslabs.org//blog/diffusion-with-offset-noise
noise += args.noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)
@ -415,7 +434,8 @@ def train(args):
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Predict the noise residual
with autocast():
noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
if args.v_parameterization:
# v-parameterization training
@ -446,8 +466,13 @@ def train(args):
global_step += 1
current_loss = loss.detach().item()
if epoch == 0:
loss_list.append(current_loss)
else:
loss_total -= loss_list[step]
loss_list[step] = current_loss
loss_total += current_loss
avr_loss = loss_total / len(loss_list)
logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
@ -459,7 +484,7 @@ def train(args):
break
if args.logging_dir is not None:
logs = {"loss/epoch": loss_total / len(train_dataloader)}
logs = {"loss/epoch": loss_total / len(loss_list)}
accelerator.log(logs, step=epoch+1)
accelerator.wait_for_everyone()

View File

@ -13,34 +13,41 @@ from diffusers import DDPMScheduler
import library.train_util as train_util
from library.train_util import DreamBoothDataset, FineTuningDataset
import torch.optim as optim
import dadaptation
# imagenet_templates_small = [
# "a photo of a {}",
# "a rendering of a {}",
# "a cropped photo of the {}",
# "the photo of a {}",
# "a photo of a clean {}",
# "a photo of a dirty {}",
# "a dark photo of the {}",
# "a photo of my {}",
# "a photo of the cool {}",
# "a close-up photo of a {}",
# "a bright photo of the {}",
# "a cropped photo of a {}",
# "a photo of the {}",
# "a good photo of the {}",
# "a photo of one {}",
# "a close-up photo of the {}",
# "a rendition of the {}",
# "a photo of the clean {}",
# "a rendition of a {}",
# "a photo of a nice {}",
# "a good photo of a {}",
# "a photo of the nice {}",
# "a photo of the small {}",
# "a photo of the weird {}",
# "a photo of the large {}",
# "a photo of a cool {}",
# "a photo of a small {}",
# ]
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
"{}",
]
imagenet_style_templates_small = [
@ -213,7 +220,12 @@ def train(args):
trainable_params = text_encoder.get_input_embeddings().parameters()
# beta and weight decay appear to use the default values in both diffusers DreamBooth and DreamBooth SD, so those options are omitted for now
# optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
print('enable dadaptation.')
optimizer = dadaptation.DAdaptAdam(trainable_params, lr=1.0, decouple=True, weight_decay=0)
# optimizer = dadaptation.DAdaptSGD(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
# optimizer = dadaptation.DAdaptAdaGrad(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
# prepare the dataloader
# num_workers=0 makes the DataLoader run in the main process
@ -227,8 +239,20 @@ def train(args):
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
# prepare the lr scheduler
# lr_scheduler = diffusers.optimization.get_scheduler(
# args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)
# For Adam
lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
lr_lambda=[lambda epoch: 1],
last_epoch=-1,
verbose=False)
# For SGD optim
# lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
# lr_lambda=[lambda epoch: 1],
# last_epoch=-1,
# verbose=False)
# accelerator apparently takes care of things for us
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
@ -366,12 +390,16 @@ def train(args):
current_loss = loss.detach().item()
if args.logging_dir is not None:
logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
#logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
avr_loss = loss_total / (step+1)
logs = {"loss": avr_loss, "dlr0": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
accelerator.log(logs, step=global_step)
loss_total += current_loss
avr_loss = loss_total / (step+1)
logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
# logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
logs = {"loss": avr_loss, "dlr0": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
progress_bar.set_postfix(**logs)
if global_step >= args.max_train_steps: