From 261b6790ee1e92d6d84220ef2d990542acc3b8aa Mon Sep 17 00:00:00 2001
From: bmaltais
Date: Sun, 12 Feb 2023 07:02:05 -0500
Subject: [PATCH 1/5] Update tool

---
 README.md                         |   4 +-
 tools/crop_images_to_n_buckets.py | 125 +++++++++++++++++++++++++-----
 2 files changed, 108 insertions(+), 21 deletions(-)

diff --git a/README.md b/README.md
index 0b0d45e..d5cb7b8 100644
--- a/README.md
+++ b/README.md
@@ -144,10 +144,10 @@ Then redo the installation instruction within the kohya_ss venv.
 ## Change history
 
 * 2023/02/11 (v20.7.2):
-  - ``lora_interrogator.py`` is added in ``networks`` folder. See ``python networks\lora_interrogator.py -h`` for usage.
+  - `lora_interrogator.py` is added in `networks` folder. See `python networks\lora_interrogator.py -h` for usage.
    - For LoRAs where the activation word is unknown, this script compares the output of Text Encoder after applying LoRA to that of unapplied to find out which token is affected by LoRA. Hopefully you can figure out the activation word. LoRA trained with captions does not seem to be able to interrogate.
    - Batch size can be large (like 64 or 128).
-  - ``train_textual_inversion.py`` now supports multiple init words.
+  - `train_textual_inversion.py` now supports multiple init words.
   - Following feature is reverted to be the same as before. Sorry for confusion:
     > Now the number of data in each batch is limited to the number of actual images (not duplicated). Because a certain bucket may contain smaller number of actual images, so the batch may contain same (duplicated) images.
   - Add new tool to sort, group and average crop image in a dataset
diff --git a/tools/crop_images_to_n_buckets.py b/tools/crop_images_to_n_buckets.py
index 3b0d68a..dff7825 100644
--- a/tools/crop_images_to_n_buckets.py
+++ b/tools/crop_images_to_n_buckets.py
@@ -7,6 +7,7 @@
 import os
 import cv2
 import argparse
+import shutil
 
 def aspect_ratio(img_path):
     """Return aspect ratio of an image"""
@@ -38,9 +39,22 @@ def average_aspect_ratio(group):
     """Calculate average aspect ratio for a group"""
     aspect_ratios = [aspect_ratio for _, aspect_ratio in group]
     avg_aspect_ratio = sum(aspect_ratios) / len(aspect_ratios)
+    print(f"Average aspect ratio for group: {avg_aspect_ratio}")
     return avg_aspect_ratio
 
 def center_crop_image(image, target_aspect_ratio):
+    """Crop the input image to the target aspect ratio.
+
+    The function calculates the crop region for the input image based on its current aspect ratio and the target aspect ratio.
+
+    Args:
+        image: A numpy array representing the input image.
+        target_aspect_ratio: A float representing the target aspect ratio.
+
+    Returns:
+        A numpy array representing the cropped image.
+
+    """
     height, width = image.shape[:2]
     current_aspect_ratio = float(width) / float(height)
@@ -58,45 +72,111 @@ def center_crop_image(image, target_aspect_ratio):
 
     return cropped_image
 
-def save_cropped_images(group, folder_name, group_number, avg_aspect_ratio):
+def copy_related_files(img_path, save_path):
+    """
+    Copy all files in the same directory as the input image that have the same base name as the input image to the
+    output directory with the corresponding new filename.
+    :param img_path: Path to the input image.
+    :param save_path: Path to the output image.
+    """
+    # Get the base filename and directory
+    img_dir, img_basename = os.path.split(img_path)
+    img_base, img_ext = os.path.splitext(img_basename)
+
+    save_dir, save_basename = os.path.split(save_path)
+    save_base, save_ext = os.path.splitext(save_basename)
+
+    # Create the output directory if it does not exist
+    if not os.path.exists(save_dir):
+        os.makedirs(save_dir)
+
+    # Loop over all files in the same directory as the input image
+    try:
+        for filename in os.listdir(img_dir):
+            # Skip files with the same name as the input image
+            if filename == img_basename:
+                continue
+
+            # Check if the file has the same base name as the input image
+            file_base, file_ext = os.path.splitext(filename)
+            if file_base == img_base:
+                # Build the new filename and copy the file
+                new_filename = os.path.join(save_dir, f"{save_base}{file_ext}")
+                shutil.copy2(os.path.join(img_dir, filename), new_filename)
+    except OSError as e:
+        print(f"Error: {e}")  # Handle errors from os.listdir()
+
+def save_resized_cropped_images(group, folder_name, group_number, avg_aspect_ratio, use_original_name=False):
+    """Crop and resize all images in the input group to the smallest resolution, and save them to a folder.
+
+    Args:
+        group: A list of tuples, where each tuple contains the path to an image and its aspect ratio.
+        folder_name: A string representing the name of the folder to save the images to.
+        group_number: An integer representing the group number.
+        avg_aspect_ratio: A float representing the average aspect ratio of the images in the group.
+        use_original_name: A boolean indicating whether to save the images with their original file names.
+
+    """
     if not os.path.exists(folder_name):
         os.makedirs(folder_name)
 
     # get the smallest size of the images
-    small_height = 0
-    small_width = 0
-    smallest_res = 100000000
-    for i, image in enumerate(group):
-        img_path, aspect_ratio = image
+    smallest_res = float("inf")
+    for img_path, _ in group:
         image = cv2.imread(img_path)
         cropped_image = center_crop_image(image, avg_aspect_ratio)
         height, width = cropped_image.shape[:2]
-        if smallest_res > height * width:
-            small_height = height
-            small_width = width
-            smallest_res = height * width
+        image_res = height * width
+        if image_res < smallest_res:
+            smallest_res = image_res
+            small_height, small_width = height, width
 
     # resize all images to the smallest resolution of the images in the group
-    for i, image in enumerate(group):
-        img_path, aspect_ratio = image
+    for i, (img_path, aspect_ratio) in enumerate(group):
         image = cv2.imread(img_path)
         cropped_image = center_crop_image(image, avg_aspect_ratio)
         resized_image = cv2.resize(cropped_image, (small_width, small_height))
-        save_path = os.path.join(folder_name, "group_{}_{}.jpg".format(group_number, i))
+        if use_original_name:
+            save_name = os.path.basename(img_path)
+        else:
+            save_name = f"group_{group_number}_{i}.jpg"
+        save_path = os.path.join(folder_name, save_name)
         cv2.imwrite(save_path, resized_image)
+        # Copy matching files named the same as img_path to the save path
+        copy_related_files(img_path, save_path)
+
+        print(f"Saved {save_name} to {folder_name}")
+
 def main():
     parser = argparse.ArgumentParser(description='Sort images and crop them based on aspect ratio')
-    parser.add_argument('--path', type=str, help='Path to the directory containing images', required=True)
-    parser.add_argument('--dst_path', type=str, help='Path to the directory to save the cropped images', required=True)
-    parser.add_argument('--batch_size', type=int, help='Size of the batches to create', required=True)
+    parser.add_argument('input_dir', type=str, help='Path to the directory containing images')
+    parser.add_argument('output_dir', type=str, help='Path to the directory to save the cropped images')
+    parser.add_argument('batch_size', type=int, help='Size of the batches to create')
+    parser.add_argument('--use_original_name', action='store_true', help='Whether to use original file names for the saved images')
 
     args = parser.parse_args()
 
-    sorted_images = sort_images_by_aspect_ratio(args.path)
+    print(f"Sorting images by aspect ratio in {args.input_dir}...")
+    if not os.path.exists(args.input_dir):
+        print(f"Error: Input directory does not exist: {args.input_dir}")
+        return
+
+    if not os.path.exists(args.output_dir):
+        try:
+            os.makedirs(args.output_dir)
+        except OSError:
+            print(f"Error: Failed to create output directory: {args.output_dir}")
+            return
+
+    sorted_images = sort_images_by_aspect_ratio(args.input_dir)
     total_images = len(sorted_images)
     print(f'Total images: {total_images}')
+
+    if args.batch_size <= 0:
+        print("Error: Batch size must be greater than 0")
+        return
 
     group_size = total_images // args.batch_size
@@ -111,11 +191,18 @@ def main():
 
     print('Creating groups...')
     groups = create_groups(sorted_images, group_size)
-
+    print(f"Created {len(groups)} groups")
+
+    print('Saving cropped and resized images...')
     for i, group in enumerate(groups):
         avg_aspect_ratio = average_aspect_ratio(group)
-        save_cropped_images(group, args.dst_path, i+1, avg_aspect_ratio)
+        print(f"Processing group {i+1} with {len(group)} images...")
+        try:
+            save_resized_cropped_images(group, args.output_dir, i+1, avg_aspect_ratio, args.use_original_name)
+        except Exception as e:
+            print(f"Error: Failed to save images in group {i+1}: {e}")
+
+    print('Done')
 
 if __name__ == '__main__':
     main()
\ No newline at end of file
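With this patch the tool switches from --path/--dst_path/--batch_size to positional arguments, so a typical run (assuming it is invoked from the repository root) is `python tools/crop_images_to_n_buckets.py <input_dir> <output_dir> <batch_size> [--use_original_name]`. The sketch below restates the center-crop rule that the new center_crop_image docstring describes; it is a minimal illustration of the standard center-crop formula, not a copy of the patched function.

    import numpy as np

    def center_crop_sketch(image: np.ndarray, target_aspect_ratio: float) -> np.ndarray:
        # Aspect ratio is width / height, matching the patched code.
        height, width = image.shape[:2]
        current_aspect_ratio = float(width) / float(height)
        if current_aspect_ratio > target_aspect_ratio:
            # Too wide: keep full height, trim equal margins left and right.
            new_width = int(height * target_aspect_ratio)
            x0 = (width - new_width) // 2
            return image[:, x0:x0 + new_width]
        # Too tall (or equal): keep full width, trim top and bottom.
        new_height = int(width / target_aspect_ratio)
        y0 = (height - new_height) // 2
        return image[y0:y0 + new_height, :]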
From 6129c7dd40b2d8ef3bcfceaa0dbffee2a86667eb Mon Sep 17 00:00:00 2001
From: bmaltais
Date: Mon, 13 Feb 2023 21:20:09 -0500
Subject: [PATCH 2/5] 1st implementation

---
 requirements.txt |  2 ++
 train_network.py | 33 ++++++++++++++++++++++++---------
 2 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index a8bcefb..20c699d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,5 +22,7 @@ fairscale==0.4.13
 tensorflow==2.10.1
 huggingface-hub==0.12.0
 xformers @ https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl
+# for dadaptation
+dadaptation
 # for kohya_ss library
 .
\ No newline at end of file
diff --git a/train_network.py b/train_network.py
index bb3159f..e729793 100644
--- a/train_network.py
+++ b/train_network.py
@@ -19,6 +19,8 @@ from diffusers import DDPMScheduler
 
 import library.train_util as train_util
 from library.train_util import DreamBoothDataset, FineTuningDataset
+import torch.optim as optim
+import dadaptation
 
 def collate_fn(examples):
     return examples[0]
@@ -212,10 +214,15 @@ def train(args):
     else:
         optimizer_class = torch.optim.AdamW
 
-    trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
+    # trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
+    trainable_params = network.prepare_optimizer_params(None, None)
 
     # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
-    optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
+    # optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
+    print('enable dadaptation.')
+    optimizer = dadaptation.DAdaptAdam(trainable_params, lr=1.0, decouple=True, weight_decay=0)
+    # optimizer = dadaptation.DAdaptSGD(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
+    # optimizer = dadaptation.DAdaptAdaGrad(trainable_params, lr=1.0, weight_decay=0, d0=1e-8,)
 
     # dataloaderを準備する
     # DataLoaderのプロセス数:0はメインプロセスになる
@@ -230,10 +237,15 @@ def train(args):
 
     # lr schedulerを用意する
     # lr_scheduler = diffusers.optimization.get_scheduler(
-    lr_scheduler = get_scheduler_fix(
-        args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
-        num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
-        num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)
+    # lr_scheduler = get_scheduler_fix(
+    #     args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
+    #     num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
+    #     num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)
+    # override lr_scheduler.
+    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
+                                               lr_lambda=[lambda epoch: 0.5, lambda epoch: 1],
+                                               last_epoch=-1,
+                                               verbose=False)
 
     # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
     if args.full_fp16:
@@ -448,11 +460,14 @@ def train(args):
             current_loss = loss.detach().item()
             loss_total += current_loss
             avr_loss = loss_total / (step+1)
-            logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
-            progress_bar.set_postfix(**logs)
+            # logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            # progress_bar.set_postfix(**logs)
+            logs_str = f"loss: {avr_loss:.3f}, dlr: {optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']:.2e}"
+            progress_bar.set_postfix_str(logs_str)
 
             if args.logging_dir is not None:
                 logs = generate_step_logs(args, current_loss, avr_loss, lr_scheduler)
+                logs['lr/d*lr'] = optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']
                 accelerator.log(logs, step=global_step)
 
             if global_step >= args.max_train_steps:
@@ -545,4 +560,4 @@ if __name__ == '__main__':
                         help="arbitrary comment string stored in metadata / メタデータに記録する任意のコメント文字列")
 
     args = parser.parse_args()
-    train(args)
+    train(args)
\ No newline at end of file
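The patch above is the core of the change: D-Adaptation estimates the step size "d" on the fly, so the optimizer is created with lr=1.0 and the per-group scaling that used to live in text_encoder_lr/unet_lr moves into LambdaLR multipliers. A self-contained sketch of that wiring, with dummy modules standing in for the text encoder and U-Net parameter groups (not the trainer's actual code):

    import dadaptation
    import torch
    import torch.optim as optim

    text_encoder = torch.nn.Linear(8, 8)   # stand-ins for the real models
    unet = torch.nn.Linear(8, 8)
    params = [{"params": text_encoder.parameters()},
              {"params": unet.parameters()}]

    # lr stays at 1.0 because D-Adaptation supplies the magnitude itself;
    # decouple=True selects AdamW-style decoupled weight decay.
    optimizer = dadaptation.DAdaptAdam(params, lr=1.0, decouple=True, weight_decay=0)

    # One multiplier per parameter group, e.g. half rate for the text encoder.
    lr_scheduler = optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=[lambda epoch: 0.5, lambda epoch: 1])

    loss = (unet(text_encoder(torch.randn(2, 8))) ** 2).mean()
    loss.backward()
    optimizer.step()
    lr_scheduler.step()

    # The effective rate is d * lr, which the patched progress bar logs as "dlr".
    print(optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"])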
From 655f885cf44cd4993217eda5f60f6a3c13de7b3d Mon Sep 17 00:00:00 2001
From: bmaltais
Date: Thu, 16 Feb 2023 19:33:33 -0500
Subject: [PATCH 3/5] Add dadaptation to other trainers

---
 train_textual_inversion.py | 92 +++++++++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 32 deletions(-)

diff --git a/train_textual_inversion.py b/train_textual_inversion.py
index 4aa91ee..118c99f 100644
--- a/train_textual_inversion.py
+++ b/train_textual_inversion.py
@@ -13,34 +13,41 @@ from diffusers import DDPMScheduler
 
 import library.train_util as train_util
 from library.train_util import DreamBoothDataset, FineTuningDataset
+import torch.optim as optim
+import dadaptation
+
+# imagenet_templates_small = [
+#     "a photo of a {}",
+#     "a rendering of a {}",
+#     "a cropped photo of the {}",
+#     "the photo of a {}",
+#     "a photo of a clean {}",
+#     "a photo of a dirty {}",
+#     "a dark photo of the {}",
+#     "a photo of my {}",
+#     "a photo of the cool {}",
+#     "a close-up photo of a {}",
+#     "a bright photo of the {}",
+#     "a cropped photo of a {}",
+#     "a photo of the {}",
+#     "a good photo of the {}",
+#     "a photo of one {}",
+#     "a close-up photo of the {}",
+#     "a rendition of the {}",
+#     "a photo of the clean {}",
+#     "a rendition of a {}",
+#     "a photo of a nice {}",
+#     "a good photo of a {}",
+#     "a photo of the nice {}",
+#     "a photo of the small {}",
+#     "a photo of the weird {}",
+#     "a photo of the large {}",
+#     "a photo of a cool {}",
+#     "a photo of a small {}",
+# ]
 
 imagenet_templates_small = [
-    "a photo of a {}",
-    "a rendering of a {}",
-    "a cropped photo of the {}",
-    "the photo of a {}",
-    "a photo of a clean {}",
-    "a photo of a dirty {}",
-    "a dark photo of the {}",
-    "a photo of my {}",
-    "a photo of the cool {}",
-    "a close-up photo of a {}",
-    "a bright photo of the {}",
-    "a cropped photo of a {}",
-    "a photo of the {}",
-    "a good photo of the {}",
-    "a photo of one {}",
-    "a close-up photo of the {}",
-    "a rendition of the {}",
-    "a photo of the clean {}",
-    "a rendition of a {}",
-    "a photo of a nice {}",
-    "a good photo of a {}",
-    "a photo of the nice {}",
-    "a photo of the small {}",
-    "a photo of the weird {}",
-    "a photo of the large {}",
-    "a photo of a cool {}",
-    "a photo of a small {}",
+    "{}",
 ]
 
 imagenet_style_templates_small = [
@@ -213,7 +220,12 @@ def train(args):
     trainable_params = text_encoder.get_input_embeddings().parameters()
 
     # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
-    optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
+    # optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
+    print('enable dadaptation.')
+    optimizer = dadaptation.DAdaptAdam(trainable_params, lr=1.0, decouple=True, weight_decay=0)
+    # optimizer = dadaptation.DAdaptSGD(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
+    # optimizer = dadaptation.DAdaptAdaGrad(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
+
 
     # dataloaderを準備する
     # DataLoaderのプロセス数:0はメインプロセスになる
@@ -227,8 +239,20 @@ def train(args):
         print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
 
     # lr schedulerを用意する
-    lr_scheduler = diffusers.optimization.get_scheduler(
-        args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)
+    # lr_scheduler = diffusers.optimization.get_scheduler(
+    #     args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)
+
+    # For Adam
+    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
+                                               lr_lambda=[lambda epoch: 1],
+                                               last_epoch=-1,
+                                               verbose=False)
+
+    # For SGD optim
+    # lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
+    #                                            lr_lambda=[lambda epoch: 1],
+    #                                            last_epoch=-1,
+    #                                            verbose=False)
 
     # acceleratorがなんかよろしくやってくれるらしい
     text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
@@ -366,12 +390,16 @@ def train(args):
 
             current_loss = loss.detach().item()
             if args.logging_dir is not None:
-                logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
+                # logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
+
+                avr_loss = (loss_total + current_loss) / (step+1)
+                logs = {"loss": avr_loss, "dlr0": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
                 accelerator.log(logs, step=global_step)
 
             loss_total += current_loss
             avr_loss = loss_total / (step+1)
-            logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            # logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            logs = {"loss": avr_loss, "dlr0": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
             progress_bar.set_postfix(**logs)
             if global_step >= args.max_train_steps:
                 break

From f9863e3950d6e217e33d007c40c599d47c826f7c Mon Sep 17 00:00:00 2001
From: bmaltais
Date: Thu, 16 Feb 2023 19:33:46 -0500
Subject: [PATCH 4/5] add dadaptation to other trainers

---
 fine_tune.py                      | 31 ++++++++++++++++++++++++++-----
 tools/crop_images_to_n_buckets.py |  2 +-
 train_db.py                       | 23 ++++++++++++++++++-----
 train_network.py                  | 14 +++++++++++---
 4 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/fine_tune.py b/fine_tune.py
index 5292153..12fb91f 100644
--- a/fine_tune.py
+++ b/fine_tune.py
@@ -14,6 +14,9 @@ from diffusers import DDPMScheduler
 
 import library.train_util as train_util
 
+import torch.optim as optim
+import dadaptation
+
 def collate_fn(examples):
     return examples[0]
 
@@ -162,7 +165,9 @@ def train(args):
     optimizer_class = torch.optim.AdamW
 
     # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
-    optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)
+    # optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)
+    print('enable dadaptation.')
+    optimizer = dadaptation.DAdaptAdam(params_to_optimize, lr=1.0, decouple=True, weight_decay=0)
 
     # dataloaderを準備する
     # DataLoaderのプロセス数:0はメインプロセスになる
@@ -176,8 +181,20 @@ def train(args):
         print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
 
     # lr schedulerを用意する
-    lr_scheduler = diffusers.optimization.get_scheduler(
-        args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)
+    # lr_scheduler = diffusers.optimization.get_scheduler(
+    #     args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)
+
+    # For Adam
+    # lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
+    #                                            lr_lambda=[lambda epoch: 1],
+    #                                            last_epoch=-1,
+    #                                            verbose=False)
+
+    # For SGD optim
+    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
+                                               lr_lambda=[lambda epoch: 1],
+                                               last_epoch=-1,
+                                               verbose=True)
 
     # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
     if args.full_fp16:
@@ -293,12 +310,16 @@ def train(args):
 
             current_loss = loss.detach().item()  # 平均なのでbatch sizeは関係ないはず
             if args.logging_dir is not None:
-                logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
+                # logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
+                # accelerator.log(logs, step=global_step)
+                logs = {"loss": current_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
                 accelerator.log(logs, step=global_step)
 
             loss_total += current_loss
             avr_loss = loss_total / (step+1)
-            logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            # logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            # progress_bar.set_postfix(**logs)
+            logs = {"avg_loss": avr_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}  # , "lr": lr_scheduler.get_last_lr()[0]}
             progress_bar.set_postfix(**logs)
 
             if global_step >= args.max_train_steps:
diff --git a/tools/crop_images_to_n_buckets.py b/tools/crop_images_to_n_buckets.py
index dff7825..688b42b 100644
--- a/tools/crop_images_to_n_buckets.py
+++ b/tools/crop_images_to_n_buckets.py
@@ -20,7 +20,7 @@ def sort_images_by_aspect_ratio(path):
     """Sort all images in a folder by aspect ratio"""
     images = []
     for filename in os.listdir(path):
-        if filename.endswith(".jpg") or filename.endswith(".jpeg") or filename.endswith(".png"):
+        if filename.endswith(".jpg") or filename.endswith(".jpeg") or filename.endswith(".png") or filename.endswith(".webp"):
             img_path = os.path.join(path, filename)
             images.append((img_path, aspect_ratio(img_path)))
     # sort the list of tuples based on the aspect ratio
diff --git a/train_db.py b/train_db.py
index c210767..aacfcc8 100644
--- a/train_db.py
+++ b/train_db.py
@@ -17,6 +17,8 @@ from diffusers import DDPMScheduler
 
 import library.train_util as train_util
 from library.train_util import DreamBoothDataset
+import torch.optim as optim
+import dadaptation
 
 def collate_fn(examples):
     return examples[0]
@@ -133,13 +135,16 @@ def train(args):
     trainable_params = unet.parameters()
 
     # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
-    optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
+    # optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
 
     # dataloaderを準備する
     # DataLoaderのプロセス数:0はメインプロセスになる
     n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1)      # cpu_count-1 ただし最大で指定された数まで
     train_dataloader = torch.utils.data.DataLoader(
         train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers, persistent_workers=args.persistent_data_loader_workers)
+    print('enable dadaptation.')
+    optimizer = dadaptation.DAdaptAdam(trainable_params, lr=1.0, decouple=True, weight_decay=0, d0=0.00000001)
+
 
     # 学習ステップ数を計算する
     if args.max_train_epochs is not None:
@@ -150,8 +155,14 @@ def train(args):
         args.stop_text_encoder_training = args.max_train_steps + 1  # do not stop until end
 
     # lr schedulerを用意する
-    lr_scheduler = diffusers.optimization.get_scheduler(
-        args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps)
+    # lr_scheduler = diffusers.optimization.get_scheduler(
+    #     args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps)
+
+    # For Adam
+    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
+                                               lr_lambda=[lambda epoch: 1],
+                                               last_epoch=-1,
+                                               verbose=False)
 
     # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
     if args.full_fp16:
@@ -288,12 +299,14 @@ def train(args):
 
             current_loss = loss.detach().item()
             if args.logging_dir is not None:
-                logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
+                # logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
+                logs = {"loss": current_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
                 accelerator.log(logs, step=global_step)
 
             loss_total += current_loss
             avr_loss = loss_total / (step+1)
-            logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            # logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            logs = {"avg_loss": avr_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}  # , "lr": lr_scheduler.get_last_lr()[0]}
             progress_bar.set_postfix(**logs)
 
             if global_step >= args.max_train_steps:
diff --git a/train_network.py b/train_network.py
index e729793..824c298 100644
--- a/train_network.py
+++ b/train_network.py
@@ -222,7 +222,7 @@ def train(args):
     print('enable dadaptation.')
     optimizer = dadaptation.DAdaptAdam(trainable_params, lr=1.0, decouple=True, weight_decay=0)
     # optimizer = dadaptation.DAdaptSGD(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
-    # optimizer = dadaptation.DAdaptAdaGrad(trainable_params, lr=1.0, weight_decay=0, d0=1e-8,)
+    # optimizer = dadaptation.DAdaptAdaGrad(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
 
     # dataloaderを準備する
     # DataLoaderのプロセス数:0はメインプロセスになる
@@ -242,10 +242,18 @@ def train(args):
     #     num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
     #     num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)
     # override lr_scheduler.
+
+    # For Adam
     lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
-                                               lr_lambda=[lambda epoch: 0.5, lambda epoch: 1],
+                                               lr_lambda=[lambda epoch: 0.25, lambda epoch: 1],
                                                last_epoch=-1,
                                                verbose=False)
+
+    # For SGD optim
+    # lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
+    #                                            lr_lambda=[lambda epoch: 1, lambda epoch: 0.5],
+    #                                            last_epoch=-1,
+    #                                            verbose=False)
 
     # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
     if args.full_fp16:
@@ -462,7 +470,7 @@ def train(args):
             avr_loss = loss_total / (step+1)
             # logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
             # progress_bar.set_postfix(**logs)
-            logs_str = f"loss: {avr_loss:.3f}, dlr: {optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']:.2e}"
+            logs_str = f"loss: {avr_loss:.3f}, dlr0: {optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']:.2e}, dlr1: {optimizer.param_groups[1]['d']*optimizer.param_groups[1]['lr']:.2e}"
             progress_bar.set_postfix_str(logs_str)
 
             if args.logging_dir is not None:
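Patch 4 repeats the patch 2 wiring in fine_tune.py, train_db.py, and train_network.py; the only new knob is d0, the seed value for the adapted step size "d" (the commented-out DAdaptSGD line in patch 2 uses d0=1e-6, while train_db.py passes d0=0.00000001, i.e. 1e-8, for a more conservative start while "d" is still being estimated). A minimal sketch of that variant, under the same assumptions as the earlier example:

    import dadaptation
    import torch

    model = torch.nn.Linear(8, 8)   # stand-in for the U-Net
    # d0 only changes the initial estimate of "d"; the adapted value
    # takes over after the first optimization steps.
    optimizer = dadaptation.DAdaptAdam(model.parameters(), lr=1.0,
                                       decouple=True, weight_decay=0, d0=1e-8)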
From 674ed88d13dcb373284ff0050bbefedcd3ed2e29 Mon Sep 17 00:00:00 2001
From: bmaltais
Date: Fri, 17 Feb 2023 19:18:11 -0500
Subject: [PATCH 5/5] * 2023/02/16 (v20.7.3) - Noise offset is recorded to the
 metadata. Thanks to space-nuko! - Show the moving average loss to prevent
 loss jumping in `train_network.py` and `train_db.py`. Thanks to shirayu!

---
 README.md        |  3 ++
 train_db.py      | 40 +++++++++++------------
 train_network.py | 82 +++++++++++++++++++++++++-----------------------
 3 files changed, 63 insertions(+), 62 deletions(-)

diff --git a/README.md b/README.md
index d5cb7b8..4d36042 100644
--- a/README.md
+++ b/README.md
@@ -143,6 +143,9 @@ Then redo the installation instruction within the kohya_ss venv.
 
 ## Change history
 
+* 2023/02/16 (v20.7.3)
+  - Noise offset is recorded to the metadata. Thanks to space-nuko!
+  - Show the moving average loss to prevent loss jumping in `train_network.py` and `train_db.py`. Thanks to shirayu!
 * 2023/02/11 (v20.7.2):
   - `lora_interrogator.py` is added in `networks` folder. See `python networks\lora_interrogator.py -h` for usage.
    - For LoRAs where the activation word is unknown, this script compares the output of Text Encoder after applying LoRA to that of unapplied to find out which token is affected by LoRA. Hopefully you can figure out the activation word. LoRA trained with captions does not seem to be able to interrogate.
diff --git a/train_db.py b/train_db.py
index aacfcc8..e4f1e54 100644
--- a/train_db.py
+++ b/train_db.py
@@ -17,8 +17,6 @@ from diffusers import DDPMScheduler
 
 import library.train_util as train_util
 from library.train_util import DreamBoothDataset
-import torch.optim as optim
-import dadaptation
 
 def collate_fn(examples):
     return examples[0]
@@ -135,16 +133,13 @@ def train(args):
     trainable_params = unet.parameters()
 
     # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
-    # optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
+    optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
 
     # dataloaderを準備する
     # DataLoaderのプロセス数:0はメインプロセスになる
     n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1)      # cpu_count-1 ただし最大で指定された数まで
     train_dataloader = torch.utils.data.DataLoader(
         train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers, persistent_workers=args.persistent_data_loader_workers)
-    print('enable dadaptation.')
-    optimizer = dadaptation.DAdaptAdam(trainable_params, lr=1.0, decouple=True, weight_decay=0, d0=0.00000001)
-
 
     # 学習ステップ数を計算する
     if args.max_train_epochs is not None:
@@ -155,14 +150,8 @@ def train(args):
         args.stop_text_encoder_training = args.max_train_steps + 1  # do not stop until end
 
     # lr schedulerを用意する
-    # lr_scheduler = diffusers.optimization.get_scheduler(
-    #     args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps)
-
-    # For Adam
-    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
-                                               lr_lambda=[lambda epoch: 1],
-                                               last_epoch=-1,
-                                               verbose=False)
+    lr_scheduler = diffusers.optimization.get_scheduler(
+        args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps)
 
     # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
     if args.full_fp16:
@@ -217,6 +206,8 @@ def train(args):
     if accelerator.is_main_process:
         accelerator.init_trackers("dreambooth")
 
+    loss_list = []
+    loss_total = 0.0
     for epoch in range(num_train_epochs):
         print(f"epoch {epoch+1}/{num_train_epochs}")
         train_dataset.set_current_epoch(epoch + 1)
@@ -227,7 +218,6 @@ def train(args):
         if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
             text_encoder.train()
 
-        loss_total = 0
         for step, batch in enumerate(train_dataloader):
             # 指定したステップ数でText Encoderの学習を止める
             if global_step == args.stop_text_encoder_training:
@@ -244,10 +234,13 @@ def train(args):
             else:
                 latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
                 latents = latents * 0.18215
+            b_size = latents.shape[0]
 
             # Sample noise that we'll add to the latents
             noise = torch.randn_like(latents, device=latents.device)
-            b_size = latents.shape[0]
+            if args.noise_offset:
+                # https://www.crosslabs.org//blog/diffusion-with-offset-noise
+                noise += args.noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)
 
             # Get the text embedding for conditioning
             with torch.set_grad_enabled(global_step < args.stop_text_encoder_training):
@@ -299,21 +292,24 @@ def train(args):
 
             current_loss = loss.detach().item()
             if args.logging_dir is not None:
-                # logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
-                logs = {"loss": current_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
+                logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
                 accelerator.log(logs, step=global_step)
 
+            if epoch == 0:
+                loss_list.append(current_loss)
+            else:
+                loss_total -= loss_list[step]
+                loss_list[step] = current_loss
             loss_total += current_loss
-            avr_loss = loss_total / (step+1)
-            # logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
-            logs = {"avg_loss": avr_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            avr_loss = loss_total / len(loss_list)
+            logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
             progress_bar.set_postfix(**logs)
 
             if global_step >= args.max_train_steps:
                 break
 
         if args.logging_dir is not None:
-            logs = {"epoch_loss": loss_total / len(train_dataloader)}
+            logs = {"loss/epoch": loss_total / len(loss_list)}
             accelerator.log(logs, step=epoch+1)
 
         accelerator.wait_for_everyone()
diff --git a/train_network.py b/train_network.py
index 824c298..5983a7e 100644
--- a/train_network.py
+++ b/train_network.py
@@ -1,5 +1,7 @@
 from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION
 from torch.optim import Optimizer
+from torch.cuda.amp import autocast
+from torch.nn.parallel import DistributedDataParallel as DDP
 from typing import Optional, Union
 import importlib
 import argparse
@@ -19,8 +21,6 @@ from diffusers import DDPMScheduler
 
 import library.train_util as train_util
 from library.train_util import DreamBoothDataset, FineTuningDataset
-import torch.optim as optim
-import dadaptation
 
 def collate_fn(examples):
     return examples[0]
@@ -156,7 +156,9 @@ def train(args):
 
     # モデルを読み込む
     text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype)
-
+    # unnecessary, but works on low-RAM devices
+    text_encoder.to("cuda")
+    unet.to("cuda")
     # モデルに xformers とか memory efficient attention を組み込む
     train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
@@ -214,15 +216,10 @@ def train(args):
     else:
         optimizer_class = torch.optim.AdamW
 
-    # trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
-    trainable_params = network.prepare_optimizer_params(None, None)
+    trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
 
     # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
-    # optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
-    print('enable dadaptation.')
-    optimizer = dadaptation.DAdaptAdam(trainable_params, lr=1.0, decouple=True, weight_decay=0)
-    # optimizer = dadaptation.DAdaptSGD(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
-    # optimizer = dadaptation.DAdaptAdaGrad(trainable_params, lr=1.0, weight_decay=0, d0=1e-6)
+    optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
 
     # dataloaderを準備する
     # DataLoaderのプロセス数:0はメインプロセスになる
@@ -237,23 +234,10 @@ def train(args):
 
     # lr schedulerを用意する
     # lr_scheduler = diffusers.optimization.get_scheduler(
-    # lr_scheduler = get_scheduler_fix(
-    #     args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
-    #     num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
-    #     num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)
-    # override lr_scheduler.
-
-    # For Adam
-    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
-                                               lr_lambda=[lambda epoch: 0.25, lambda epoch: 1],
-                                               last_epoch=-1,
-                                               verbose=False)
-
-    # For SGD optim
-    # lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
-    #                                            lr_lambda=[lambda epoch: 1, lambda epoch: 0.5],
-    #                                            last_epoch=-1,
-    #                                            verbose=False)
+    lr_scheduler = get_scheduler_fix(
+        args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
+        num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
+        num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)
 
     # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
     if args.full_fp16:
@@ -278,17 +262,26 @@ def train(args):
         unet.requires_grad_(False)
         unet.to(accelerator.device, dtype=weight_dtype)
         text_encoder.requires_grad_(False)
-        text_encoder.to(accelerator.device, dtype=weight_dtype)
+        text_encoder.to(accelerator.device)
         if args.gradient_checkpointing:  # according to TI example in Diffusers, train is required
             unet.train()
             text_encoder.train()
 
             # set top parameter requires_grad = True for gradient checkpointing works
-            text_encoder.text_model.embeddings.requires_grad_(True)
+            if type(text_encoder) == DDP:
+                text_encoder.module.text_model.embeddings.requires_grad_(True)
+            else:
+                text_encoder.text_model.embeddings.requires_grad_(True)
         else:
             unet.eval()
             text_encoder.eval()
 
+        # support DistributedDataParallel
+        if type(text_encoder) == DDP:
+            text_encoder = text_encoder.module
+            unet = unet.module
+            network = network.module
+
         network.prepare_grad_etc(text_encoder, unet)
 
     if not cache_latents:
@@ -360,11 +353,13 @@ def train(args):
         "ss_max_bucket_reso": train_dataset.max_bucket_reso,
         "ss_seed": args.seed,
         "ss_keep_tokens": args.keep_tokens,
+        "ss_noise_offset": args.noise_offset,
         "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info),
         "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info),
         "ss_tag_frequency": json.dumps(train_dataset.tag_frequency),
         "ss_bucket_info": json.dumps(train_dataset.bucket_info),
-        "ss_training_comment": args.training_comment  # will not be updated after training
+        "ss_training_comment": args.training_comment,  # will not be updated after training
+        "ss_sd_scripts_commit_hash": train_util.get_git_revision_hash()
     }
 
     # uncomment if another network is added
@@ -398,6 +393,8 @@ def train(args):
     if accelerator.is_main_process:
         accelerator.init_trackers("network_train")
 
+    loss_list = []
+    loss_total = 0.0
     for epoch in range(num_train_epochs):
         print(f"epoch {epoch+1}/{num_train_epochs}")
         train_dataset.set_current_epoch(epoch + 1)
@@ -406,7 +403,6 @@ def train(args):
 
         network.on_epoch_start(text_encoder, unet)
 
-        loss_total = 0
        for step, batch in enumerate(train_dataloader):
             with accelerator.accumulate(network):
                 with torch.no_grad():
@@ -425,6 +421,9 @@ def train(args):
 
                 # Sample noise that we'll add to the latents
                 noise = torch.randn_like(latents, device=latents.device)
+                if args.noise_offset:
+                    # https://www.crosslabs.org//blog/diffusion-with-offset-noise
+                    noise += args.noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)
 
                 # Sample a random timestep for each image
                 timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)
@@ -435,7 +434,8 @@ def train(args):
                 noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
 
                 # Predict the noise residual
-                noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
+                with autocast():
+                    noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
 
                 if args.v_parameterization:
                     # v-parameterization training
@@ -466,23 +466,25 @@ def train(args):
                 global_step += 1
 
             current_loss = loss.detach().item()
+            if epoch == 0:
+                loss_list.append(current_loss)
+            else:
+                loss_total -= loss_list[step]
+                loss_list[step] = current_loss
             loss_total += current_loss
-            avr_loss = loss_total / (step+1)
-            # logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
-            # progress_bar.set_postfix(**logs)
-            logs_str = f"loss: {avr_loss:.3f}, dlr0: {optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']:.2e}, dlr1: {optimizer.param_groups[1]['d']*optimizer.param_groups[1]['lr']:.2e}"
-            progress_bar.set_postfix_str(logs_str)
+            avr_loss = loss_total / len(loss_list)
+            logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+            progress_bar.set_postfix(**logs)
 
             if args.logging_dir is not None:
                 logs = generate_step_logs(args, current_loss, avr_loss, lr_scheduler)
-                logs['lr/d*lr'] = optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']
                 accelerator.log(logs, step=global_step)
 
             if global_step >= args.max_train_steps:
                 break
 
         if args.logging_dir is not None:
-            logs = {"loss/epoch": loss_total / len(train_dataloader)}
+            logs = {"loss/epoch": loss_total / len(loss_list)}
             accelerator.log(logs, step=epoch+1)
 
         accelerator.wait_for_everyone()
@@ -568,4 +570,4 @@ if __name__ == '__main__':
                         help="arbitrary comment string stored in metadata / メタデータに記録する任意のコメント文字列")
 
     args = parser.parse_args()
-    train(args)
\ No newline at end of file
+    train(args)
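Two features from the final patch are worth restating outside the diffs. First, the noise offset: one extra random constant per (sample, channel) pair is added to the noise, following the linked crosslabs.org post, so the model can learn overall brightness shifts. A sketch with dummy latents (0.1 is an assumed example value for --noise_offset, not something these patches set):

    import torch

    latents = torch.randn(4, 4, 64, 64)   # dummy latent batch (B, C, H, W)
    noise = torch.randn_like(latents)
    noise_offset = 0.1                     # assumed example value
    # One constant per (sample, channel), broadcast over H and W.
    noise += noise_offset * torch.randn(
        (latents.shape[0], latents.shape[1], 1, 1), device=latents.device)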
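Second, the moving-average loss display keeps one slot per step of an epoch: epoch 0 fills the list, and later epochs overwrite slot `step` while keeping the running total consistent, so the reported loss is always a mean over the last len(loss_list) steps. Restated outside the training loop, with names matching the diff:

    loss_list = []
    loss_total = 0.0

    def moving_average_loss(epoch: int, step: int, current_loss: float) -> float:
        """Mean of the most recent len(loss_list) step losses."""
        global loss_total
        if epoch == 0:
            loss_list.append(current_loss)    # first epoch grows the window
        else:
            loss_total -= loss_list[step]     # drop last epoch's value for this slot
            loss_list[step] = current_loss
        loss_total += current_loss
        return loss_total / len(loss_list)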