From bb57c1a36e6fefe110ba4f98e12e6a4649a01ea5 Mon Sep 17 00:00:00 2001
From: bmaltais
Date: Sun, 19 Feb 2023 06:50:33 -0500
Subject: [PATCH] Update code to latest sd-script version

---
 README-ja.md               |  6 ++++++
 README.md                  | 14 ++++++++++++++
 fine_tune.py               |  7 +++++++
 library/train_util.py      |  6 +++++-
 train_db.py                | 19 ++++++++++++++++---
 train_network.py           | 34 +++++++++++++++++++++++++++-------
 train_textual_inversion.py |  7 +++++++
 7 files changed, 82 insertions(+), 11 deletions(-)

diff --git a/README-ja.md b/README-ja.md
index adf44d2..064464c 100644
--- a/README-ja.md
+++ b/README-ja.md
@@ -64,6 +64,12 @@ cp .\bitsandbytes_windows\main.py .\venv\Lib\site-packages\bitsandbytes\cuda_set
 accelerate config
 ```
+
+
+コマンドプロンプトでは以下になります。
diff --git a/README.md b/README.md
index 6165627..9f98ec5 100644
--- a/README.md
+++ b/README.md
@@ -143,9 +143,23 @@ Then redo the installation instruction within the kohya_ss venv.
 
 ## Change history
 
+* 2023/02/19 (v20.7.4):
+  - Add `--use_lion_optimizer` to each training script to use the [Lion optimizer](https://github.com/lucidrains/lion-pytorch).
+    - Please install the Lion optimizer with `pip install lion-pytorch` (it is not in `requirements.txt` currently).
+  - Add a `--lowram` option to `train_network.py`. It loads models to VRAM instead of RAM (for machines that have more VRAM than RAM, such as Colab and Kaggle). Thanks to Isotr0py!
+    - The default behavior (without `--lowram`) has reverted to the same as before 14 Feb.
+  - Fixed the git commit hash so it is recorded correctly regardless of the working directory. Thanks to vladmandic!
 * 2023/02/15 (v20.7.3):
   - Update upgrade.ps1 script
   - Integrate new kohya sd-script
+  - Noise offset is recorded in the metadata. Thanks to space-nuko!
+  - Show the moving average loss so the reported loss does not jump around in `train_network.py` and `train_db.py`. Thanks to shirayu! (see the sketch at the end of this patch)
+  - Add multi-GPU training support for `train_network.py`. Thanks to Isotr0py!
+  - Add a `--verbose` option to `resize_lora.py`. For details, see [this PR](https://github.com/kohya-ss/sd-scripts/pull/179). Thanks to mgz-dev!
+  - The git commit hash is added to the metadata for LoRA. Thanks to space-nuko!
+  - Add a `--noise_offset` option to each training script (an illustrative sketch follows this README diff).
+    - Implementation of https://www.crosslabs.org//blog/diffusion-with-offset-noise
+    - This option may improve the ability to generate darker/lighter images. It may work with LoRA as well.
 * 2023/02/11 (v20.7.2):
   - `lora_interrogator.py` is added to the `networks` folder. See `python networks\lora_interrogator.py -h` for usage.
   - For LoRAs where the activation word is unknown, this script compares the output of the Text Encoder with the LoRA applied to the output without it, to find out which tokens are affected by the LoRA. Hopefully you can figure out the activation word. LoRA trained with captions does not seem to be interrogable.
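The `--noise_offset` entries above refer to the offset-noise idea from the linked Cross Labs post. As a rough illustration of that technique only — the helper below and its name are not part of this patch, and the exact code inside the training scripts may differ — the Gaussian noise used for the diffusion loss gets a small per-sample, per-channel constant added to it:

```python
import torch

def sample_offset_noise(latents: torch.Tensor, noise_offset: float = 0.1) -> torch.Tensor:
    """Illustrative sketch of 'offset noise' (hypothetical helper, not the patch's code):
    standard Gaussian noise plus a small constant shared across all pixels of each
    sample/channel, which lets the model learn overall brightness shifts."""
    noise = torch.randn_like(latents)
    # One scalar per (sample, channel), broadcast over height and width, scaled by noise_offset.
    noise += noise_offset * torch.randn(
        (latents.shape[0], latents.shape[1], 1, 1),
        device=latents.device,
        dtype=latents.dtype,
    )
    return noise
```

Because the offset is small relative to the unit-variance noise, values around 0.1 are suggested in the `--noise_offset` help text added below.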
diff --git a/fine_tune.py b/fine_tune.py
index 3ba6306..13241bc 100644
--- a/fine_tune.py
+++ b/fine_tune.py
@@ -158,6 +158,13 @@ def train(args):
       raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
     print("use 8-bit Adam optimizer")
     optimizer_class = bnb.optim.AdamW8bit
+  elif args.use_lion_optimizer:
+    try:
+      import lion_pytorch
+    except ImportError:
+      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
+    print("use Lion optimizer")
+    optimizer_class = lion_pytorch.Lion
   else:
     optimizer_class = torch.optim.AdamW
 
diff --git a/library/train_util.py b/library/train_util.py
index 415f9b7..63868f9 100644
--- a/library/train_util.py
+++ b/library/train_util.py
@@ -1103,7 +1103,7 @@ def addnet_hash_safetensors(b):
 
 def get_git_revision_hash() -> str:
   try:
-    return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
+    return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__)).decode('ascii').strip()
   except:
     return "(unknown)"
 
@@ -1389,6 +1389,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
                       help="max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)")
   parser.add_argument("--use_8bit_adam", action="store_true",
                       help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)")
+  parser.add_argument("--use_lion_optimizer", action="store_true",
+                      help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う( lion-pytorch のインストールが必要)")
   parser.add_argument("--mem_eff_attn", action="store_true",
                       help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う")
   parser.add_argument("--xformers", action="store_true",
@@ -1423,6 +1425,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
                       help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)")
   parser.add_argument("--noise_offset", type=float, default=None,
                       help="enable noise offset with this value (if enabled, around 0.1 is recommended) / Noise offsetを有効にしてこの値を設定する(有効にする場合は0.1程度を推奨)")
+  parser.add_argument("--lowram", action="store_true",
+                      help="enable low RAM optimization. e.g. load models to VRAM instead of RAM (for machines which have bigger VRAM than RAM such as Colab and Kaggle) / メインメモリが少ない環境向け最適化を有効にする。たとえばVRAMにモデルを読み込むなど(ColabやKaggleなどRAMに比べてVRAMが多い環境向け)")
 
   if support_dreambooth:
     # DreamBooth training
 
diff --git a/train_db.py b/train_db.py
index 4a50dc9..1903c4c 100644
--- a/train_db.py
+++ b/train_db.py
@@ -124,6 +124,13 @@ def train(args):
       raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
     print("use 8-bit Adam optimizer")
     optimizer_class = bnb.optim.AdamW8bit
+  elif args.use_lion_optimizer:
+    try:
+      import lion_pytorch
+    except ImportError:
+      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
+    print("use Lion optimizer")
+    optimizer_class = lion_pytorch.Lion
   else:
     optimizer_class = torch.optim.AdamW
 
@@ -206,6 +213,8 @@ def train(args):
   if accelerator.is_main_process:
     accelerator.init_trackers("dreambooth")
 
+  loss_list = []
+  loss_total = 0.0
   for epoch in range(num_train_epochs):
     print(f"epoch {epoch+1}/{num_train_epochs}")
     train_dataset.set_current_epoch(epoch + 1)
@@ -216,7 +225,6 @@ def train(args):
     if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
       text_encoder.train()
 
-    loss_total = 0
     for step, batch in enumerate(train_dataloader):
       # 指定したステップ数でText Encoderの学習を止める
       if global_step == args.stop_text_encoder_training:
@@ -294,8 +302,13 @@ def train(args):
         logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
         accelerator.log(logs, step=global_step)
 
+      if epoch == 0:
+        loss_list.append(current_loss)
+      else:
+        loss_total -= loss_list[step]
+        loss_list[step] = current_loss
       loss_total += current_loss
-      avr_loss = loss_total / (step+1)
+      avr_loss = loss_total / len(loss_list)
       logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
       progress_bar.set_postfix(**logs)
 
@@ -303,7 +316,7 @@ def train(args):
         break
 
     if args.logging_dir is not None:
-      logs = {"epoch_loss": loss_total / len(train_dataloader)}
+      logs = {"loss/epoch": loss_total / len(loss_list)}
       accelerator.log(logs, step=epoch+1)
 
   accelerator.wait_for_everyone()
 
diff --git a/train_network.py b/train_network.py
index 1b8046d..1489691 100644
--- a/train_network.py
+++ b/train_network.py
@@ -156,9 +156,12 @@ def train(args):
 
   # モデルを読み込む
   text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype)
-  # unnecessary, but work on low-ram device
-  text_encoder.to("cuda")
-  unet.to("cuda")
+
+  # work on low-ram device
+  if args.lowram:
+    text_encoder.to("cuda")
+    unet.to("cuda")
+
   # モデルに xformers とか memory efficient attention を組み込む
   train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
 
@@ -213,9 +216,18 @@ def train(args):
       raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
     print("use 8-bit Adam optimizer")
     optimizer_class = bnb.optim.AdamW8bit
+  elif args.use_lion_optimizer:
+    try:
+      import lion_pytorch
+    except ImportError:
+      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
+    print("use Lion optimizer")
+    optimizer_class = lion_pytorch.Lion
   else:
     optimizer_class = torch.optim.AdamW
 
+  optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
+
   trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
 
   # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
@@ -353,12 +365,14 @@ def train(args):
       "ss_max_bucket_reso": train_dataset.max_bucket_reso,
       "ss_seed": args.seed,
       "ss_keep_tokens": args.keep_tokens,
+      "ss_noise_offset": args.noise_offset,
       "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info),
       "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info),
       "ss_tag_frequency": json.dumps(train_dataset.tag_frequency),
       "ss_bucket_info": json.dumps(train_dataset.bucket_info),
       "ss_training_comment": args.training_comment,  # will not be updated after training
-      "ss_sd_scripts_commit_hash": train_util.get_git_revision_hash()
+      "ss_sd_scripts_commit_hash": train_util.get_git_revision_hash(),
+      "ss_optimizer": optimizer_name
     }
 
   # uncomment if another network is added
@@ -392,6 +406,8 @@ def train(args):
   if accelerator.is_main_process:
     accelerator.init_trackers("network_train")
 
+  loss_list = []
+  loss_total = 0.0
   for epoch in range(num_train_epochs):
     print(f"epoch {epoch+1}/{num_train_epochs}")
     train_dataset.set_current_epoch(epoch + 1)
@@ -400,7 +416,6 @@ def train(args):
 
     network.on_epoch_start(text_encoder, unet)
 
-    loss_total = 0
     for step, batch in enumerate(train_dataloader):
       with accelerator.accumulate(network):
         with torch.no_grad():
@@ -464,8 +479,13 @@ def train(args):
           global_step += 1
 
       current_loss = loss.detach().item()
+      if epoch == 0:
+        loss_list.append(current_loss)
+      else:
+        loss_total -= loss_list[step]
+        loss_list[step] = current_loss
       loss_total += current_loss
-      avr_loss = loss_total / (step+1)
+      avr_loss = loss_total / len(loss_list)
       logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
       progress_bar.set_postfix(**logs)
 
@@ -477,7 +497,7 @@ def train(args):
         break
 
     if args.logging_dir is not None:
-      logs = {"loss/epoch": loss_total / len(train_dataloader)}
+      logs = {"loss/epoch": loss_total / len(loss_list)}
       accelerator.log(logs, step=epoch+1)
 
   accelerator.wait_for_everyone()
 
diff --git a/train_textual_inversion.py b/train_textual_inversion.py
index 010bd04..ffec051 100644
--- a/train_textual_inversion.py
+++ b/train_textual_inversion.py
@@ -207,6 +207,13 @@ def train(args):
       raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
     print("use 8-bit Adam optimizer")
     optimizer_class = bnb.optim.AdamW8bit
+  elif args.use_lion_optimizer:
+    try:
+      import lion_pytorch
+    except ImportError:
+      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
+    print("use Lion optimizer")
+    optimizer_class = lion_pytorch.Lion
   else:
     optimizer_class = torch.optim.AdamW
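For reference, the `loss_list` / `loss_total` bookkeeping added to `train_db.py` and `train_network.py` above amounts to a moving average over one epoch's worth of steps: the first epoch fills the window, and each later step replaces the value stored for the same step index. The following is a standalone sketch of the same idea; the class and method names are made up and are not part of the patch.

```python
class MovingAverageLoss:
    """Illustrative restatement of the loss averaging added above (hypothetical names).

    Assumes every epoch has the same number of steps, so `step` always indexes
    an existing slot after the first epoch.
    """

    def __init__(self):
        self.loss_list = []
        self.loss_total = 0.0

    def update(self, epoch: int, step: int, current_loss: float) -> float:
        if epoch == 0:
            # First epoch: the window grows by one entry per step.
            self.loss_list.append(current_loss)
        else:
            # Later epochs: drop the value recorded for this step index last epoch.
            self.loss_total -= self.loss_list[step]
            self.loss_list[step] = current_loss
        self.loss_total += current_loss
        # Average over the window, not over the steps of the current epoch only.
        return self.loss_total / len(self.loss_list)
```

Because the divisor is the window length rather than the step count within the current epoch, the displayed average no longer resets and jumps at each epoch boundary.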