Update code to latest sd-script version

This commit is contained in:
bmaltais 2023-02-19 06:50:33 -05:00
parent 641a168e55
commit bb57c1a36e
7 changed files with 82 additions and 11 deletions

View File

@ -64,6 +64,12 @@ cp .\bitsandbytes_windows\main.py .\venv\Lib\site-packages\bitsandbytes\cuda_set
accelerate config
```
<!--
pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
pip install --use-pep517 --upgrade -r requirements.txt
pip install -U -I --no-deps xformers==0.0.16
-->
In the command prompt, it is as follows.

View File

@ -143,9 +143,23 @@ Then redo the installation instruction within the kohya_ss venv.
## Change history
* 2023/02/19 (v20.7.4):
- Add `--use_lion_optimizer` to each training script to use [Lion optimizer](https://github.com/lucidrains/lion-pytorch).
- Please install Lion optimizer with `pip install lion-pytorch` (it is not in `requirements.txt` currently).
- Add `--lowram` option to `train_network.py`. Load models to VRAM instead of RAM (for machines which have more VRAM than RAM, such as Colab and Kaggle). Thanks to Isotr0py!
- The default behavior (without `--lowram`) has been reverted to the same as before 14 Feb.
- Fixed git commit hash to be set correctly regardless of the working directory. Thanks to vladmandic!
* 2023/02/15 (v20.7.3):
- Update upgrade.ps1 script
- Integrate new kohya sd-script
- Noise offset is recorded to the metadata. Thanks to space-nuko!
- Show the moving average loss to prevent the loss from jumping around in `train_network.py` and `train_db.py`. Thanks to shirayu!
- Add support for multi-GPU training in `train_network.py`. Thanks to Isotr0py!
- Add `--verbose` option for `resize_lora.py`. For details, see [this PR](https://github.com/kohya-ss/sd-scripts/pull/179). Thanks to mgz-dev!
- Git commit hash is added to the metadata for LoRA. Thanks to space-nuko!
- Add `--noise_offset` option to each training script.
- Implementation of https://www.crosslabs.org//blog/diffusion-with-offset-noise
- This option may improve the ability to generate darker/lighter images. It may also work with LoRA. A minimal sketch of the idea follows this change history.
* 2023/02/11 (v20.7.2):
- `lora_interrogator.py` is added in `networks` folder. See `python networks\lora_interrogator.py -h` for usage.
- For LoRAs where the activation word is unknown, this script compares the output of the Text Encoder with the LoRA applied to the output without it, to find out which tokens are affected by the LoRA. Hopefully this lets you figure out the activation word. LoRAs trained with captions do not seem to be interrogatable.
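Note: the diffs below only add the `--noise_offset` argument and record it in the metadata; the effect described in the linked crosslabs post is a small constant added to the sampled noise before the forward diffusion step. The following is a minimal sketch of that idea, not the exact sd-scripts implementation; the function name and tensor shapes are illustrative:

```python
from typing import Optional

import torch

def sample_noise(latents: torch.Tensor, noise_offset: Optional[float]) -> torch.Tensor:
    """Gaussian noise for the forward diffusion step, optionally offset (illustrative helper)."""
    noise = torch.randn_like(latents)
    if noise_offset:
        # one random scalar per (batch, channel), broadcast over the spatial dims,
        # so the model also learns to shift overall brightness
        noise = noise + noise_offset * torch.randn(
            (latents.shape[0], latents.shape[1], 1, 1), device=latents.device
        )
    return noise
```

With the recommended `--noise_offset 0.1`, the added term stays small relative to the unit-variance noise.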

View File

@ -158,6 +158,13 @@ def train(args):
raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
print("use 8-bit Adam optimizer") print("use 8-bit Adam optimizer")
optimizer_class = bnb.optim.AdamW8bit optimizer_class = bnb.optim.AdamW8bit
elif args.use_lion_optimizer:
try:
import lion_pytorch
except ImportError:
raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
print("use Lion optimizer")
optimizer_class = lion_pytorch.Lion
else:
optimizer_class = torch.optim.AdamW
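For context, the `lion_pytorch.Lion` class selected above is a drop-in `torch.optim`-style optimizer. A minimal usage sketch (the model and hyperparameter values here are illustrative, not taken from these scripts):

```python
import torch
from lion_pytorch import Lion  # requires `pip install lion-pytorch`

model = torch.nn.Linear(8, 8)  # stand-in for the trainable parameters

# Lion is typically run with a smaller learning rate (and often larger weight
# decay) than AdamW; these values are only placeholders.
optimizer = Lion(model.parameters(), lr=1e-4, weight_decay=1e-2)

loss = model(torch.randn(4, 8)).pow(2).mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
```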

View File

@ -1103,7 +1103,7 @@ def addnet_hash_safetensors(b):
def get_git_revision_hash() -> str:
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__)).decode('ascii').strip()
except:
return "(unknown)"
@ -1389,6 +1389,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
help="max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長未指定で75、150または225が指定可") help="max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長未指定で75、150または225が指定可")
parser.add_argument("--use_8bit_adam", action="store_true", parser.add_argument("--use_8bit_adam", action="store_true",
help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使うbitsandbytesのインストールが必要") help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使うbitsandbytesのインストールが必要")
parser.add_argument("--use_lion_optimizer", action="store_true",
help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う lion-pytorch のインストールが必要)")
parser.add_argument("--mem_eff_attn", action="store_true", parser.add_argument("--mem_eff_attn", action="store_true",
help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う") help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う")
parser.add_argument("--xformers", action="store_true", parser.add_argument("--xformers", action="store_true",
@ -1423,6 +1425,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数デフォルト0") help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数デフォルト0")
parser.add_argument("--noise_offset", type=float, default=None, parser.add_argument("--noise_offset", type=float, default=None,
help="enable noise offset with this value (if enabled, around 0.1 is recommended) / Noise offsetを有効にしてこの値を設定する有効にする場合は0.1程度を推奨)") help="enable noise offset with this value (if enabled, around 0.1 is recommended) / Noise offsetを有効にしてこの値を設定する有効にする場合は0.1程度を推奨)")
parser.add_argument("--lowram", action="store_true",
help="enable low RAM optimization. e.g. load models to VRAM instead of RAM (for machines which have bigger VRAM than RAM such as Colab and Kaggle) / メインメモリが少ない環境向け最適化を有効にする。たとえばVRAMにモデルを読み込むなどColabやKaggleなどRAMに比べてVRAMが多い環境向け")
if support_dreambooth:
# DreamBooth training
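Taken together, the new flags parse like any other argparse switches; a small stand-alone sketch (this parser mirrors only the options added in this commit, nothing else):

```python
import argparse

# minimal stand-in parser covering only the flags added in this commit
parser = argparse.ArgumentParser()
parser.add_argument("--use_lion_optimizer", action="store_true")
parser.add_argument("--lowram", action="store_true")
parser.add_argument("--noise_offset", type=float, default=None)

args = parser.parse_args(["--use_lion_optimizer", "--noise_offset", "0.1"])
assert args.use_lion_optimizer is True
assert args.lowram is False          # defaults to off
assert args.noise_offset == 0.1
```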

View File

@ -124,6 +124,13 @@ def train(args):
raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
print("use 8-bit Adam optimizer") print("use 8-bit Adam optimizer")
optimizer_class = bnb.optim.AdamW8bit optimizer_class = bnb.optim.AdamW8bit
elif args.use_lion_optimizer:
try:
import lion_pytorch
except ImportError:
raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
print("use Lion optimizer")
optimizer_class = lion_pytorch.Lion
else:
optimizer_class = torch.optim.AdamW
@ -206,6 +213,8 @@ def train(args):
if accelerator.is_main_process:
accelerator.init_trackers("dreambooth")
loss_list = []
loss_total = 0.0
for epoch in range(num_train_epochs):
print(f"epoch {epoch+1}/{num_train_epochs}")
train_dataset.set_current_epoch(epoch + 1)
@ -216,7 +225,6 @@ def train(args):
if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
text_encoder.train()
loss_total = 0
for step, batch in enumerate(train_dataloader):
# stop training the Text Encoder at the specified number of steps
if global_step == args.stop_text_encoder_training:
@ -294,8 +302,13 @@ def train(args):
logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]} logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
accelerator.log(logs, step=global_step) accelerator.log(logs, step=global_step)
if epoch == 0:
loss_list.append(current_loss)
else:
loss_total -= loss_list[step]
loss_list[step] = current_loss
loss_total += current_loss
avr_loss = loss_total / (step+1)
avr_loss = loss_total / len(loss_list)
logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
@ -303,7 +316,7 @@ def train(args):
break
if args.logging_dir is not None:
logs = {"epoch_loss": loss_total / len(train_dataloader)}
logs = {"loss/epoch": loss_total / len(loss_list)}
accelerator.log(logs, step=epoch+1)
accelerator.wait_for_everyone()
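The `loss_list`/`loss_total` bookkeeping above replaces the old within-epoch running mean with a moving average over the most recent pass through the dataloader. A self-contained sketch of the same logic, with synthetic loss values:

```python
# mirrors the bookkeeping in the diff above; the loss values are synthetic
loss_list: list[float] = []
loss_total = 0.0

for epoch in range(2):
    for step, current_loss in enumerate([1.0, 0.5, 0.25]):
        if epoch == 0:
            # first epoch: the window is still being filled
            loss_list.append(current_loss)
        else:
            # later epochs: replace the value this step produced last time
            loss_total -= loss_list[step]
            loss_list[step] = current_loss
        loss_total += current_loss
        avr_loss = loss_total / len(loss_list)  # value shown in the progress bar
```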

View File

@ -156,9 +156,12 @@ def train(args):
# load the models
text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype)
# unnecessary, but work on low-ram device
text_encoder.to("cuda")
unet.to("cuda")
# work on low-ram device
if args.lowram:
text_encoder.to("cuda")
unet.to("cuda")
# incorporate xformers or memory efficient attention into the model
train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
@ -213,9 +216,18 @@ def train(args):
raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
print("use 8-bit Adam optimizer") print("use 8-bit Adam optimizer")
optimizer_class = bnb.optim.AdamW8bit optimizer_class = bnb.optim.AdamW8bit
elif args.use_lion_optimizer:
try:
import lion_pytorch
except ImportError:
raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
print("use Lion optimizer")
optimizer_class = lion_pytorch.Lion
else:
optimizer_class = torch.optim.AdamW
optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
# beta and weight decay appear to use the default values in both diffusers DreamBooth and DreamBooth SD, so those options are omitted for now
@ -353,12 +365,14 @@ def train(args):
"ss_max_bucket_reso": train_dataset.max_bucket_reso, "ss_max_bucket_reso": train_dataset.max_bucket_reso,
"ss_seed": args.seed, "ss_seed": args.seed,
"ss_keep_tokens": args.keep_tokens, "ss_keep_tokens": args.keep_tokens,
"ss_noise_offset": args.noise_offset,
"ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info), "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info),
"ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info), "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info),
"ss_tag_frequency": json.dumps(train_dataset.tag_frequency), "ss_tag_frequency": json.dumps(train_dataset.tag_frequency),
"ss_bucket_info": json.dumps(train_dataset.bucket_info), "ss_bucket_info": json.dumps(train_dataset.bucket_info),
"ss_training_comment": args.training_comment, # will not be updated after training "ss_training_comment": args.training_comment, # will not be updated after training
"ss_sd_scripts_commit_hash": train_util.get_git_revision_hash() "ss_sd_scripts_commit_hash": train_util.get_git_revision_hash(),
"ss_optimizer": optimizer_name
}
# uncomment if another network is added
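The metadata keys added here (`ss_noise_offset`, `ss_optimizer`) live in the safetensors header alongside the existing `ss_*` entries and can be read back with the `safetensors` library. A hedged sketch, with an illustrative file path:

```python
from safetensors import safe_open

# "my_lora.safetensors" is an illustrative path to a LoRA written by train_network.py
with safe_open("my_lora.safetensors", framework="pt") as f:
    metadata = f.metadata() or {}

# header values are stored as strings
print(metadata.get("ss_optimizer"))              # e.g. "torch.optim.adamw.AdamW"
print(metadata.get("ss_noise_offset"))
print(metadata.get("ss_sd_scripts_commit_hash"))
```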
@ -392,6 +406,8 @@ def train(args):
if accelerator.is_main_process:
accelerator.init_trackers("network_train")
loss_list = []
loss_total = 0.0
for epoch in range(num_train_epochs):
print(f"epoch {epoch+1}/{num_train_epochs}")
train_dataset.set_current_epoch(epoch + 1)
@ -400,7 +416,6 @@ def train(args):
network.on_epoch_start(text_encoder, unet)
loss_total = 0
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(network):
with torch.no_grad():
@ -464,8 +479,13 @@ def train(args):
global_step += 1
current_loss = loss.detach().item()
if epoch == 0:
loss_list.append(current_loss)
else:
loss_total -= loss_list[step]
loss_list[step] = current_loss
loss_total += current_loss
avr_loss = loss_total / (step+1)
avr_loss = loss_total / len(loss_list)
logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
@ -477,7 +497,7 @@ def train(args):
break
if args.logging_dir is not None:
logs = {"loss/epoch": loss_total / len(train_dataloader)}
logs = {"loss/epoch": loss_total / len(loss_list)}
accelerator.log(logs, step=epoch+1)
accelerator.wait_for_everyone()

View File

@ -207,6 +207,13 @@ def train(args):
raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
print("use 8-bit Adam optimizer") print("use 8-bit Adam optimizer")
optimizer_class = bnb.optim.AdamW8bit optimizer_class = bnb.optim.AdamW8bit
elif args.use_lion_optimizer:
try:
import lion_pytorch
except ImportError:
raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
print("use Lion optimizer")
optimizer_class = lion_pytorch.Lion
else:
optimizer_class = torch.optim.AdamW