commit 49bfd3a618

README.md
@@ -163,6 +163,20 @@ This will store a backup file with your current locally installed pip packages

## Change History

* 2023/02/22 (v20.8.0):
    - Add GUI support for optimizers: `AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, AdaFactor`
    - Add GUI support for `--noise_offset`
    - Refactor optimizer options. Thanks to mgz-dev!
        - Add `--optimizer_type` option for each training script. Please see help. Japanese documentation is [here](https://github-com.translate.goog/kohya-ss/sd-scripts/blob/main/train_network_README-ja.md?_x_tr_sl=fr&_x_tr_tl=en&_x_tr_hl=en-US&_x_tr_pto=wapp#%E3%82%AA%E3%83%97%E3%83%86%E3%82%A3%E3%83%9E%E3%82%A4%E3%82%B6%E3%81%AE%E6%8C%87%E5%AE%9A%E3%81%AB%E3%81%A4%E3%81%84%E3%81%A6).
        - `--use_8bit_adam` and `--use_lion_optimizer` still work and will override `--optimizer_type` for backward compatibility.
    - Add SGDNesterov and its 8-bit variant.
    - Add the [D-Adaptation](https://github.com/facebookresearch/dadaptation) optimizer. Thanks to BootsofLagrangian and all!
        - Please install the D-Adaptation optimizer with `pip install dadaptation` (it is not in requirements.txt currently).
        - Please see https://github.com/kohya-ss/sd-scripts/issues/181 for details.
    - Add the AdaFactor optimizer. Thanks to Toshiaki!
    - Extra lr scheduler settings (num_cycles etc.) now also work in training scripts other than `train_network.py`.
    - Add `--max_grad_norm` option to each training script for gradient clipping; `0.0` disables clipping.
    - Symbolic links can now be loaded by each training script. Thanks to TkskKurumi!
* 2023/02/19 (v20.7.4):
    - Add `--use_lion_optimizer` to each training script to use the [Lion optimizer](https://github.com/lucidrains/lion-pytorch).
        - Please install the Lion optimizer with `pip install lion-pytorch` (it is not in `requirements.txt` currently).
|
@ -90,6 +90,7 @@ def save_configuration(
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer_args,noise_offset,
|
||||
):
|
||||
# Get list of function parameters and values
|
||||
parameters = list(locals().items())
|
||||
@ -122,6 +123,13 @@ def save_configuration(
|
||||
]
|
||||
}
|
||||
|
||||
# Extract the destination directory from the file path
|
||||
destination_directory = os.path.dirname(file_path)
|
||||
|
||||
# Create the destination directory if it doesn't exist
|
||||
if not os.path.exists(destination_directory):
|
||||
os.makedirs(destination_directory)
|
||||
|
||||
# Save the data to the selected file
|
||||
with open(file_path, 'w') as file:
|
||||
json.dump(variables, file, indent=2)
|
||||
@ -181,6 +189,7 @@ def open_configuration(
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer_args,noise_offset,
|
||||
):
|
||||
# Get list of function parameters and values
|
||||
parameters = list(locals().items())
|
||||
@ -256,6 +265,7 @@ def train_model(
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer_args,noise_offset,
|
||||
):
|
||||
if pretrained_model_name_or_path == '':
|
||||
msgbox('Source model information is missing')
|
||||
@ -400,7 +410,8 @@ def train_model(
|
||||
seed=seed,
|
||||
caption_extension=caption_extension,
|
||||
cache_latents=cache_latents,
|
||||
optimizer=optimizer
|
||||
optimizer=optimizer,
|
||||
optimizer_args=optimizer_args,
|
||||
)
|
||||
|
||||
run_cmd += run_cmd_advanced_training(
|
||||
@ -425,6 +436,7 @@ def train_model(
|
||||
bucket_reso_steps=bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs=caption_dropout_every_n_epochs,
|
||||
caption_dropout_rate=caption_dropout_rate,
|
||||
noise_offset=noise_offset,
|
||||
)
|
||||
|
||||
print(run_cmd)
|
||||
@ -545,7 +557,7 @@ def dreambooth_tab(
|
||||
seed,
|
||||
caption_extension,
|
||||
cache_latents,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,
|
||||
) = gradio_training(
|
||||
learning_rate_value='1e-5',
|
||||
lr_scheduler_value='cosine',
|
||||
@ -603,7 +615,7 @@ def dreambooth_tab(
|
||||
bucket_no_upscale,
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,noise_offset,
|
||||
) = gradio_advanced_training()
|
||||
color_aug.change(
|
||||
color_aug_changed,
|
||||
@ -673,7 +685,7 @@ def dreambooth_tab(
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
]
|
||||
|
||||
button_open_config.click(
|
||||
|
fine_tune.py
@@ -14,9 +14,6 @@ from diffusers import DDPMScheduler

import library.train_util as train_util

import torch.optim as optim
import dadaptation


def collate_fn(examples):
  return examples[0]
@@ -152,29 +149,7 @@ def train(args):

  # prepare the classes needed for training
  print("prepare optimizer, data loader etc.")

  # use 8-bit Adam
  if args.use_8bit_adam:
    try:
      import bitsandbytes as bnb
    except ImportError:
      raise ImportError("No bitsandbytes / bitsandbytesがインストールされていないようです")
    print("use 8-bit Adam optimizer")
    optimizer_class = bnb.optim.AdamW8bit
  elif args.use_lion_optimizer:
    try:
      import lion_pytorch
    except ImportError:
      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
    print("use Lion optimizer")
    optimizer_class = lion_pytorch.Lion
  else:
    optimizer_class = torch.optim.AdamW

  # betas and weight decay seem to use the default values in both diffusers DreamBooth and DreamBooth SD, so those options are omitted for now
  # optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)
  print('enable dadatation.')
  optimizer = dadaptation.DAdaptAdam(params_to_optimize, lr=1.0, decouple=True, weight_decay=0)
  _, _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize)

  # prepare the dataloader
  # number of DataLoader processes: 0 means the main process
@@ -188,20 +163,9 @@ def train(args):
    print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")

  # prepare the lr scheduler
  # lr_scheduler = diffusers.optimization.get_scheduler(
  #     args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)

  # For Adam
  # lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
  #                                            lr_lambda=[lambda epoch: 1],
  #                                            last_epoch=-1,
  #                                            verbose=False)

  # For SGD optim
  lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer,
                                             lr_lambda=[lambda epoch: 1],
                                             last_epoch=-1,
                                             verbose=True)
  lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
                                              num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
                                              num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)

  # experimental feature: train in fp16 including the gradients, by casting the whole model to fp16
  if args.full_fp16:
@@ -303,11 +267,11 @@ def train(args):
        loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="mean")

        accelerator.backward(loss)
        if accelerator.sync_gradients:
        if accelerator.sync_gradients and args.max_grad_norm != 0.0:
          params_to_clip = []
          for m in training_models:
            params_to_clip.extend(m.parameters())
          accelerator.clip_grad_norm_(params_to_clip, 1.0)  # args.max_grad_norm)
          accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

        optimizer.step()
        lr_scheduler.step()
@@ -320,23 +284,22 @@ def train(args):

      current_loss = loss.detach().item()  # this is the mean, so batch size should not matter
      if args.logging_dir is not None:
        # logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
        # accelerator.log(logs, step=global_step)
        logs = {"loss": current_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}
        logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
        if args.optimizer_type.lower() == "DAdaptation".lower():  # tracking d*lr value
          logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d']*lr_scheduler.optimizers[0].param_groups[0]['lr']
        accelerator.log(logs, step=global_step)

      # TODO switch to a moving average
      loss_total += current_loss
      avr_loss = loss_total / (step+1)
      # logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
      # progress_bar.set_postfix(**logs)
      logs = {"avg_loss": avr_loss, "dlr": optimizer.param_groups[0]['d']*optimizer.param_groups[0]['lr']}  # , "lr": lr_scheduler.get_last_lr()[0]}
      logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
      progress_bar.set_postfix(**logs)

      if global_step >= args.max_train_steps:
        break

    if args.logging_dir is not None:
      logs = {"epoch_loss": loss_total / len(train_dataloader)}
      logs = {"loss/epoch": loss_total / len(train_dataloader)}
      accelerator.log(logs, step=epoch+1)

    accelerator.wait_for_everyone()
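As a hedged sketch of where the `lr/d*lr` metric logged above comes from: it simply re-reads the adaptive scale `d` that DAdaptAdam keeps in its param groups, exactly as the logging line does.

```python
def dadaptation_d_times_lr(lr_scheduler):
    # Same expression as the logging line above: effective learning rate of the
    # first param group, i.e. the D-Adaptation estimate d multiplied by lr.
    group = lr_scheduler.optimizers[0].param_groups[0]
    return group['d'] * group['lr']
```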
@@ -372,6 +335,7 @@ if __name__ == '__main__':
  train_util.add_dataset_arguments(parser, False, True, True)
  train_util.add_training_arguments(parser, False)
  train_util.add_sd_saving_arguments(parser)
  train_util.add_optimizer_arguments(parser)

  parser.add_argument("--diffusers_xformers", action='store_true',
                      help='use xformers by diffusers / Diffusersでxformersを使用する')
@ -85,7 +85,7 @@ def save_configuration(
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
):
|
||||
# Get list of function parameters and values
|
||||
parameters = list(locals().items())
|
||||
@ -104,8 +104,8 @@ def save_configuration(
|
||||
|
||||
# print(file_path)
|
||||
|
||||
if file_path == None:
|
||||
return original_file_path
|
||||
if file_path == None or file_path == '':
|
||||
return original_file_path # In case a file_path was provided and the user decide to cancel the open action
|
||||
|
||||
# Return the values of the variables as a dictionary
|
||||
variables = {
|
||||
@ -118,6 +118,13 @@ def save_configuration(
|
||||
]
|
||||
}
|
||||
|
||||
# Extract the destination directory from the file path
|
||||
destination_directory = os.path.dirname(file_path)
|
||||
|
||||
# Create the destination directory if it doesn't exist
|
||||
if not os.path.exists(destination_directory):
|
||||
os.makedirs(destination_directory)
|
||||
|
||||
# Save the data to the selected file
|
||||
with open(file_path, 'w') as file:
|
||||
json.dump(variables, file, indent=2)
|
||||
@ -182,7 +189,7 @@ def open_config_file(
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
):
|
||||
# Get list of function parameters and values
|
||||
parameters = list(locals().items())
|
||||
@ -264,7 +271,7 @@ def train_model(
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
):
|
||||
# create caption json file
|
||||
if generate_caption_database:
|
||||
@ -390,6 +397,7 @@ def train_model(
|
||||
caption_extension=caption_extension,
|
||||
cache_latents=cache_latents,
|
||||
optimizer=optimizer,
|
||||
optimizer_args=optimizer_args,
|
||||
)
|
||||
|
||||
run_cmd += run_cmd_advanced_training(
|
||||
@ -414,6 +422,7 @@ def train_model(
|
||||
bucket_reso_steps=bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs=caption_dropout_every_n_epochs,
|
||||
caption_dropout_rate=caption_dropout_rate,
|
||||
noise_offset=noise_offset,
|
||||
)
|
||||
|
||||
print(run_cmd)
|
||||
@ -568,7 +577,7 @@ def finetune_tab():
|
||||
seed,
|
||||
caption_extension,
|
||||
cache_latents,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,
|
||||
) = gradio_training(learning_rate_value='1e-5')
|
||||
with gr.Row():
|
||||
dataset_repeats = gr.Textbox(label='Dataset repeats', value=40)
|
||||
@ -600,7 +609,7 @@ def finetune_tab():
|
||||
bucket_no_upscale,
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,noise_offset,
|
||||
) = gradio_advanced_training()
|
||||
color_aug.change(
|
||||
color_aug_changed,
|
||||
@ -666,7 +675,7 @@ def finetune_tab():
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
]
|
||||
|
||||
button_run.click(train_model, inputs=settings_list)
|
||||
|
@@ -469,11 +469,20 @@ def gradio_training(
                label='Optimizer',
                choices=[
                    'AdamW',
                    'AdamW8bit',
                    'Adafactor',
                    'DAdaptation',
                    'Lion',
                    'SGDNesterov',
                    'SGDNesterov8bit'
                ],
                value="AdamW",
                interactive=True,
            )
        with gr.Row():
            optimizer_args = gr.Textbox(
                label='Optimizer extra arguments', placeholder='(Optional) eg: relative_step=True scale_parameter=True warmup_init=True'
            )
    return (
        learning_rate,
        lr_scheduler,
@@ -488,6 +497,7 @@ def gradio_training(
        caption_extension,
        cache_latents,
        optimizer,
        optimizer_args,
    )


@@ -522,7 +532,9 @@ def run_cmd_training(**kwargs):
        if kwargs.get('caption_extension')
        else '',
        ' --cache_latents' if kwargs.get('cache_latents') else '',
        ' --use_lion_optimizer' if kwargs.get('optimizer') == 'Lion' else '',
        # ' --use_lion_optimizer' if kwargs.get('optimizer') == 'Lion' else '',
        f' --optimizer_type="{kwargs.get("optimizer", "AdamW")}"',
        f' --optimizer_args {kwargs.get("optimizer_args", "")}' if not kwargs.get('optimizer_args') == '' else '',
    ]
    run_cmd = ''.join(options)
    return run_cmd
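For illustration, a minimal sketch (with hypothetical kwargs, not part of the commit) of the command fragment that the two new optimizer entries in `run_cmd_training` produce:

```python
# Hypothetical inputs; mirrors the two new list entries in run_cmd_training above.
kwargs = {"optimizer": "AdaFactor", "optimizer_args": "relative_step=True scale_parameter=True"}

options = [
    f' --optimizer_type="{kwargs.get("optimizer", "AdamW")}"',
    f' --optimizer_args {kwargs.get("optimizer_args", "")}' if not kwargs.get('optimizer_args') == '' else '',
]
print(''.join(options))
# -> ' --optimizer_type="AdaFactor" --optimizer_args relative_step=True scale_parameter=True'
```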
@@ -597,6 +609,10 @@ def gradio_advanced_training():
            random_crop = gr.Checkbox(
                label='Random crop instead of center crop', value=False
            )
            noise_offset = gr.Textbox(
                label='Noise offset (0 - 1)', placeholder='(Optional) eg: 0.1'
            )

        with gr.Row():
            caption_dropout_every_n_epochs = gr.Number(
                label="Dropout caption every n epochs",
@@ -644,7 +660,7 @@ def gradio_advanced_training():
        bucket_no_upscale,
        random_crop,
        bucket_reso_steps,
        caption_dropout_every_n_epochs, caption_dropout_rate,
        caption_dropout_every_n_epochs, caption_dropout_rate, noise_offset,
    )


@@ -695,6 +711,9 @@ def run_cmd_advanced_training(**kwargs):
        else '',
        ' --bucket_no_upscale' if kwargs.get('bucket_no_upscale') else '',
        ' --random_crop' if kwargs.get('random_crop') else '',
        f' --noise_offset={float(kwargs.get("noise_offset", 0))}'
        if not kwargs.get('noise_offset', '') == ''
        else '',
    ]
    run_cmd = ''.join(options)
    return run_cmd
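A quick, hypothetical illustration of the `--noise_offset` branch above (the sample values are made up; an empty string produces no flag):

```python
# Same expression as in run_cmd_advanced_training above, shown in isolation.
def noise_offset_flag(noise_offset):
    return (f' --noise_offset={float(noise_offset)}'
            if not noise_offset == '' else '')

print(noise_offset_flag('0.1'))  # ' --noise_offset=0.1'
print(noise_offset_flag(''))     # '' (flag omitted)
```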
@ -1,10 +1,12 @@
|
||||
# common functions for training
|
||||
|
||||
import argparse
|
||||
import importlib
|
||||
import json
|
||||
import shutil
|
||||
import time
|
||||
from typing import Dict, List, NamedTuple, Tuple
|
||||
from typing import Optional, Union
|
||||
from accelerate import Accelerator
|
||||
from torch.autograd.function import Function
|
||||
import glob
|
||||
@ -17,9 +19,12 @@ from io import BytesIO
|
||||
|
||||
from tqdm import tqdm
|
||||
import torch
|
||||
from torch.optim import Optimizer
|
||||
from torchvision import transforms
|
||||
from transformers import CLIPTokenizer
|
||||
import transformers
|
||||
import diffusers
|
||||
from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION
|
||||
from diffusers import DDPMScheduler, StableDiffusionPipeline
|
||||
import albumentations as albu
|
||||
import numpy as np
|
||||
@@ -1366,6 +1371,33 @@ def add_sd_models_arguments(parser: argparse.ArgumentParser):
                      help="pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル")


def add_optimizer_arguments(parser: argparse.ArgumentParser):
  parser.add_argument("--optimizer_type", type=str, default="AdamW",
                      help="Optimizer to use / オプティマイザの種類: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, AdaFactor")

  # backward compatibility
  parser.add_argument("--use_8bit_adam", action="store_true",
                      help="use 8bit AdamW optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)")
  parser.add_argument("--use_lion_optimizer", action="store_true",
                      help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う( lion-pytorch のインストールが必要)")

  parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率")
  parser.add_argument("--max_grad_norm", default=1.0, type=float,
                      help="Max gradient norm, 0 for no clipping / 勾配正規化の最大norm、0でclippingを行わない")

  parser.add_argument("--optimizer_args", type=str, default=None, nargs='*',
                      help="additional arguments for optimizer (like \"weight_decay=0.01 betas=0.9,0.999 ...\") / オプティマイザの追加引数(例: \"weight_decay=0.01 betas=0.9,0.999 ...\")")

  parser.add_argument("--lr_scheduler", type=str, default="constant",
                      help="scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup, adafactor")
  parser.add_argument("--lr_warmup_steps", type=int, default=0,
                      help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)")
  parser.add_argument("--lr_scheduler_num_cycles", type=int, default=1,
                      help="Number of restarts for cosine scheduler with restarts / cosine with restartsスケジューラでのリスタート回数")
  parser.add_argument("--lr_scheduler_power", type=float, default=1,
                      help="Polynomial power for polynomial scheduler / polynomialスケジューラでのpolynomial power")

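As a usage sketch (not part of the diff), the new flags can be parsed like any other sd-scripts arguments; the `library.train_util` import assumes the package layout shown in this commit:

```python
import argparse
import library.train_util as train_util  # assumes the sd-scripts layout used in this commit

parser = argparse.ArgumentParser()
train_util.add_optimizer_arguments(parser)

# --optimizer_args is nargs='*', so each key=value pair is a separate token.
args = parser.parse_args([
    "--optimizer_type", "AdaFactor",
    "--optimizer_args", "relative_step=True", "scale_parameter=True", "warmup_init=True",
])
print(args.optimizer_type)   # AdaFactor
print(args.optimizer_args)   # ['relative_step=True', 'scale_parameter=True', 'warmup_init=True']
print(args.max_grad_norm)    # 1.0 (default; 0 disables gradient clipping)
```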
def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: bool):
|
||||
parser.add_argument("--output_dir", type=str, default=None,
|
||||
help="directory to output trained model / 学習後のモデル出力先ディレクトリ")
|
||||
@ -1387,10 +1419,6 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
|
||||
parser.add_argument("--train_batch_size", type=int, default=1, help="batch size for training / 学習時のバッチサイズ")
|
||||
parser.add_argument("--max_token_length", type=int, default=None, choices=[None, 150, 225],
|
||||
help="max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)")
|
||||
parser.add_argument("--use_8bit_adam", action="store_true",
|
||||
help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)")
|
||||
parser.add_argument("--use_lion_optimizer", action="store_true",
|
||||
help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う( lion-pytorch のインストールが必要)")
|
||||
parser.add_argument("--mem_eff_attn", action="store_true",
|
||||
help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う")
|
||||
parser.add_argument("--xformers", action="store_true",
|
||||
@ -1398,7 +1426,6 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
|
||||
parser.add_argument("--vae", type=str, default=None,
|
||||
help="path to checkpoint of vae to replace / VAEを入れ替える場合、VAEのcheckpointファイルまたはディレクトリ")
|
||||
|
||||
parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率")
|
||||
parser.add_argument("--max_train_steps", type=int, default=1600, help="training steps / 学習ステップ数")
|
||||
parser.add_argument("--max_train_epochs", type=int, default=None,
|
||||
help="training epochs (overrides max_train_steps) / 学習エポック数(max_train_stepsを上書きします)")
|
||||
@ -1419,10 +1446,6 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
|
||||
parser.add_argument("--logging_dir", type=str, default=None,
|
||||
help="enable logging and output TensorBoard log to this directory / ログ出力を有効にしてこのディレクトリにTensorBoard用のログを出力する")
|
||||
parser.add_argument("--log_prefix", type=str, default=None, help="add prefix for each log directory / ログディレクトリ名の先頭に追加する文字列")
|
||||
parser.add_argument("--lr_scheduler", type=str, default="constant",
|
||||
help="scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup")
|
||||
parser.add_argument("--lr_warmup_steps", type=int, default=0,
|
||||
help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)")
|
||||
parser.add_argument("--noise_offset", type=float, default=None,
|
||||
help="enable noise offset with this value (if enabled, around 0.1 is recommended) / Noise offsetを有効にしてこの値を設定する(有効にする場合は0.1程度を推奨)")
|
||||
parser.add_argument("--lowram", action="store_true",
|
||||
@@ -1504,6 +1527,238 @@ def add_sd_saving_arguments(parser: argparse.ArgumentParser):

# region utils


def get_optimizer(args, trainable_params):
  # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, Adafactor"

  optimizer_type = args.optimizer_type
  if args.use_8bit_adam:
    print(f"*** use_8bit_adam option is specified. optimizer_type is ignored / use_8bit_adamオプションが指定されているためoptimizer_typeは無視されます")
    optimizer_type = "AdamW8bit"
  elif args.use_lion_optimizer:
    print(f"*** use_lion_optimizer option is specified. optimizer_type is ignored / use_lion_optimizerオプションが指定されているためoptimizer_typeは無視されます")
    optimizer_type = "Lion"
  optimizer_type = optimizer_type.lower()

  # parse the extra arguments: only bool, float and tuple values are supported
  optimizer_kwargs = {}
  if args.optimizer_args is not None and len(args.optimizer_args) > 0:
    for arg in args.optimizer_args:
      key, value = arg.split('=')

      value = value.split(",")
      for i in range(len(value)):
        if value[i].lower() == "true" or value[i].lower() == "false":
          value[i] = (value[i].lower() == "true")
        else:
          value[i] = float(value[i])
      if len(value) == 1:
        value = value[0]
      else:
        value = tuple(value)

      optimizer_kwargs[key] = value
    print("optkwargs:", optimizer_kwargs)

  lr = args.learning_rate

if optimizer_type == "AdamW8bit".lower():
|
||||
try:
|
||||
import bitsandbytes as bnb
|
||||
except ImportError:
|
||||
raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
|
||||
print(f"use 8-bit AdamW optimizer | {optimizer_kwargs}")
|
||||
optimizer_class = bnb.optim.AdamW8bit
|
||||
optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
|
||||
|
||||
elif optimizer_type == "SGDNesterov8bit".lower():
|
||||
try:
|
||||
import bitsandbytes as bnb
|
||||
except ImportError:
|
||||
raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
|
||||
print(f"use 8-bit SGD with Nesterov optimizer | {optimizer_kwargs}")
|
||||
if "momentum" not in optimizer_kwargs:
|
||||
print(f"8-bit SGD with Nesterov must be with momentum, set momentum to 0.9 / 8-bit SGD with Nesterovはmomentum指定が必須のため0.9に設定します")
|
||||
optimizer_kwargs["momentum"] = 0.9
|
||||
|
||||
optimizer_class = bnb.optim.SGD8bit
|
||||
optimizer = optimizer_class(trainable_params, lr=lr, nesterov=True, **optimizer_kwargs)
|
||||
|
||||
elif optimizer_type == "Lion".lower():
|
||||
try:
|
||||
import lion_pytorch
|
||||
except ImportError:
|
||||
raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
|
||||
print(f"use Lion optimizer | {optimizer_kwargs}")
|
||||
optimizer_class = lion_pytorch.Lion
|
||||
optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
|
||||
|
||||
elif optimizer_type == "SGDNesterov".lower():
|
||||
print(f"use SGD with Nesterov optimizer | {optimizer_kwargs}")
|
||||
if "momentum" not in optimizer_kwargs:
|
||||
print(f"SGD with Nesterov must be with momentum, set momentum to 0.9 / SGD with Nesterovはmomentum指定が必須のため0.9に設定します")
|
||||
optimizer_kwargs["momentum"] = 0.9
|
||||
|
||||
optimizer_class = torch.optim.SGD
|
||||
optimizer = optimizer_class(trainable_params, lr=lr, nesterov=True, **optimizer_kwargs)
|
||||
|
||||
elif optimizer_type == "DAdaptation".lower():
|
||||
try:
|
||||
import dadaptation
|
||||
except ImportError:
|
||||
raise ImportError("No dadaptation / dadaptation がインストールされていないようです")
|
||||
print(f"use D-Adaptation Adam optimizer | {optimizer_kwargs}")
|
||||
|
||||
min_lr = lr
|
||||
if type(trainable_params) == list and type(trainable_params[0]) == dict:
|
||||
for group in trainable_params:
|
||||
min_lr = min(min_lr, group.get("lr", lr))
|
||||
|
||||
if min_lr <= 0.1:
|
||||
print(
|
||||
f'learning rate is too low. If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: {min_lr}')
|
||||
print('recommend option: lr=1.0 / 推奨は1.0です')
|
||||
|
||||
optimizer_class = dadaptation.DAdaptAdam
|
||||
optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
|
||||
|
||||
elif optimizer_type == "Adafactor".lower():
|
||||
# 引数を確認して適宜補正する
|
||||
if "relative_step" not in optimizer_kwargs:
|
||||
optimizer_kwargs["relative_step"] = True # default
|
||||
if not optimizer_kwargs["relative_step"] and optimizer_kwargs.get("warmup_init", False):
|
||||
print(f"set relative_step to True because warmup_init is True / warmup_initがTrueのためrelative_stepをTrueにします")
|
||||
optimizer_kwargs["relative_step"] = True
|
||||
print(f"use Adafactor optimizer | {optimizer_kwargs}")
|
||||
|
||||
if optimizer_kwargs["relative_step"]:
|
||||
print(f"relative_step is true / relative_stepがtrueです")
|
||||
if lr != 0.0:
|
||||
print(f"learning rate is used as initial_lr / 指定したlearning rate はinitial_lrとして使用されます: {lr}")
|
||||
args.learning_rate = None
|
||||
|
||||
# trainable_paramsがgroupだった時の処理:lrを削除する
|
||||
if type(trainable_params) == list and type(trainable_params[0]) == dict:
|
||||
has_group_lr = False
|
||||
for group in trainable_params:
|
||||
p = group.pop("lr", None)
|
||||
has_group_lr = has_group_lr or (p is not None)
|
||||
|
||||
if has_group_lr:
|
||||
# 一応argsを無効にしておく TODO 依存関係が逆転してるのであまり望ましくない
|
||||
print(f"unet_lr and text_encoder_lr are ignored / unet_lrとtext_encoder_lrは無視されます")
|
||||
args.unet_lr = None
|
||||
args.text_encoder_lr = None
|
||||
|
||||
if args.lr_scheduler != "adafactor":
|
||||
print(f"use adafactor_scheduler / スケジューラにadafactor_schedulerを使用します")
|
||||
args.lr_scheduler = f"adafactor:{lr}" # ちょっと微妙だけど
|
||||
|
||||
lr = None
|
||||
else:
|
||||
if args.max_grad_norm != 0.0:
|
||||
print(f"because max_grad_norm is set, clip_grad_norm is enabled. consider set to 0 / max_grad_normが設定されているためclip_grad_normが有効になります。0に設定して無効にしたほうがいいかもしれません")
|
||||
if args.lr_scheduler != "constant_with_warmup":
|
||||
print(f"constant_with_warmup will be good / スケジューラはconstant_with_warmupが良いかもしれません")
|
||||
if optimizer_kwargs.get("clip_threshold", 1.0) != 1.0:
|
||||
print(f"clip_threshold=1.0 will be good / clip_thresholdは1.0が良いかもしれません")
|
||||
|
||||
optimizer_class = transformers.optimization.Adafactor
|
||||
optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
|
||||
|
||||
elif optimizer_type == "AdamW".lower():
|
||||
print(f"use AdamW optimizer | {optimizer_kwargs}")
|
||||
optimizer_class = torch.optim.AdamW
|
||||
optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
|
||||
|
||||
else:
|
||||
# 任意のoptimizerを使う
|
||||
optimizer_type = args.optimizer_type # lowerでないやつ(微妙)
|
||||
print(f"use {optimizer_type} | {optimizer_kwargs}")
|
||||
if "." not in optimizer_type:
|
||||
optimizer_module = torch.optim
|
||||
else:
|
||||
values = optimizer_type.split(".")
|
||||
optimizer_module = importlib.import_module(".".join(values[:-1]))
|
||||
optimizer_type = values[-1]
|
||||
|
||||
optimizer_class = getattr(optimizer_module, optimizer_type)
|
||||
optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
|
||||
|
||||
optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
|
||||
optimizer_args = ",".join([f"{k}={v}" for k, v in optimizer_kwargs.items()])
|
||||
|
||||
return optimizer_name, optimizer_args, optimizer
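For reference, a standalone sketch of how the `--optimizer_args` values are parsed by the loop near the top of `get_optimizer` (the sample arguments are hypothetical):

```python
def parse_optimizer_args(optimizer_args):
    # Mirrors get_optimizer: each item is "key=value"; the value is split on commas,
    # "true"/"false" become bools and everything else a float; a single element stays
    # a scalar, several become a tuple.
    kwargs = {}
    for arg in optimizer_args:
        key, value = arg.split('=')
        parts = [(p.lower() == "true") if p.lower() in ("true", "false") else float(p)
                 for p in value.split(",")]
        kwargs[key] = parts[0] if len(parts) == 1 else tuple(parts)
    return kwargs

print(parse_optimizer_args(["weight_decay=0.01", "betas=.9,.999"]))
# {'weight_decay': 0.01, 'betas': (0.9, 0.999)}
```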
|
||||
|
||||
|
||||
# Monkeypatch: a newer get_scheduler() function overriding the current version of diffusers.optimization.get_scheduler.
# The code is taken from https://github.com/huggingface/diffusers diffusers.optimization, commit d87cc15977b87160c30abaace3894e802ad9e1e6,
# which is a newer release of diffusers than the one currently packaged with sd-scripts.
# This code can be removed once a newer diffusers version (v0.12.1 or greater) is tested and adopted in sd-scripts.
|
||||
|
||||
|
||||
def get_scheduler_fix(
|
||||
name: Union[str, SchedulerType],
|
||||
optimizer: Optimizer,
|
||||
num_warmup_steps: Optional[int] = None,
|
||||
num_training_steps: Optional[int] = None,
|
||||
num_cycles: int = 1,
|
||||
power: float = 1.0,
|
||||
):
|
||||
"""
|
||||
Unified API to get any scheduler from its name.
|
||||
Args:
|
||||
name (`str` or `SchedulerType`):
|
||||
The name of the scheduler to use.
|
||||
optimizer (`torch.optim.Optimizer`):
|
||||
The optimizer that will be used during training.
|
||||
num_warmup_steps (`int`, *optional*):
|
||||
The number of warmup steps to do. This is not required by all schedulers (hence the argument being
|
||||
optional), the function will raise an error if it's unset and the scheduler type requires it.
|
||||
num_training_steps (`int`, *optional*):
|
||||
The number of training steps to do. This is not required by all schedulers (hence the argument being
|
||||
optional), the function will raise an error if it's unset and the scheduler type requires it.
|
||||
num_cycles (`int`, *optional*):
|
||||
The number of hard restarts used in `COSINE_WITH_RESTARTS` scheduler.
|
||||
power (`float`, *optional*, defaults to 1.0):
|
||||
Power factor. See `POLYNOMIAL` scheduler
|
||||
last_epoch (`int`, *optional*, defaults to -1):
|
||||
The index of the last epoch when resuming training.
|
||||
"""
|
||||
if name.startswith("adafactor"):
|
||||
assert type(optimizer) == transformers.optimization.Adafactor, f"adafactor scheduler must be used with Adafactor optimizer / adafactor schedulerはAdafactorオプティマイザと同時に使ってください"
|
||||
initial_lr = float(name.split(':')[1])
|
||||
# print("adafactor scheduler init lr", initial_lr)
|
||||
return transformers.optimization.AdafactorSchedule(optimizer, initial_lr)
|
||||
|
||||
name = SchedulerType(name)
|
||||
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
|
||||
if name == SchedulerType.CONSTANT:
|
||||
return schedule_func(optimizer)
|
||||
|
||||
# All other schedulers require `num_warmup_steps`
|
||||
if num_warmup_steps is None:
|
||||
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
|
||||
|
||||
if name == SchedulerType.CONSTANT_WITH_WARMUP:
|
||||
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
|
||||
|
||||
# All other schedulers require `num_training_steps`
|
||||
if num_training_steps is None:
|
||||
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
|
||||
|
||||
if name == SchedulerType.COSINE_WITH_RESTARTS:
|
||||
return schedule_func(
|
||||
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles
|
||||
)
|
||||
|
||||
if name == SchedulerType.POLYNOMIAL:
|
||||
return schedule_func(
|
||||
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power
|
||||
)
|
||||
|
||||
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
|
||||
|
||||
|
||||
def prepare_dataset_args(args: argparse.Namespace, support_metadata: bool):
|
||||
# backward compatibility
|
||||
if args.caption_extention is not None:
|
||||
@ -1592,13 +1847,19 @@ def prepare_dtype(args: argparse.Namespace):
|
||||
|
||||
|
||||
def load_target_model(args: argparse.Namespace, weight_dtype):
|
||||
load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path) # determine SD or Diffusers
|
||||
name_or_path = args.pretrained_model_name_or_path
|
||||
name_or_path = os.readlink(name_or_path) if os.path.islink(name_or_path) else name_or_path
|
||||
load_stable_diffusion_format = os.path.isfile(name_or_path) # determine SD or Diffusers
|
||||
if load_stable_diffusion_format:
|
||||
print("load StableDiffusion checkpoint")
|
||||
text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)
|
||||
text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, name_or_path)
|
||||
else:
|
||||
print("load Diffusers pretrained models")
|
||||
pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)
|
||||
try:
|
||||
pipe = StableDiffusionPipeline.from_pretrained(name_or_path, tokenizer=None, safety_checker=None)
|
||||
except EnvironmentError as ex:
|
||||
print(
|
||||
f"model is not found as a file or in Hugging Face, perhaps file name is wrong? / 指定したモデル名のファイル、またはHugging Faceのモデルが見つかりません。ファイル名が誤っているかもしれません: {name_or_path}")
|
||||
text_encoder = pipe.text_encoder
|
||||
vae = pipe.vae
|
||||
unet = pipe.unet
|
||||
|
lora_gui.py
@ -101,6 +101,7 @@ def save_configuration(
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer_args,noise_offset,
|
||||
):
|
||||
# Get list of function parameters and values
|
||||
parameters = list(locals().items())
|
||||
@ -133,6 +134,13 @@ def save_configuration(
|
||||
]
|
||||
}
|
||||
|
||||
# Extract the destination directory from the file path
|
||||
destination_directory = os.path.dirname(file_path)
|
||||
|
||||
# Create the destination directory if it doesn't exist
|
||||
if not os.path.exists(destination_directory):
|
||||
os.makedirs(destination_directory)
|
||||
|
||||
# Save the data to the selected file
|
||||
with open(file_path, 'w') as file:
|
||||
json.dump(variables, file, indent=2)
|
||||
@ -199,6 +207,7 @@ def open_configuration(
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer_args,noise_offset,
|
||||
):
|
||||
# Get list of function parameters and values
|
||||
parameters = list(locals().items())
|
||||
@ -281,6 +290,7 @@ def train_model(
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer_args,noise_offset,
|
||||
):
|
||||
if pretrained_model_name_or_path == '':
|
||||
msgbox('Source model information is missing')
|
||||
@ -461,6 +471,7 @@ def train_model(
|
||||
caption_extension=caption_extension,
|
||||
cache_latents=cache_latents,
|
||||
optimizer=optimizer,
|
||||
optimizer_args=optimizer_args,
|
||||
)
|
||||
|
||||
run_cmd += run_cmd_advanced_training(
|
||||
@ -485,6 +496,7 @@ def train_model(
|
||||
bucket_reso_steps=bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs=caption_dropout_every_n_epochs,
|
||||
caption_dropout_rate=caption_dropout_rate,
|
||||
noise_offset=noise_offset,
|
||||
)
|
||||
|
||||
print(run_cmd)
|
||||
@ -614,6 +626,7 @@ def lora_tab(
|
||||
caption_extension,
|
||||
cache_latents,
|
||||
optimizer,
|
||||
optimizer_args,
|
||||
) = gradio_training(
|
||||
learning_rate_value='0.0001',
|
||||
lr_scheduler_value='cosine',
|
||||
@ -701,7 +714,7 @@ def lora_tab(
|
||||
bucket_no_upscale,
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,noise_offset,
|
||||
) = gradio_advanced_training()
|
||||
color_aug.change(
|
||||
color_aug_changed,
|
||||
@ -784,6 +797,7 @@ def lora_tab(
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer_args,noise_offset,
|
||||
]
|
||||
|
||||
button_open_config.click(
|
||||
|
@ -95,7 +95,7 @@ def save_configuration(
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
):
|
||||
# Get list of function parameters and values
|
||||
parameters = list(locals().items())
|
||||
@ -117,10 +117,6 @@ def save_configuration(
|
||||
if file_path == None or file_path == '':
|
||||
return original_file_path # In case a file_path was provided and the user decide to cancel the open action
|
||||
|
||||
directory = os.path.dirname(file_path)
|
||||
if not os.path.exists(directory):
|
||||
os.makedirs(directory)
|
||||
|
||||
# Return the values of the variables as a dictionary
|
||||
variables = {
|
||||
name: value
|
||||
@ -132,6 +128,13 @@ def save_configuration(
|
||||
]
|
||||
}
|
||||
|
||||
# Extract the destination directory from the file path
|
||||
destination_directory = os.path.dirname(file_path)
|
||||
|
||||
# Create the destination directory if it doesn't exist
|
||||
if not os.path.exists(destination_directory):
|
||||
os.makedirs(destination_directory)
|
||||
|
||||
# Save the data to the selected file
|
||||
with open(file_path, 'w') as file:
|
||||
json.dump(variables, file, indent=2)
|
||||
@ -196,7 +199,7 @@ def open_configuration(
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
):
|
||||
# Get list of function parameters and values
|
||||
parameters = list(locals().items())
|
||||
@ -277,7 +280,7 @@ def train_model(
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
):
|
||||
if pretrained_model_name_or_path == '':
|
||||
msgbox('Source model information is missing')
|
||||
@ -438,6 +441,7 @@ def train_model(
|
||||
caption_extension=caption_extension,
|
||||
cache_latents=cache_latents,
|
||||
optimizer=optimizer,
|
||||
optimizer_args=optimizer_args,
|
||||
)
|
||||
|
||||
run_cmd += run_cmd_advanced_training(
|
||||
@ -462,6 +466,7 @@ def train_model(
|
||||
bucket_reso_steps=bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs=caption_dropout_every_n_epochs,
|
||||
caption_dropout_rate=caption_dropout_rate,
|
||||
noise_offset=noise_offset,
|
||||
)
|
||||
run_cmd += f' --token_string="{token_string}"'
|
||||
run_cmd += f' --init_word="{init_word}"'
|
||||
@ -627,7 +632,7 @@ def ti_tab(
|
||||
seed,
|
||||
caption_extension,
|
||||
cache_latents,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,
|
||||
) = gradio_training(
|
||||
learning_rate_value='1e-5',
|
||||
lr_scheduler_value='cosine',
|
||||
@ -685,7 +690,7 @@ def ti_tab(
|
||||
bucket_no_upscale,
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,noise_offset,
|
||||
) = gradio_advanced_training()
|
||||
color_aug.change(
|
||||
color_aug_changed,
|
||||
@ -761,7 +766,7 @@ def ti_tab(
|
||||
random_crop,
|
||||
bucket_reso_steps,
|
||||
caption_dropout_every_n_epochs, caption_dropout_rate,
|
||||
optimizer,
|
||||
optimizer,optimizer_args,noise_offset,
|
||||
]
|
||||
|
||||
button_open_config.click(
|
||||
|
train_db.py
@ -115,32 +115,12 @@ def train(args):
|
||||
|
||||
# 学習に必要なクラスを準備する
|
||||
print("prepare optimizer, data loader etc.")
|
||||
|
||||
# 8-bit Adamを使う
|
||||
if args.use_8bit_adam:
|
||||
try:
|
||||
import bitsandbytes as bnb
|
||||
except ImportError:
|
||||
raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
|
||||
print("use 8-bit Adam optimizer")
|
||||
optimizer_class = bnb.optim.AdamW8bit
|
||||
elif args.use_lion_optimizer:
|
||||
try:
|
||||
import lion_pytorch
|
||||
except ImportError:
|
||||
raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
|
||||
print("use Lion optimizer")
|
||||
optimizer_class = lion_pytorch.Lion
|
||||
else:
|
||||
optimizer_class = torch.optim.AdamW
|
||||
|
||||
if train_text_encoder:
|
||||
trainable_params = (itertools.chain(unet.parameters(), text_encoder.parameters()))
|
||||
else:
|
||||
trainable_params = unet.parameters()
|
||||
|
||||
# betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
|
||||
optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
|
||||
_, _, optimizer = train_util.get_optimizer(args, trainable_params)
|
||||
|
||||
# dataloaderを準備する
|
||||
# DataLoaderのプロセス数:0はメインプロセスになる
|
||||
@ -156,9 +136,10 @@ def train(args):
|
||||
if args.stop_text_encoder_training is None:
|
||||
args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end
|
||||
|
||||
# lr schedulerを用意する
|
||||
lr_scheduler = diffusers.optimization.get_scheduler(
|
||||
args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps)
|
||||
# lr schedulerを用意する TODO gradient_accumulation_stepsの扱いが何かおかしいかもしれない。後で確認する
|
||||
lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
|
||||
num_training_steps=args.max_train_steps,
|
||||
num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)
|
||||
|
||||
# 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
|
||||
if args.full_fp16:
|
||||
@ -281,12 +262,12 @@ def train(args):
|
||||
loss = loss.mean() # 平均なのでbatch_sizeで割る必要なし
|
||||
|
||||
accelerator.backward(loss)
|
||||
if accelerator.sync_gradients:
|
||||
if accelerator.sync_gradients and args.max_grad_norm != 0.0:
|
||||
if train_text_encoder:
|
||||
params_to_clip = (itertools.chain(unet.parameters(), text_encoder.parameters()))
|
||||
else:
|
||||
params_to_clip = unet.parameters()
|
||||
accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)
|
||||
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
|
||||
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
@ -299,7 +280,9 @@ def train(args):
|
||||
|
||||
current_loss = loss.detach().item()
|
||||
if args.logging_dir is not None:
|
||||
logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
|
||||
logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
|
||||
if args.optimizer_type.lower() == "DAdaptation".lower(): # tracking d*lr value
|
||||
logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d']*lr_scheduler.optimizers[0].param_groups[0]['lr']
|
||||
accelerator.log(logs, step=global_step)
|
||||
|
||||
if epoch == 0:
|
||||
@ -352,6 +335,7 @@ if __name__ == '__main__':
|
||||
train_util.add_dataset_arguments(parser, True, False, True)
|
||||
train_util.add_training_arguments(parser, True)
|
||||
train_util.add_sd_saving_arguments(parser)
|
||||
train_util.add_optimizer_arguments(parser)
|
||||
|
||||
parser.add_argument("--no_token_padding", action="store_true",
|
||||
help="disable token padding (same as Diffuser's DreamBooth) / トークンのpaddingを無効にする(Diffusers版DreamBoothと同じ動作)")
|
||||
|
train_network.py
@ -1,8 +1,5 @@
|
||||
from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION
|
||||
from torch.optim import Optimizer
|
||||
from torch.cuda.amp import autocast
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from typing import Optional, Union
|
||||
import importlib
|
||||
import argparse
|
||||
import gc
|
||||
@ -26,83 +23,25 @@ def collate_fn(examples):
|
||||
return examples[0]
|
||||
|
||||
|
||||
# TODO 他のスクリプトと共通化する
|
||||
def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_scheduler):
|
||||
logs = {"loss/current": current_loss, "loss/average": avr_loss}
|
||||
|
||||
if args.network_train_unet_only:
|
||||
logs["lr/unet"] = lr_scheduler.get_last_lr()[0]
|
||||
logs["lr/unet"] = float(lr_scheduler.get_last_lr()[0])
|
||||
elif args.network_train_text_encoder_only:
|
||||
logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0]
|
||||
logs["lr/textencoder"] = float(lr_scheduler.get_last_lr()[0])
|
||||
else:
|
||||
logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0]
|
||||
logs["lr/unet"] = lr_scheduler.get_last_lr()[-1] # may be same to textencoder
|
||||
logs["lr/textencoder"] = float(lr_scheduler.get_last_lr()[0])
|
||||
logs["lr/unet"] = float(lr_scheduler.get_last_lr()[-1]) # may be same to textencoder
|
||||
|
||||
if args.optimizer_type.lower() == "DAdaptation".lower(): # tracking d*lr value of unet.
|
||||
logs["lr/d*lr-textencoder"] = lr_scheduler.optimizers[-1].param_groups[0]['d']*lr_scheduler.optimizers[-1].param_groups[0]['lr']
|
||||
logs["lr/d*lr-unet"] = lr_scheduler.optimizers[-1].param_groups[1]['d']*lr_scheduler.optimizers[-1].param_groups[1]['lr']
|
||||
|
||||
return logs
|
||||
|
||||
|
||||
# Monkeypatch newer get_scheduler() function overridng current version of diffusers.optimizer.get_scheduler
|
||||
# code is taken from https://github.com/huggingface/diffusers diffusers.optimizer, commit d87cc15977b87160c30abaace3894e802ad9e1e6
|
||||
# Which is a newer release of diffusers than currently packaged with sd-scripts
|
||||
# This code can be removed when newer diffusers version (v0.12.1 or greater) is tested and implemented to sd-scripts
|
||||
|
||||
|
||||
def get_scheduler_fix(
|
||||
name: Union[str, SchedulerType],
|
||||
optimizer: Optimizer,
|
||||
num_warmup_steps: Optional[int] = None,
|
||||
num_training_steps: Optional[int] = None,
|
||||
num_cycles: int = 1,
|
||||
power: float = 1.0,
|
||||
):
|
||||
"""
|
||||
Unified API to get any scheduler from its name.
|
||||
Args:
|
||||
name (`str` or `SchedulerType`):
|
||||
The name of the scheduler to use.
|
||||
optimizer (`torch.optim.Optimizer`):
|
||||
The optimizer that will be used during training.
|
||||
num_warmup_steps (`int`, *optional*):
|
||||
The number of warmup steps to do. This is not required by all schedulers (hence the argument being
|
||||
optional), the function will raise an error if it's unset and the scheduler type requires it.
|
||||
num_training_steps (`int``, *optional*):
|
||||
The number of training steps to do. This is not required by all schedulers (hence the argument being
|
||||
optional), the function will raise an error if it's unset and the scheduler type requires it.
|
||||
num_cycles (`int`, *optional*):
|
||||
The number of hard restarts used in `COSINE_WITH_RESTARTS` scheduler.
|
||||
power (`float`, *optional*, defaults to 1.0):
|
||||
Power factor. See `POLYNOMIAL` scheduler
|
||||
last_epoch (`int`, *optional*, defaults to -1):
|
||||
The index of the last epoch when resuming training.
|
||||
"""
|
||||
name = SchedulerType(name)
|
||||
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
|
||||
if name == SchedulerType.CONSTANT:
|
||||
return schedule_func(optimizer)
|
||||
|
||||
# All other schedulers require `num_warmup_steps`
|
||||
if num_warmup_steps is None:
|
||||
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
|
||||
|
||||
if name == SchedulerType.CONSTANT_WITH_WARMUP:
|
||||
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
|
||||
|
||||
# All other schedulers require `num_training_steps`
|
||||
if num_training_steps is None:
|
||||
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
|
||||
|
||||
if name == SchedulerType.COSINE_WITH_RESTARTS:
|
||||
return schedule_func(
|
||||
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles
|
||||
)
|
||||
|
||||
if name == SchedulerType.POLYNOMIAL:
|
||||
return schedule_func(
|
||||
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power
|
||||
)
|
||||
|
||||
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
|
||||
|
||||
|
||||
def train(args):
|
||||
session_id = random.randint(0, 2**32)
|
||||
training_started_at = time.time()
|
||||
@ -161,7 +100,7 @@ def train(args):
|
||||
if args.lowram:
|
||||
text_encoder.to("cuda")
|
||||
unet.to("cuda")
|
||||
|
||||
|
||||
# モデルに xformers とか memory efficient attention を組み込む
|
||||
train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
|
||||
|
||||
@ -208,30 +147,8 @@ def train(args):
|
||||
# 学習に必要なクラスを準備する
|
||||
print("prepare optimizer, data loader etc.")
|
||||
|
||||
# 8-bit Adamを使う
|
||||
if args.use_8bit_adam:
|
||||
try:
|
||||
import bitsandbytes as bnb
|
||||
except ImportError:
|
||||
raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです")
|
||||
print("use 8-bit Adam optimizer")
|
||||
optimizer_class = bnb.optim.AdamW8bit
|
||||
elif args.use_lion_optimizer:
|
||||
try:
|
||||
import lion_pytorch
|
||||
except ImportError:
|
||||
raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
|
||||
print("use Lion optimizer")
|
||||
optimizer_class = lion_pytorch.Lion
|
||||
else:
|
||||
optimizer_class = torch.optim.AdamW
|
||||
|
||||
optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
|
||||
|
||||
trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
|
||||
|
||||
# betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略
|
||||
optimizer = optimizer_class(trainable_params, lr=args.learning_rate)
|
||||
optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(args, trainable_params)
|
||||
|
||||
# dataloaderを準備する
|
||||
# DataLoaderのプロセス数:0はメインプロセスになる
|
||||
@ -245,11 +162,9 @@ def train(args):
|
||||
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
|
||||
|
||||
# lr schedulerを用意する
|
||||
# lr_scheduler = diffusers.optimization.get_scheduler(
|
||||
lr_scheduler = get_scheduler_fix(
|
||||
args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
|
||||
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
|
||||
num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)
|
||||
lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
|
||||
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
|
||||
num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)
|
||||
|
||||
# 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
|
||||
if args.full_fp16:
|
||||
@ -372,7 +287,7 @@ def train(args):
|
||||
"ss_bucket_info": json.dumps(train_dataset.bucket_info),
|
||||
"ss_training_comment": args.training_comment, # will not be updated after training
|
||||
"ss_sd_scripts_commit_hash": train_util.get_git_revision_hash(),
|
||||
"ss_optimizer": optimizer_name
|
||||
"ss_optimizer": optimizer_name + (f"({optimizer_args})" if len(optimizer_args) > 0 else "")
|
||||
}
|
||||
|
||||
# uncomment if another network is added
|
||||
@ -465,9 +380,9 @@ def train(args):
|
||||
loss = loss.mean() # 平均なのでbatch_sizeで割る必要なし
|
||||
|
||||
accelerator.backward(loss)
|
||||
if accelerator.sync_gradients:
|
||||
if accelerator.sync_gradients and args.max_grad_norm != 0.0:
|
||||
params_to_clip = network.get_trainable_params()
|
||||
accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)
|
||||
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
|
||||
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
@ -555,6 +470,7 @@ if __name__ == '__main__':
|
||||
train_util.add_sd_models_arguments(parser)
|
||||
train_util.add_dataset_arguments(parser, True, True, True)
|
||||
train_util.add_training_arguments(parser, True)
|
||||
train_util.add_optimizer_arguments(parser)
|
||||
|
||||
parser.add_argument("--no_metadata", action='store_true', help="do not save metadata in output model / メタデータを出力先モデルに保存しない")
|
||||
parser.add_argument("--save_model_as", type=str, default="safetensors", choices=[None, "ckpt", "pt", "safetensors"],
|
||||
@ -562,10 +478,6 @@ if __name__ == '__main__':
|
||||
|
||||
parser.add_argument("--unet_lr", type=float, default=None, help="learning rate for U-Net / U-Netの学習率")
|
||||
parser.add_argument("--text_encoder_lr", type=float, default=None, help="learning rate for Text Encoder / Text Encoderの学習率")
|
||||
parser.add_argument("--lr_scheduler_num_cycles", type=int, default=1,
|
||||
help="Number of restarts for cosine scheduler with restarts / cosine with restartsスケジューラでのリスタート回数")
|
||||
parser.add_argument("--lr_scheduler_power", type=float, default=1,
|
||||
help="Polynomial power for polynomial scheduler / polynomialスケジューラでのpolynomial power")
|
||||
|
||||
parser.add_argument("--network_weights", type=str, default=None,
|
||||
help="pretrained weights for network / 学習するネットワークの初期重み")
|
||||
|
@ -10,7 +10,7 @@
|
||||
|
||||
cloneofsimo氏のリポジトリ、およびd8ahazard氏の[Dreambooth Extension for Stable-Diffusion-WebUI](https://github.com/d8ahazard/sd_dreambooth_extension)とは、現時点では互換性がありません。いくつかの機能拡張を行っているためです(後述)。
|
||||
|
||||
WebUI等で画像生成する場合には、学習したLoRAのモデルを学習元のStable Diffusionのモデルにこのリポジトリ内のスクリプトであらかじめマージしておくか、こちらの[WebUI用extention](https://github.com/kohya-ss/sd-webui-additional-networks)を使ってください。
|
||||
WebUI等で画像生成する場合には、学習したLoRAのモデルを学習元のStable Diffusionのモデルにこのリポジトリ内のスクリプトであらかじめマージしておくか、こちらの[WebUI用extension](https://github.com/kohya-ss/sd-webui-additional-networks)を使ってください。
|
||||
|
||||
## 学習方法
|
||||
|
||||
@ -50,12 +50,14 @@ accelerate launch --num_cpu_threads_per_process 1 train_network.py
|
||||
--train_data_dir=..\data\db\char1 --output_dir=..\lora_train1
|
||||
--reg_data_dir=..\data\db\reg1 --prior_loss_weight=1.0
|
||||
--resolution=448,640 --train_batch_size=1 --learning_rate=1e-4
|
||||
--max_train_steps=400 --use_8bit_adam --xformers --mixed_precision=fp16
|
||||
--max_train_steps=400 --optimizer_type=AdamW8bit --xformers --mixed_precision=fp16
|
||||
--save_every_n_epochs=1 --save_model_as=safetensors --clip_skip=2 --seed=42 --color_aug
|
||||
--network_module=networks.lora
|
||||
```
|
||||
|
||||
--output_dirオプションで指定したディレクトリに、LoRAのモデルが保存されます。
|
||||
(2023/2/22:オプティマイザの指定方法が変わりました。[こちら](#オプティマイザの指定について)をご覧ください。)
|
||||
|
||||
--output_dirオプションで指定したフォルダに、LoRAのモデルが保存されます。
|
||||
|
||||
その他、以下のオプションが指定できます。
|
||||
|
||||
@ -76,6 +78,42 @@ accelerate launch --num_cpu_threads_per_process 1 train_network.py
|
||||
|
||||
--network_train_unet_onlyと--network_train_text_encoder_onlyの両方とも未指定時(デフォルト)はText EncoderとU-Netの両方のLoRAモジュールを有効にします。
|
||||
|
||||
## オプティマイザの指定について
|
||||
|
||||
--optimizer_type オプションでオプティマイザの種類を指定します。以下が指定できます。
|
||||
|
||||
- AdamW : [torch.optim.AdamW](https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html)
|
||||
- 過去のバージョンのオプション未指定時と同じ
|
||||
- AdamW8bit : 引数は同上
|
||||
- 過去のバージョンの--use_8bit_adam指定時と同じ
|
||||
- Lion : https://github.com/lucidrains/lion-pytorch
|
||||
- 過去のバージョンの--use_lion_optimizer指定時と同じ
|
||||
- SGDNesterov : [torch.optim.SGD](https://pytorch.org/docs/stable/generated/torch.optim.SGD.html), nesterov=True
|
||||
- SGDNesterov8bit : 引数は同上
|
||||
- DAdaptation : https://github.com/facebookresearch/dadaptation
|
||||
- AdaFactor : [Transformers AdaFactor](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules)
|
||||
- 任意のオプティマイザ
|
||||
|
||||
オプティマイザのオプション引数は--optimizer_argsオプションで指定してください。key=valueの形式で、複数の値が指定できます。また、valueはカンマ区切りで複数の値が指定できます。たとえばAdamWオプティマイザに引数を指定する場合は、``--optimizer_args weight_decay=0.01 betas=.9,.999``のようになります。
|
||||
|
||||
オプション引数を指定する場合は、それぞれのオプティマイザの仕様をご確認ください。
|
||||
|
||||
一部のオプティマイザでは必須の引数があり、省略すると自動的に追加されます(SGDNesterovのmomentumなど)。コンソールの出力を確認してください。
|
||||
|
||||
The D-Adaptation optimizer adjusts the learning rate automatically. The value you pass as the learning rate is not used as the learning rate itself; it acts as a multiplier on the rate that D-Adaptation determines, so you should normally pass 1.0. To give the Text Encoder half of the U-Net's learning rate, specify ``--text_encoder_lr=0.5 --unet_lr=1.0``.
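As a rough illustration of that multiplier behavior (the constructor arguments mirror the ``dadaptation.DAdaptAdam`` call removed later in this diff; the parameter groups and dummy tensors are purely illustrative, not the repository's code):

```python
import torch
import dadaptation  # pip install dadaptation

# Dummy parameters standing in for the U-Net and Text Encoder weights.
unet_params = [torch.nn.Parameter(torch.zeros(4, 4))]
text_encoder_params = [torch.nn.Parameter(torch.zeros(4, 4))]

# With D-Adaptation, "lr" scales the step size the optimizer derives on its own:
# 1.0 keeps the derived rate, 0.5 halves it (the --unet_lr=1.0 --text_encoder_lr=0.5 case).
optimizer = dadaptation.DAdaptAdam(
    [
        {"params": unet_params, "lr": 1.0},
        {"params": text_encoder_params, "lr": 0.5},
    ],
    lr=1.0, decouple=True, weight_decay=0,
)
```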
The AdaFactor optimizer can adjust the learning rate automatically when relative_step=True is specified (it is added by default if omitted). When the rate is auto-adjusted, adafactor_scheduler is forcibly used as the learning-rate scheduler. It also seems advisable to specify scale_parameter and warmup_init.

With auto-adjustment, the options look like ``--optimizer_args "relative_step=True" "scale_parameter=True" "warmup_init=True"``.

If you do not want the learning rate auto-adjusted, add the extra argument ``relative_step=False``. In that case it is apparently recommended to use constant_with_warmup as the learning-rate scheduler and not to clip gradient norms, so the arguments would be ``--optimizer_type=adafactor --optimizer_args "relative_step=False" --lr_scheduler="constant_with_warmup" --max_grad_norm=0.0``.
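For reference, this is roughly how those flags map onto the Transformers Adafactor API when the learning rate is auto-adjusted. This sketch is independent of this repository's optimizer wrapper, and the dummy parameter is illustrative:

```python
import torch
from transformers.optimization import Adafactor, AdafactorSchedule

params = [torch.nn.Parameter(torch.zeros(4, 4))]

# relative_step=True lets AdaFactor derive the step size itself, so lr must be None;
# scale_parameter and warmup_init are the extra flags recommended above.
optimizer = Adafactor(params, lr=None, relative_step=True, scale_parameter=True, warmup_init=True)

# With auto-adjustment, the matching AdaFactor schedule simply reports the optimizer's internal rate.
lr_scheduler = AdafactorSchedule(optimizer)
```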
### Using an arbitrary optimizer

To use an optimizer from ``torch.optim``, specify only the class name (e.g. ``--optimizer_type=RMSprop``); to use an optimizer from another module, specify "module_name.ClassName" (e.g. ``--optimizer_type=bitsandbytes.optim.lamb.LAMB``).

(Internally this simply uses importlib; operation has not been verified. Install the required package yourself if needed.)
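The note above describes an importlib-based lookup; here is a minimal sketch of that pattern. It is not the repository's actual helper, just the idea:

```python
import importlib
import torch

def resolve_optimizer_class(optimizer_type: str):
    # "RMSprop" -> torch.optim.RMSprop; "bitsandbytes.optim.lamb.LAMB" -> that class via importlib.
    if "." not in optimizer_type:
        return getattr(torch.optim, optimizer_type)
    module_name, class_name = optimizer_type.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), class_name)

print(resolve_optimizer_class("RMSprop"))  # <class 'torch.optim.rmsprop.RMSprop'>
```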
## The merge script

With merge_lora.py you can merge LoRA training results into a Stable Diffusion model, or merge multiple LoRA models together.
@ -178,6 +216,38 @@ Text Encoderが二つのモデルで同じ場合にはLoRAはU-NetのみのLoRA
- --save_precision
  - Specifies the save precision of the LoRA as "float", "fp16", or "bf16". Defaults to float when omitted.
## Image resizing script

(The documentation will be reorganized later; for now the explanation is written here.)

With an extension to Aspect Ratio Bucketing, it is now possible to use small images as training data as they are, without upscaling them. We received a preprocessing script along with a report that adding downscaled copies of the original training images to the training data improves accuracy, so it has been cleaned up and added. Thanks to bmaltais.
### How to run the script

Run it as shown below. Both the original images (unchanged) and the resized images are saved to the destination folder. The target resolution is appended to the file name of each resized image, e.g. ``+512x512`` (this differs from the actual image size). Images smaller than the target resolution are never upscaled.
```
python tools\resize_images_to_resolution.py --max_resolution 512x512,384x384,256x256 --save_as_png
--copy_associated_files <source_image_folder> <destination_folder>
```
Image files in the source folder are resized so that their area matches the specified resolution(s) (multiple values allowed) and saved to the destination folder. Non-image files are copied as-is.

Specify the target size(s) with the ``--max_resolution`` option as in the example; images are resized so that their area matches that size. If multiple values are given, the image is resized for each of them: with ``512x512,384x384,256x256``, the destination folder holds four images per source image, the original plus three resized versions.

With the ``--save_as_png`` option, images are saved as PNG. When omitted, they are saved as JPEG (quality=100).

With the ``--copy_associated_files`` option, files whose names match an image except for the extension (captions, for example) are copied under the same name as the corresponding resized image.
### Other options

- divisible_by
  - Crops around the image center so that both the height and the width of the resized image are divisible by this value.
- interpolation
  - Specifies the interpolation method used when downscaling. One of ``area, cubic, lanczos4``; the default is ``area`` (see the sketch below).
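To make the equal-area rule and the two options above concrete, here is a rough sketch. It is not the actual tools/resize_images_to_resolution.py implementation; the function name and its defaults are made up for illustration:

```python
import math
import cv2
import numpy as np

def resize_to_area(image: np.ndarray, max_resolution: str = "512x512",
                   divisible_by: int = 1, interpolation: int = cv2.INTER_AREA) -> np.ndarray:
    # Scale so the output area matches the target area; never upscale.
    target_w, target_h = (int(v) for v in max_resolution.split("x"))
    h, w = image.shape[:2]
    scale = math.sqrt((target_w * target_h) / (w * h))
    if scale < 1.0:
        image = cv2.resize(image, (int(w * scale), int(h * scale)), interpolation=interpolation)
    # Center-crop so both sides are divisible by `divisible_by`.
    h, w = image.shape[:2]
    new_h, new_w = h - h % divisible_by, w - w % divisible_by
    top, left = (h - new_h) // 2, (w - new_w) // 2
    return image[top:top + new_h, left:left + new_w]

# e.g. resize_to_area(cv2.imread("sample.jpg"), "512x512", divisible_by=64)
```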
## Additional information

### Differences from cloneofsimo's repository
@ -13,41 +13,34 @@ from diffusers import DDPMScheduler
import library.train_util as train_util
from library.train_util import DreamBoothDataset, FineTuningDataset
imagenet_templates_small = [
    "{}",
    "a photo of a {}",
    "a rendering of a {}",
    "a cropped photo of the {}",
    "the photo of a {}",
    "a photo of a clean {}",
    "a photo of a dirty {}",
    "a dark photo of the {}",
    "a photo of my {}",
    "a photo of the cool {}",
    "a close-up photo of a {}",
    "a bright photo of the {}",
    "a cropped photo of a {}",
    "a photo of the {}",
    "a good photo of the {}",
    "a photo of one {}",
    "a close-up photo of the {}",
    "a rendition of the {}",
    "a photo of the clean {}",
    "a rendition of a {}",
    "a photo of a nice {}",
    "a good photo of a {}",
    "a photo of the nice {}",
    "a photo of the small {}",
    "a photo of the weird {}",
    "a photo of the large {}",
    "a photo of a cool {}",
    "a photo of a small {}",
]

imagenet_style_templates_small = [
@ -205,34 +198,8 @@ def train(args):
# prepare the classes needed for training
print("prepare optimizer, data loader etc.")

trainable_params = text_encoder.get_input_embeddings().parameters()

_, _, optimizer = train_util.get_optimizer(args, trainable_params)

# prepare the dataloader
# number of DataLoader processes: 0 uses the main process
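The optimizer selection that used to be inlined here (8-bit Adam / Lion / plain AdamW, plus a hard-coded D-Adaptation experiment) is now delegated to train_util.get_optimizer, which, judging from the unpacking above, returns the optimizer as the last element of a 3-tuple. A rough sketch of that kind of dispatch, not the library's actual implementation:

```python
import torch

def pick_optimizer_class(optimizer_type: str):
    # Hypothetical dispatcher mirroring the branches that were removed from this file.
    name = optimizer_type.lower()
    if name == "adamw8bit":
        import bitsandbytes as bnb  # pip install bitsandbytes
        return bnb.optim.AdamW8bit
    if name == "lion":
        import lion_pytorch  # pip install lion-pytorch
        return lion_pytorch.Lion
    if name == "dadaptation":
        import dadaptation  # pip install dadaptation
        return dadaptation.DAdaptAdam
    return torch.optim.AdamW

params = [torch.nn.Parameter(torch.zeros(4, 4))]
optimizer = pick_optimizer_class("AdamW")(params, lr=1e-4)
```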
@ -246,20 +213,9 @@ def train(args):
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")

# prepare the lr scheduler
lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
                                            num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
                                            num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)

# accelerator apparently takes care of this for us
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
@ -381,9 +337,9 @@ def train(args):
loss = loss.mean()  # already an average, so no need to divide by batch_size

accelerator.backward(loss)
if accelerator.sync_gradients and args.max_grad_norm != 0.0:
    params_to_clip = text_encoder.get_input_embeddings().parameters()
    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

optimizer.step()
lr_scheduler.step()
@ -400,16 +356,14 @@ def train(args):
current_loss = loss.detach().item()
if args.logging_dir is not None:
    logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
    if args.optimizer_type.lower() == "DAdaptation".lower():  # tracking d*lr value
        logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d'] * lr_scheduler.optimizers[0].param_groups[0]['lr']
    accelerator.log(logs, step=global_step)

loss_total += current_loss
avr_loss = loss_total / (step + 1)
logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)

if global_step >= args.max_train_steps:
@ -519,6 +473,7 @@ if __name__ == '__main__':
train_util.add_sd_models_arguments(parser)
train_util.add_dataset_arguments(parser, True, True, False)
train_util.add_training_arguments(parser, True)
train_util.add_optimizer_arguments(parser)

parser.add_argument("--save_model_as", type=str, default="pt", choices=[None, "ckpt", "pt", "safetensors"],
                    help="format to save the model (default is .pt) / モデル保存時の形式(デフォルトはpt)")
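train_util.add_optimizer_arguments(parser) centralizes the optimizer-related flags used throughout this change. A hedged sketch of the kind of registrations involved (flag names are taken from this document; types, defaults, and help text are assumptions, not the library's actual definitions):

```python
import argparse

def add_optimizer_arguments_sketch(parser: argparse.ArgumentParser) -> None:
    # Assumed shapes only; see library/train_util.py for the real definitions.
    parser.add_argument("--optimizer_type", type=str, default="",
                        help="optimizer to use, e.g. AdamW, AdamW8bit, Lion, SGDNesterov, DAdaptation, AdaFactor")
    parser.add_argument("--optimizer_args", type=str, nargs="*", default=None,
                        help="extra optimizer arguments as key=value pairs")
    parser.add_argument("--max_grad_norm", type=float, default=1.0,
                        help="gradient clipping norm; 0.0 disables clipping")

parser = argparse.ArgumentParser()
add_optimizer_arguments_sketch(parser)
```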