# v1: initial release
# v2: add open and save folder icons
# v3: Add new Utilities tab for Dreambooth folder preparation
# v3.1: Add captioning of images to utilities
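"""LoRA training tab for the kohya_ss Gradio GUI.

Builds the Gradio interface for configuring and launching LoRA training via
train_network.py, including config save/load and dataset preparation tools.
"""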
import gradio as gr
import easygui
import json
import math
import os
import subprocess
import pathlib
import argparse
from library.common_gui import (
    get_folder_path,
    remove_doublequote,
    get_file_path,
    get_any_file_path,
    get_saveasfile_path,
    color_aug_changed,
    save_inference_file,
    gradio_advanced_training,
    run_cmd_advanced_training,
    gradio_training,
    gradio_config,
    gradio_source_model,
    run_cmd_training,
    # set_legacy_8bitadam,
    update_my_data,
    check_if_model_exist,
)
from library.dreambooth_folder_creation_gui import (
    gradio_dreambooth_folder_creation_tab,
)
from library.tensorboard_gui import (
    gradio_tensorboard,
    start_tensorboard,
    stop_tensorboard,
)
from library.dataset_balancing_gui import gradio_dataset_balancing_tab
from library.utilities import utilities_tab
from library.merge_lora_gui import gradio_merge_lora_tab
from library.svd_merge_lora_gui import gradio_svd_merge_lora_tab
from library.verify_lora_gui import gradio_verify_lora_tab
from library.resize_lora_gui import gradio_resize_lora_tab
from library.sampler_gui import sample_gradio_config, run_cmd_sample
from easygui import msgbox
folder_symbol = '\U0001f4c2'  # 📂
refresh_symbol = '\U0001f504'  # 🔄
save_style_symbol = '\U0001f4be'  # 💾
document_symbol = '\U0001F4C4'  # 📄

path_of_this_folder = os.getcwd()

def save_configuration(
    save_as,
    file_path,
    pretrained_model_name_or_path,
    v2,
    v_parameterization,
    logging_dir,
    train_data_dir,
    reg_data_dir,
    output_dir,
    max_resolution,
    learning_rate,
    lr_scheduler,
    lr_warmup,
    train_batch_size,
    epoch,
    save_every_n_epochs,
    mixed_precision,
    save_precision,
    seed,
    num_cpu_threads_per_process,
    cache_latents,
    caption_extension,
    enable_bucket,
    gradient_checkpointing,
    full_fp16,
    no_token_padding,
    stop_text_encoder_training,
    # use_8bit_adam,
    xformers,
    save_model_as,
    shuffle_caption,
    save_state,
    resume,
    prior_loss_weight,
    text_encoder_lr,
    unet_lr,
    network_dim,
    lora_network_weights,
    color_aug,
    flip_aug,
    clip_skip,
    gradient_accumulation_steps,
    mem_eff_attn,
    output_name,
    model_list,
    max_token_length,
    max_train_epochs,
    max_data_loader_n_workers,
    network_alpha,
    training_comment,
    keep_tokens,
    lr_scheduler_num_cycles,
    lr_scheduler_power,
    persistent_data_loader_workers,
    bucket_no_upscale,
    random_crop,
    bucket_reso_steps,
    caption_dropout_every_n_epochs,
    caption_dropout_rate,
    optimizer,
    optimizer_args,
    noise_offset,
    LoRA_type,
    conv_dim,
    conv_alpha,
    sample_every_n_steps,
    sample_every_n_epochs,
    sample_sampler,
    sample_prompts,
    additional_parameters,
    vae_batch_size,
    min_snr_gamma,
):
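    """Save the current GUI settings to a JSON config file.

    Returns the path the config was written to, or the previous file_path if
    the user cancels the save dialog.
    """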
    # Get list of function parameters and values
    parameters = list(locals().items())

    original_file_path = file_path

    save_as_bool = True if save_as.get('label') == 'True' else False

    if save_as_bool:
        print('Save as...')
        file_path = get_saveasfile_path(file_path)
    else:
        print('Save...')
        if file_path is None or file_path == '':
            file_path = get_saveasfile_path(file_path)

    # print(file_path)

    if file_path is None or file_path == '':
        return original_file_path  # In case a file_path was provided and the user decided to cancel the open action

    # Return the values of the variables as a dictionary
    variables = {
        name: value
        for name, value in parameters  # locals().items()
        if name not in ['file_path', 'save_as']
    }

    # Extract the destination directory from the file path
    destination_directory = os.path.dirname(file_path)

    # Create the destination directory if it doesn't exist
    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    # Save the data to the selected file
    with open(file_path, 'w') as file:
        json.dump(variables, file, indent=2)

    return file_path


def open_configuration(
    ask_for_file,
    file_path,
    pretrained_model_name_or_path,
    v2,
    v_parameterization,
    logging_dir,
    train_data_dir,
    reg_data_dir,
    output_dir,
    max_resolution,
    learning_rate,
    lr_scheduler,
    lr_warmup,
    train_batch_size,
    epoch,
    save_every_n_epochs,
    mixed_precision,
    save_precision,
    seed,
    num_cpu_threads_per_process,
    cache_latents,
    caption_extension,
    enable_bucket,
    gradient_checkpointing,
    full_fp16,
    no_token_padding,
    stop_text_encoder_training,
    # use_8bit_adam,
    xformers,
    save_model_as,
    shuffle_caption,
    save_state,
    resume,
    prior_loss_weight,
    text_encoder_lr,
    unet_lr,
    network_dim,
    lora_network_weights,
    color_aug,
    flip_aug,
    clip_skip,
    gradient_accumulation_steps,
    mem_eff_attn,
    output_name,
    model_list,
    max_token_length,
    max_train_epochs,
    max_data_loader_n_workers,
    network_alpha,
    training_comment,
    keep_tokens,
    lr_scheduler_num_cycles,
    lr_scheduler_power,
    persistent_data_loader_workers,
    bucket_no_upscale,
    random_crop,
    bucket_reso_steps,
    caption_dropout_every_n_epochs,
    caption_dropout_rate,
    optimizer,
    optimizer_args,
    noise_offset,
    LoRA_type,
    conv_dim,
    conv_alpha,
    sample_every_n_steps,
    sample_every_n_epochs,
    sample_sampler,
    sample_prompts,
    additional_parameters,
    vae_batch_size,
    min_snr_gamma,
):
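    """Load GUI settings from a JSON config file.

    Returns a tuple of (file_path, *settings) ordered to match this function's
    parameters, plus a visibility update for the LoCon parameter row.
    """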
    # Get list of function parameters and values
    parameters = list(locals().items())

    ask_for_file = True if ask_for_file.get('label') == 'True' else False

    original_file_path = file_path

    if ask_for_file:
        file_path = get_file_path(file_path)

    if file_path != '' and file_path is not None:
        # load variables from JSON file
        with open(file_path, 'r') as f:
            my_data = json.load(f)
            print('Loading config...')

            # Update values to fix deprecated use_8bit_adam checkbox, set appropriate optimizer if it is set to True, etc.
            my_data = update_my_data(my_data)
    else:
        file_path = original_file_path  # In case a file_path was provided and the user decided to cancel the open action
        my_data = {}

    values = [file_path]
    for key, value in parameters:
        # Set the value in the dictionary to the corresponding value in `my_data`, or the default value if not found
        if key not in ['ask_for_file', 'file_path']:
            values.append(my_data.get(key, value))

    # Make the LoCon parameter row visible if the loaded LoRA_type is 'LoCon'
    if my_data.get('LoRA_type', 'Standard') == 'LoCon':
        values.append(gr.Row.update(visible=True))
    else:
        values.append(gr.Row.update(visible=False))

    return tuple(values)


def train_model(
    print_only,
    pretrained_model_name_or_path,
    v2,
    v_parameterization,
    logging_dir,
    train_data_dir,
    reg_data_dir,
    output_dir,
    max_resolution,
    learning_rate,
    lr_scheduler,
    lr_warmup,
    train_batch_size,
    epoch,
    save_every_n_epochs,
    mixed_precision,
    save_precision,
    seed,
    num_cpu_threads_per_process,
    cache_latents,
    caption_extension,
    enable_bucket,
    gradient_checkpointing,
    full_fp16,
    no_token_padding,
    stop_text_encoder_training_pct,
    # use_8bit_adam,
    xformers,
    save_model_as,
    shuffle_caption,
    save_state,
    resume,
    prior_loss_weight,
    text_encoder_lr,
    unet_lr,
    network_dim,
    lora_network_weights,
    color_aug,
    flip_aug,
    clip_skip,
    gradient_accumulation_steps,
    mem_eff_attn,
    output_name,
    model_list,  # Keep this. Yes, it is unused here but required given the common list used
    max_token_length,
    max_train_epochs,
    max_data_loader_n_workers,
    network_alpha,
    training_comment,
    keep_tokens,
    lr_scheduler_num_cycles,
    lr_scheduler_power,
    persistent_data_loader_workers,
    bucket_no_upscale,
    random_crop,
    bucket_reso_steps,
    caption_dropout_every_n_epochs,
    caption_dropout_rate,
    optimizer,
    optimizer_args,
    noise_offset,
    LoRA_type,
    conv_dim,
    conv_alpha,
    sample_every_n_steps,
    sample_every_n_epochs,
    sample_sampler,
    sample_prompts,
    additional_parameters,
    vae_batch_size,
    min_snr_gamma,
):
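    """Validate the GUI inputs, build the train_network.py command line, and
    run it via `accelerate launch` (or just print it when print_only is set).
    """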
    print_only_bool = True if print_only.get('label') == 'True' else False

    if pretrained_model_name_or_path == '':
        msgbox('Source model information is missing')
        return

    if train_data_dir == '':
        msgbox('Image folder path is missing')
        return

    if not os.path.exists(train_data_dir):
        msgbox('Image folder does not exist')
        return

    if reg_data_dir != '':
        if not os.path.exists(reg_data_dir):
            msgbox('Regularisation folder does not exist')
            return

    if output_dir == '':
        msgbox('Output folder path is missing')
        return

    if int(bucket_reso_steps) < 1:
        msgbox('Bucket resolution steps need to be greater than 0')
        return

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if stop_text_encoder_training_pct > 0:
        msgbox(
            'Output "stop text encoder training" is not yet supported. Ignoring'
        )
        stop_text_encoder_training_pct = 0

    if check_if_model_exist(output_name, output_dir, save_model_as):
        return

    # If string is empty set string to 0.
    if text_encoder_lr == '':
        text_encoder_lr = 0
    if unet_lr == '':
        unet_lr = 0
    # if (float(text_encoder_lr) == 0) and (float(unet_lr) == 0):
    #     msgbox(
    #         'At least one Learning Rate value for "Text encoder" or "Unet" need to be provided'
    #     )
    #     return
    # Get a list of all subfolders in train_data_dir
    subfolders = [
        f
        for f in os.listdir(train_data_dir)
        if os.path.isdir(os.path.join(train_data_dir, f))
    ]

    total_steps = 0

    # Loop through each subfolder and extract the number of repeats
    for folder in subfolders:
        # Extract the number of repeats from the folder name
        repeats = int(folder.split('_')[0])
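        # Note: the folder name must follow the '<repeats>_<name>' convention
        # (e.g. '40_subject') for the parse above to succeed.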

        # Count the number of images in the folder
        num_images = len(
            [
                f
                for f, lower_f in (
                    (file, file.lower())
                    for file in os.listdir(
                        os.path.join(train_data_dir, folder)
                    )
                )
                if lower_f.endswith(('.jpg', '.jpeg', '.png', '.webp'))
            ]
        )

        print(f'Folder {folder}: {num_images} images found')

        # Calculate the total number of steps for this folder
        steps = repeats * num_images

        # Print the result
        print(f'Folder {folder}: {steps} steps')

        total_steps += steps

    # calculate max_train_steps
    max_train_steps = int(
        math.ceil(
            float(total_steps)
            / int(train_batch_size)
            * int(epoch)
            # * int(reg_factor)
        )
    )
    print(f'max_train_steps = {max_train_steps}')

    # calculate stop encoder training
    if stop_text_encoder_training_pct is None:
        stop_text_encoder_training = 0
    else:
        stop_text_encoder_training = math.ceil(
            float(max_train_steps) / 100 * int(stop_text_encoder_training_pct)
        )
    print(f'stop_text_encoder_training = {stop_text_encoder_training}')

    lr_warmup_steps = round(float(int(lr_warmup) * int(max_train_steps) / 100))
    print(f'lr_warmup_steps = {lr_warmup_steps}')
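
    # Build the training command: everything below appends flags to the
    # `accelerate launch ... train_network.py` invocation created here.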
    run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} "train_network.py"'

    # run_cmd += f' --caption_dropout_rate="0.1" --caption_dropout_every_n_epochs=1'  # --random_crop'

    if v2:
        run_cmd += ' --v2'
    if v_parameterization:
        run_cmd += ' --v_parameterization'
    if enable_bucket:
        run_cmd += ' --enable_bucket'
    if no_token_padding:
        run_cmd += ' --no_token_padding'
    run_cmd += (
        f' --pretrained_model_name_or_path="{pretrained_model_name_or_path}"'
    )
    run_cmd += f' --train_data_dir="{train_data_dir}"'
    if len(reg_data_dir):
        run_cmd += f' --reg_data_dir="{reg_data_dir}"'
    run_cmd += f' --resolution={max_resolution}'
    run_cmd += f' --output_dir="{output_dir}"'
    run_cmd += f' --logging_dir="{logging_dir}"'
    run_cmd += f' --network_alpha="{network_alpha}"'
    if not training_comment == '':
        run_cmd += f' --training_comment="{training_comment}"'
    if not stop_text_encoder_training == 0:
        run_cmd += (
            f' --stop_text_encoder_training={stop_text_encoder_training}'
        )
    if not save_model_as == 'same as source model':
        run_cmd += f' --save_model_as={save_model_as}'
    if not float(prior_loss_weight) == 1.0:
        run_cmd += f' --prior_loss_weight={prior_loss_weight}'
    if LoRA_type == 'LoCon' or LoRA_type == 'LyCORIS/LoCon':
        try:
            import lycoris
        except ModuleNotFoundError:
            print(
                "\033[1;31mError:\033[0m The required module 'lycoris_lora' is not installed. Please install by running \033[33mupgrade.ps1\033[0m before running this program."
            )
            return
        run_cmd += f' --network_module=lycoris.kohya'
        run_cmd += f' --network_args "conv_dim={conv_dim}" "conv_alpha={conv_alpha}" "algo=lora"'

    if LoRA_type == 'LyCORIS/LoHa':
        try:
            import lycoris
        except ModuleNotFoundError:
            print(
                "\033[1;31mError:\033[0m The required module 'lycoris_lora' is not installed. Please install by running \033[33mupgrade.ps1\033[0m before running this program."
            )
            return
        run_cmd += f' --network_module=lycoris.kohya'
        run_cmd += f' --network_args "conv_dim={conv_dim}" "conv_alpha={conv_alpha}" "algo=loha"'

    if LoRA_type == 'Kohya LoCon':
        run_cmd += f' --network_module=networks.lora'
        run_cmd += (
            f' --network_args "conv_dim={conv_dim}" "conv_alpha={conv_alpha}"'
        )

    if LoRA_type == 'Standard':
        run_cmd += f' --network_module=networks.lora'
    if not (float(text_encoder_lr) == 0) or not (float(unet_lr) == 0):
        if not (float(text_encoder_lr) == 0) and not (float(unet_lr) == 0):
            run_cmd += f' --text_encoder_lr={text_encoder_lr}'
            run_cmd += f' --unet_lr={unet_lr}'
        elif not (float(text_encoder_lr) == 0):
            run_cmd += f' --text_encoder_lr={text_encoder_lr}'
            run_cmd += f' --network_train_text_encoder_only'
        else:
            run_cmd += f' --unet_lr={unet_lr}'
            run_cmd += f' --network_train_unet_only'
    else:
        if float(text_encoder_lr) == 0:
            msgbox('Please input learning rate values.')
            return

    run_cmd += f' --network_dim={network_dim}'

    if not lora_network_weights == '':
        run_cmd += f' --network_weights="{lora_network_weights}"'

    if int(gradient_accumulation_steps) > 1:
        run_cmd += f' --gradient_accumulation_steps={int(gradient_accumulation_steps)}'

    if not output_name == '':
        run_cmd += f' --output_name="{output_name}"'

    if not lr_scheduler_num_cycles == '':
        run_cmd += f' --lr_scheduler_num_cycles="{lr_scheduler_num_cycles}"'
    else:
        run_cmd += f' --lr_scheduler_num_cycles="{epoch}"'

    if not lr_scheduler_power == '':
        run_cmd += f' --lr_scheduler_power="{lr_scheduler_power}"'

    run_cmd += run_cmd_training(
        learning_rate=learning_rate,
        lr_scheduler=lr_scheduler,
        lr_warmup_steps=lr_warmup_steps,
        train_batch_size=train_batch_size,
        max_train_steps=max_train_steps,
        save_every_n_epochs=save_every_n_epochs,
        mixed_precision=mixed_precision,
        save_precision=save_precision,
        seed=seed,
        caption_extension=caption_extension,
        cache_latents=cache_latents,
        optimizer=optimizer,
        optimizer_args=optimizer_args,
    )

    run_cmd += run_cmd_advanced_training(
        max_train_epochs=max_train_epochs,
        max_data_loader_n_workers=max_data_loader_n_workers,
        max_token_length=max_token_length,
        resume=resume,
        save_state=save_state,
        mem_eff_attn=mem_eff_attn,
        clip_skip=clip_skip,
        flip_aug=flip_aug,
        color_aug=color_aug,
        shuffle_caption=shuffle_caption,
        gradient_checkpointing=gradient_checkpointing,
        full_fp16=full_fp16,
        xformers=xformers,
        # use_8bit_adam=use_8bit_adam,
        keep_tokens=keep_tokens,
        persistent_data_loader_workers=persistent_data_loader_workers,
        bucket_no_upscale=bucket_no_upscale,
        random_crop=random_crop,
        bucket_reso_steps=bucket_reso_steps,
        caption_dropout_every_n_epochs=caption_dropout_every_n_epochs,
        caption_dropout_rate=caption_dropout_rate,
        noise_offset=noise_offset,
        additional_parameters=additional_parameters,
        vae_batch_size=vae_batch_size,
        min_snr_gamma=min_snr_gamma,
    )

    run_cmd += run_cmd_sample(
        sample_every_n_steps,
        sample_every_n_epochs,
        sample_sampler,
        sample_prompts,
        output_dir,
    )

    if print_only_bool:
        print(
            '\033[93m\nHere is the trainer command as a reference. It will not be executed:\033[0m\n'
        )
        print('\033[96m' + run_cmd + '\033[0m\n')
    else:
        print(run_cmd)

        # Run the command
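        # Note: on POSIX the command string is run through the shell via
        # os.system; on Windows subprocess.run executes it directly.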
        if os.name == 'posix':
            os.system(run_cmd)
        else:
            subprocess.run(run_cmd)

        # check if output_dir/last is a folder... therefore it is a diffuser model
        last_dir = pathlib.Path(f'{output_dir}/{output_name}')

        if not last_dir.is_dir():
            # Copy inference model for v2 if required
            save_inference_file(
                output_dir, v2, v_parameterization, output_name
            )


def lora_tab(
    train_data_dir_input=gr.Textbox(),
    reg_data_dir_input=gr.Textbox(),
    output_dir_input=gr.Textbox(),
    logging_dir_input=gr.Textbox(),
):
    dummy_db_true = gr.Label(value=True, visible=False)
    dummy_db_false = gr.Label(value=False, visible=False)
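    # These hidden boolean Labels are prepended to the click-handler inputs
    # below so a single callback can tell 'save as' / 'print only' mode apart
    # (read back via .get('label') in the handlers above).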
    gr.Markdown(
        'Train a custom model using kohya train network LoRA python code...'
    )
    (
        button_open_config,
        button_save_config,
        button_save_as_config,
        config_file_name,
        button_load_config,
    ) = gradio_config()

    (
        pretrained_model_name_or_path,
        v2,
        v_parameterization,
        save_model_as,
        model_list,
    ) = gradio_source_model(
        save_model_as_choices=[
            'ckpt',
            'safetensors',
        ]
    )

    with gr.Tab('Folders'):
        with gr.Row():
            train_data_dir = gr.Textbox(
                label='Image folder',
                placeholder='Folder where the training folders containing the images are located',
            )
            train_data_dir_folder = gr.Button('📂', elem_id='open_folder_small')
            train_data_dir_folder.click(
                get_folder_path,
                outputs=train_data_dir,
                show_progress=False,
            )
            reg_data_dir = gr.Textbox(
                label='Regularisation folder',
                placeholder='(Optional) Folder where the regularization folders containing the images are located',
            )
            reg_data_dir_folder = gr.Button('📂', elem_id='open_folder_small')
            reg_data_dir_folder.click(
                get_folder_path,
                outputs=reg_data_dir,
                show_progress=False,
            )
        with gr.Row():
            output_dir = gr.Textbox(
                label='Output folder',
                placeholder='Folder to output trained model',
            )
            output_dir_folder = gr.Button('📂', elem_id='open_folder_small')
            output_dir_folder.click(
                get_folder_path,
                outputs=output_dir,
                show_progress=False,
            )
            logging_dir = gr.Textbox(
                label='Logging folder',
                placeholder='Optional: enable logging and output TensorBoard log to this folder',
            )
            logging_dir_folder = gr.Button('📂', elem_id='open_folder_small')
            logging_dir_folder.click(
                get_folder_path,
                outputs=logging_dir,
                show_progress=False,
            )
        with gr.Row():
            output_name = gr.Textbox(
                label='Model output name',
                placeholder='(Name of the model to output)',
                value='last',
                interactive=True,
            )
            training_comment = gr.Textbox(
                label='Training comment',
                placeholder='(Optional) Add training comment to be included in metadata',
                interactive=True,
            )
        train_data_dir.change(
            remove_doublequote,
            inputs=[train_data_dir],
            outputs=[train_data_dir],
        )
        reg_data_dir.change(
            remove_doublequote,
            inputs=[reg_data_dir],
            outputs=[reg_data_dir],
        )
        output_dir.change(
            remove_doublequote,
            inputs=[output_dir],
            outputs=[output_dir],
        )
        logging_dir.change(
            remove_doublequote,
            inputs=[logging_dir],
            outputs=[logging_dir],
        )
    with gr.Tab('Training parameters'):
        with gr.Row():
            LoRA_type = gr.Dropdown(
                label='LoRA type',
                choices=[
                    'Kohya LoCon',
                    # 'LoCon',
                    'LyCORIS/LoCon',
                    'LyCORIS/LoHa',
                    'Standard',
                ],
                value='Standard',
            )
            lora_network_weights = gr.Textbox(
                label='LoRA network weights',
                placeholder='(Optional) Path to existing LoRA network weights to resume training',
            )
            lora_network_weights_file = gr.Button(
                document_symbol, elem_id='open_folder_small'
            )
            lora_network_weights_file.click(
                get_any_file_path,
                inputs=[lora_network_weights],
                outputs=lora_network_weights,
                show_progress=False,
            )
        (
            learning_rate,
            lr_scheduler,
            lr_warmup,
            train_batch_size,
            epoch,
            save_every_n_epochs,
            mixed_precision,
            save_precision,
            num_cpu_threads_per_process,
            seed,
            caption_extension,
            cache_latents,
            optimizer,
            optimizer_args,
        ) = gradio_training(
            learning_rate_value='0.0001',
            lr_scheduler_value='cosine',
            lr_warmup_value='10',
        )

        with gr.Row():
            text_encoder_lr = gr.Textbox(
                label='Text Encoder learning rate',
                value='5e-5',
                placeholder='Optional',
            )
            unet_lr = gr.Textbox(
                label='Unet learning rate',
                value='0.0001',
                placeholder='Optional',
            )
            network_dim = gr.Slider(
                minimum=1,
                maximum=1024,
                label='Network Rank (Dimension)',
                value=8,
                step=1,
                interactive=True,
            )
            network_alpha = gr.Slider(
                minimum=0.1,
                maximum=1024,
                label='Network Alpha',
                value=1,
                step=0.1,
                interactive=True,
            )

        with gr.Row(visible=False) as LoCon_row:
            # locon = gr.Checkbox(label='Train a LoCon instead of a general LoRA (does not support v2 base models) (may not be able to some utilities now)', value=False)
            conv_dim = gr.Slider(
                minimum=1,
                maximum=512,
                value=1,
                step=1,
                label='Convolution Rank (Dimension)',
            )
            conv_alpha = gr.Slider(
                minimum=0.1,
                maximum=512,
                value=1,
                step=0.1,
                label='Convolution Alpha',
            )

        # Show or hide LoCon conv settings depending on LoRA type selection
        def LoRA_type_change(LoRA_type):
            print('LoRA type changed...')
            if (
                LoRA_type == 'LoCon'
                or LoRA_type == 'Kohya LoCon'
                or LoRA_type == 'LyCORIS/LoHa'
                or LoRA_type == 'LyCORIS/LoCon'
            ):
                return gr.Group.update(visible=True)
            else:
                return gr.Group.update(visible=False)

        LoRA_type.change(
            LoRA_type_change, inputs=[LoRA_type], outputs=[LoCon_row]
        )
        with gr.Row():
            max_resolution = gr.Textbox(
                label='Max resolution',
                value='512,512',
                placeholder='512,512',
            )
            stop_text_encoder_training = gr.Slider(
                minimum=0,
                maximum=100,
                value=0,
                step=1,
                label='Stop text encoder training',
            )
            enable_bucket = gr.Checkbox(label='Enable buckets', value=True)

        with gr.Accordion('Advanced Configuration', open=False):
            with gr.Row():
                no_token_padding = gr.Checkbox(
                    label='No token padding', value=False
                )
                gradient_accumulation_steps = gr.Number(
                    label='Gradient accumulate steps', value='1'
                )
            with gr.Row():
                prior_loss_weight = gr.Number(
                    label='Prior loss weight', value=1.0
                )
                lr_scheduler_num_cycles = gr.Textbox(
                    label='LR number of cycles',
                    placeholder='(Optional) For Cosine with restart and polynomial only',
                )
                lr_scheduler_power = gr.Textbox(
                    label='LR power',
                    placeholder='(Optional) For Cosine with restart and polynomial only',
                )
            (
                # use_8bit_adam,
                xformers,
                full_fp16,
                gradient_checkpointing,
                shuffle_caption,
                color_aug,
                flip_aug,
                clip_skip,
                mem_eff_attn,
                save_state,
                resume,
                max_token_length,
                max_train_epochs,
                max_data_loader_n_workers,
                keep_tokens,
                persistent_data_loader_workers,
                bucket_no_upscale,
                random_crop,
                bucket_reso_steps,
                caption_dropout_every_n_epochs,
                caption_dropout_rate,
                noise_offset,
                additional_parameters,
                vae_batch_size,
                min_snr_gamma,
            ) = gradio_advanced_training()

            color_aug.change(
                color_aug_changed,
                inputs=[color_aug],
                outputs=[cache_latents],
            )

        (
            sample_every_n_steps,
            sample_every_n_epochs,
            sample_sampler,
            sample_prompts,
        ) = sample_gradio_config()

    with gr.Tab('Tools'):
        gr.Markdown(
            'This section provides Dreambooth tools to help set up your dataset...'
        )
        gradio_dreambooth_folder_creation_tab(
            train_data_dir_input=train_data_dir,
            reg_data_dir_input=reg_data_dir,
            output_dir_input=output_dir,
            logging_dir_input=logging_dir,
        )
        gradio_dataset_balancing_tab()
        gradio_merge_lora_tab()
        gradio_svd_merge_lora_tab()
        gradio_resize_lora_tab()
        gradio_verify_lora_tab()

    button_run = gr.Button('Train model', variant='primary')

    button_print = gr.Button('Print training command')

    # Setup gradio tensorboard buttons
    button_start_tensorboard, button_stop_tensorboard = gradio_tensorboard()

    button_start_tensorboard.click(
        start_tensorboard,
        inputs=logging_dir,
        show_progress=False,
    )

    button_stop_tensorboard.click(
        stop_tensorboard,
        show_progress=False,
    )

    settings_list = [
        pretrained_model_name_or_path,
        v2,
        v_parameterization,
        logging_dir,
        train_data_dir,
        reg_data_dir,
        output_dir,
        max_resolution,
        learning_rate,
        lr_scheduler,
        lr_warmup,
        train_batch_size,
        epoch,
        save_every_n_epochs,
        mixed_precision,
        save_precision,
        seed,
        num_cpu_threads_per_process,
        cache_latents,
        caption_extension,
        enable_bucket,
        gradient_checkpointing,
        full_fp16,
        no_token_padding,
        stop_text_encoder_training,
        # use_8bit_adam,
        xformers,
        save_model_as,
        shuffle_caption,
        save_state,
        resume,
        prior_loss_weight,
        text_encoder_lr,
        unet_lr,
        network_dim,
        lora_network_weights,
        color_aug,
        flip_aug,
        clip_skip,
        gradient_accumulation_steps,
        mem_eff_attn,
        output_name,
        model_list,
        max_token_length,
        max_train_epochs,
        max_data_loader_n_workers,
        network_alpha,
        training_comment,
        keep_tokens,
        lr_scheduler_num_cycles,
        lr_scheduler_power,
        persistent_data_loader_workers,
        bucket_no_upscale,
        random_crop,
        bucket_reso_steps,
        caption_dropout_every_n_epochs,
        caption_dropout_rate,
        optimizer,
        optimizer_args,
        noise_offset,
        LoRA_type,
        conv_dim,
        conv_alpha,
        sample_every_n_steps,
        sample_every_n_epochs,
        sample_sampler,
        sample_prompts,
        additional_parameters,
        vae_batch_size,
        min_snr_gamma,
    ]
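
    # The order of settings_list must match the parameter order of
    # save_configuration, open_configuration, and train_model (after their
    # leading dummy/file_path arguments).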

    button_open_config.click(
        open_configuration,
        inputs=[dummy_db_true, config_file_name] + settings_list,
        outputs=[config_file_name] + settings_list + [LoCon_row],
        show_progress=False,
    )

    button_load_config.click(
        open_configuration,
        inputs=[dummy_db_false, config_file_name] + settings_list,
        outputs=[config_file_name] + settings_list + [LoCon_row],
        show_progress=False,
    )

    button_save_config.click(
        save_configuration,
        inputs=[dummy_db_false, config_file_name] + settings_list,
        outputs=[config_file_name],
        show_progress=False,
    )

    button_save_as_config.click(
        save_configuration,
        inputs=[dummy_db_true, config_file_name] + settings_list,
        outputs=[config_file_name],
        show_progress=False,
    )

    button_run.click(
        train_model,
        inputs=[dummy_db_false] + settings_list,
        show_progress=False,
    )

    button_print.click(
        train_model,
        inputs=[dummy_db_true] + settings_list,
        show_progress=False,
    )

    return (
        train_data_dir,
        reg_data_dir,
        output_dir,
        logging_dir,
    )

def UI(**kwargs):
    css = ''

    if os.path.exists('./style.css'):
        with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:
            print('Load CSS...')
            css += file.read() + '\n'

    interface = gr.Blocks(css=css)

    with interface:
        with gr.Tab('LoRA'):
            (
                train_data_dir_input,
                reg_data_dir_input,
                output_dir_input,
                logging_dir_input,
            ) = lora_tab()
        with gr.Tab('Utilities'):
            utilities_tab(
                train_data_dir_input=train_data_dir_input,
                reg_data_dir_input=reg_data_dir_input,
                output_dir_input=output_dir_input,
                logging_dir_input=logging_dir_input,
                enable_copy_info_button=True,
            )

    # Show the interface
    launch_kwargs = {}
    if not kwargs.get('username', None) == '':
        launch_kwargs['auth'] = (
            kwargs.get('username', None),
            kwargs.get('password', None),
        )
    if kwargs.get('server_port', 0) > 0:
        launch_kwargs['server_port'] = kwargs.get('server_port', 0)
    if kwargs.get('inbrowser', False):
        launch_kwargs['inbrowser'] = kwargs.get('inbrowser', False)
    if kwargs.get('listen', True):
        launch_kwargs['server_name'] = '0.0.0.0'
    print(launch_kwargs)
    interface.launch(**launch_kwargs)


if __name__ == '__main__':
    # torch.cuda.set_per_process_memory_fraction(0.48)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--username', type=str, default='', help='Username for authentication'
    )
    parser.add_argument(
        '--password', type=str, default='', help='Password for authentication'
    )
    parser.add_argument(
        '--server_port',
        type=int,
        default=0,
        help='Port to run the server listener on',
    )
    parser.add_argument(
        '--inbrowser', action='store_true', help='Open in browser'
    )
    parser.add_argument(
        '--listen',
        action='store_true',
        help='Launch gradio with server name 0.0.0.0, allowing LAN access',
    )

    args = parser.parse_args()

    UI(
        username=args.username,
        password=args.password,
        inbrowser=args.inbrowser,
        server_port=args.server_port,
    )
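
# Example invocation (assuming this file is saved as lora_gui.py):
#   python lora_gui.py --server_port 7860 --inbrowser --listen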