Making the GUI more modular by dividing it into separate modules
This commit is contained in:
parent 6a7e27e100
commit f60c8addd5
fine_tune.py: 2115 lines changed (file diff suppressed because it is too large)
finetune_gui.py: 815 lines changed (file diff suppressed because it is too large; loaded below)
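For orientation only (not part of the commit): a minimal sketch of how the new modular entry point composes the per-feature tabs, assuming the module layout shown in this diff. The names dreambooth_tab, finetune_tab and utilities_tab come from the kohya_gui.py and library/utilities.py files added further down.

# Minimal sketch, not the commit itself: composing the modular tabs the way
# the new kohya_gui.py in this diff does. Assumes dreambooth_gui, finetune_gui
# and library.utilities are importable from the repository root.
import gradio as gr
from dreambooth_gui import dreambooth_tab
from finetune_gui import finetune_tab
from library.utilities import utilities_tab

with gr.Blocks() as interface:
    with gr.Tab('Dreambooth'):
        # dreambooth_tab() returns the four directory textboxes so other tabs
        # can write into them (e.g. the "Copy info" button in Utilities)
        train_dir, reg_dir, out_dir, log_dir = dreambooth_tab()
    with gr.Tab('Finetune'):
        finetune_tab()
    with gr.Tab('Utilities'):
        utilities_tab(
            train_data_dir_input=train_dir,
            reg_data_dir_input=reg_dir,
            output_dir_input=out_dir,
            logging_dir_input=log_dir,
            enable_copy_info_button=True,
        )

interface.launch()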
@ -6,17 +6,12 @@ import subprocess
import pathlib
import shutil
import argparse

# from easygui import fileopenbox, filesavebox, diropenbox, msgbox
from library.basic_caption_gui import gradio_basic_caption_gui_tab
from library.convert_model_gui import gradio_convert_model_tab
from library.blip_caption_gui import gradio_blip_caption_gui_tab
from library.wd14_caption_gui import gradio_wd14_caption_gui_tab
from library.common_gui import (
    get_folder_path,
    get_file_path,
    get_saveasfile_path,
)
from library.utilities import utilities_tab

folder_symbol = '\U0001f4c2'   # 📂
refresh_symbol = '\U0001f504'   # 🔄
@ -386,6 +381,7 @@ def remove_doublequote(file_path):
|
||||
|
||||
return file_path
|
||||
|
||||
|
||||
def UI(username, password):
|
||||
|
||||
css = ''
|
||||
@ -398,399 +394,10 @@ def UI(username, password):
|
||||
interface = gr.Blocks(css=css)
|
||||
|
||||
with interface:
|
||||
dummy_true = gr.Label(value=True, visible=False)
|
||||
dummy_false = gr.Label(value=False, visible=False)
|
||||
with gr.Tab('Finetuning'):
|
||||
gr.Markdown('Enter kohya finetuner parameter using this interface.')
|
||||
with gr.Accordion('Configuration File Load/Save', open=False):
|
||||
with gr.Row():
|
||||
button_open_config = gr.Button(
|
||||
f'Open {folder_symbol}', elem_id='open_folder'
|
||||
)
|
||||
button_save_config = gr.Button(
|
||||
f'Save {save_style_symbol}', elem_id='open_folder'
|
||||
)
|
||||
button_save_as_config = gr.Button(
|
||||
f'Save as... {save_style_symbol}', elem_id='open_folder'
|
||||
)
|
||||
config_file_name = gr.Textbox(
|
||||
label='', placeholder='type file path or use buttons...'
|
||||
)
|
||||
config_file_name.change(
|
||||
remove_doublequote,
|
||||
inputs=[config_file_name],
|
||||
outputs=[config_file_name],
|
||||
)
|
||||
with gr.Tab('Source model'):
|
||||
# Define the input elements
|
||||
with gr.Row():
|
||||
pretrained_model_name_or_path_input = gr.Textbox(
|
||||
label='Pretrained model name or path',
|
||||
placeholder='enter the path to custom model or name of pretrained model',
|
||||
)
|
||||
pretrained_model_name_or_path_file = gr.Button(
|
||||
document_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
pretrained_model_name_or_path_file.click(
|
||||
get_file_path,
|
||||
inputs=pretrained_model_name_or_path_input,
|
||||
outputs=pretrained_model_name_or_path_input,
|
||||
)
|
||||
pretrained_model_name_or_path_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
pretrained_model_name_or_path_folder.click(
|
||||
get_folder_path,
|
||||
inputs=pretrained_model_name_or_path_input,
|
||||
outputs=pretrained_model_name_or_path_input,
|
||||
)
|
||||
model_list = gr.Dropdown(
|
||||
label='(Optional) Model Quick Pick',
|
||||
choices=[
|
||||
'custom',
|
||||
'stabilityai/stable-diffusion-2-1-base',
|
||||
'stabilityai/stable-diffusion-2-base',
|
||||
'stabilityai/stable-diffusion-2-1',
|
||||
'stabilityai/stable-diffusion-2',
|
||||
'runwayml/stable-diffusion-v1-5',
|
||||
'CompVis/stable-diffusion-v1-4',
|
||||
],
|
||||
)
|
||||
save_model_as_dropdown = gr.Dropdown(
|
||||
label='Save trained model as',
|
||||
choices=[
|
||||
'same as source model',
|
||||
'ckpt',
|
||||
'diffusers',
|
||||
'diffusers_safetensors',
|
||||
'safetensors',
|
||||
],
|
||||
value='same as source model',
|
||||
)
|
||||
|
||||
with gr.Row():
|
||||
v2_input = gr.Checkbox(label='v2', value=True)
|
||||
v_parameterization_input = gr.Checkbox(
|
||||
label='v_parameterization', value=False
|
||||
)
|
||||
model_list.change(
|
||||
set_pretrained_model_name_or_path_input,
|
||||
inputs=[model_list, v2_input, v_parameterization_input],
|
||||
outputs=[
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
],
|
||||
)
|
||||
with gr.Tab('Directories'):
|
||||
with gr.Row():
|
||||
train_dir_input = gr.Textbox(
|
||||
label='Training config folder',
|
||||
placeholder='folder where the training configuration files will be saved',
|
||||
)
|
||||
train_dir_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
train_dir_folder.click(
|
||||
get_folder_path, outputs=train_dir_input
|
||||
)
|
||||
|
||||
image_folder_input = gr.Textbox(
|
||||
label='Training Image folder',
|
||||
placeholder='folder where the training images are located',
|
||||
)
|
||||
image_folder_input_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
image_folder_input_folder.click(
|
||||
get_folder_path, outputs=image_folder_input
|
||||
)
|
||||
with gr.Row():
|
||||
output_dir_input = gr.Textbox(
|
||||
label='Output folder',
|
||||
placeholder='folder where the model will be saved',
|
||||
)
|
||||
output_dir_input_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
output_dir_input_folder.click(
|
||||
get_folder_path, outputs=output_dir_input
|
||||
)
|
||||
|
||||
logging_dir_input = gr.Textbox(
|
||||
label='Logging folder',
|
||||
placeholder='Optional: enable logging and output TensorBoard log to this folder',
|
||||
)
|
||||
logging_dir_input_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
logging_dir_input_folder.click(
|
||||
get_folder_path, outputs=logging_dir_input
|
||||
)
|
||||
train_dir_input.change(
|
||||
remove_doublequote,
|
||||
inputs=[train_dir_input],
|
||||
outputs=[train_dir_input],
|
||||
)
|
||||
image_folder_input.change(
|
||||
remove_doublequote,
|
||||
inputs=[image_folder_input],
|
||||
outputs=[image_folder_input],
|
||||
)
|
||||
output_dir_input.change(
|
||||
remove_doublequote,
|
||||
inputs=[output_dir_input],
|
||||
outputs=[output_dir_input],
|
||||
)
|
||||
with gr.Tab('Training parameters'):
|
||||
with gr.Row():
|
||||
learning_rate_input = gr.Textbox(
|
||||
label='Learning rate', value=1e-6
|
||||
)
|
||||
lr_scheduler_input = gr.Dropdown(
|
||||
label='LR Scheduler',
|
||||
choices=[
|
||||
'constant',
|
||||
'constant_with_warmup',
|
||||
'cosine',
|
||||
'cosine_with_restarts',
|
||||
'linear',
|
||||
'polynomial',
|
||||
],
|
||||
value='constant',
|
||||
)
|
||||
lr_warmup_input = gr.Textbox(label='LR warmup', value=0)
|
||||
with gr.Row():
|
||||
dataset_repeats_input = gr.Textbox(
|
||||
label='Dataset repeats', value=40
|
||||
)
|
||||
train_batch_size_input = gr.Slider(
|
||||
minimum=1,
|
||||
maximum=32,
|
||||
label='Train batch size',
|
||||
value=1,
|
||||
step=1,
|
||||
)
|
||||
epoch_input = gr.Textbox(label='Epoch', value=1)
|
||||
save_every_n_epochs_input = gr.Textbox(
|
||||
label='Save every N epochs', value=1
|
||||
)
|
||||
with gr.Row():
|
||||
mixed_precision_input = gr.Dropdown(
|
||||
label='Mixed precision',
|
||||
choices=[
|
||||
'no',
|
||||
'fp16',
|
||||
'bf16',
|
||||
],
|
||||
value='fp16',
|
||||
)
|
||||
save_precision_input = gr.Dropdown(
|
||||
label='Save precision',
|
||||
choices=[
|
||||
'float',
|
||||
'fp16',
|
||||
'bf16',
|
||||
],
|
||||
value='fp16',
|
||||
)
|
||||
num_cpu_threads_per_process_input = gr.Slider(
|
||||
minimum=1,
|
||||
maximum=os.cpu_count(),
|
||||
step=1,
|
||||
label='Number of CPU threads per process',
|
||||
value=os.cpu_count(),
|
||||
)
|
||||
with gr.Row():
|
||||
seed_input = gr.Textbox(label='Seed', value=1234)
|
||||
max_resolution_input = gr.Textbox(
|
||||
label='Max resolution', value='512,512'
|
||||
)
|
||||
with gr.Row():
|
||||
caption_extention_input = gr.Textbox(
|
||||
label='Caption Extension',
|
||||
placeholder='(Optional) Extension for caption files. default: .txt',
|
||||
)
|
||||
train_text_encoder_input = gr.Checkbox(
|
||||
label='Train text encoder', value=True
|
||||
)
|
||||
with gr.Box():
|
||||
with gr.Row():
|
||||
create_caption = gr.Checkbox(
|
||||
label='Generate caption database', value=True
|
||||
)
|
||||
create_buckets = gr.Checkbox(
|
||||
label='Generate image buckets', value=True
|
||||
)
|
||||
train = gr.Checkbox(label='Train model', value=True)
|
||||
|
||||
button_run = gr.Button('Run')
|
||||
|
||||
button_run.click(
|
||||
train_model,
|
||||
inputs=[
|
||||
create_caption,
|
||||
create_buckets,
|
||||
train,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
)
|
||||
|
||||
button_open_config.click(
|
||||
open_config_file,
|
||||
inputs=[
|
||||
config_file_name,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
create_buckets,
|
||||
create_caption,
|
||||
train,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
outputs=[
|
||||
config_file_name,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
create_buckets,
|
||||
create_caption,
|
||||
train,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
)
|
||||
|
||||
button_save_config.click(
|
||||
save_configuration,
|
||||
inputs=[
|
||||
dummy_false,
|
||||
config_file_name,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
create_buckets,
|
||||
create_caption,
|
||||
train,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
outputs=[config_file_name],
|
||||
)
|
||||
|
||||
button_save_as_config.click(
|
||||
save_configuration,
|
||||
inputs=[
|
||||
dummy_true,
|
||||
config_file_name,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
create_buckets,
|
||||
create_caption,
|
||||
train,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
outputs=[config_file_name],
|
||||
)
|
||||
|
||||
with gr.Tab('Utilities'):
|
||||
gradio_basic_caption_gui_tab()
|
||||
gradio_blip_caption_gui_tab()
|
||||
gradio_wd14_caption_gui_tab()
|
||||
gradio_convert_model_tab()
|
||||
|
||||
with gr.Tab("Finetune"):
|
||||
finetune_tab()
|
||||
with gr.Tab("Utilities"):
|
||||
utilities_tab(enable_dreambooth_tab=False)
|
||||
|
||||
# Show the interface
|
||||
if not username == '':
|
||||
@ -798,13 +405,407 @@ def UI(username, password):
|
||||
else:
|
||||
interface.launch()
|
||||
|
||||
def finetune_tab():
|
||||
dummy_ft_true = gr.Label(value=True, visible=False)
|
||||
dummy_ft_false = gr.Label(value=False, visible=False)
|
||||
gr.Markdown(
|
||||
'Enter kohya finetuner parameter using this interface.'
|
||||
)
|
||||
with gr.Accordion('Configuration File Load/Save', open=False):
|
||||
with gr.Row():
|
||||
button_open_config = gr.Button(
|
||||
f'Open {folder_symbol}', elem_id='open_folder'
|
||||
)
|
||||
button_save_config = gr.Button(
|
||||
f'Save {save_style_symbol}', elem_id='open_folder'
|
||||
)
|
||||
button_save_as_config = gr.Button(
|
||||
f'Save as... {save_style_symbol}',
|
||||
elem_id='open_folder',
|
||||
)
|
||||
config_file_name = gr.Textbox(
|
||||
label='', placeholder='type file path or use buttons...'
|
||||
)
|
||||
config_file_name.change(
|
||||
remove_doublequote,
|
||||
inputs=[config_file_name],
|
||||
outputs=[config_file_name],
|
||||
)
|
||||
with gr.Tab('Source model'):
|
||||
# Define the input elements
|
||||
with gr.Row():
|
||||
pretrained_model_name_or_path_input = gr.Textbox(
|
||||
label='Pretrained model name or path',
|
||||
placeholder='enter the path to custom model or name of pretrained model',
|
||||
)
|
||||
pretrained_model_name_or_path_file = gr.Button(
|
||||
document_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
pretrained_model_name_or_path_file.click(
|
||||
get_file_path,
|
||||
inputs=pretrained_model_name_or_path_input,
|
||||
outputs=pretrained_model_name_or_path_input,
|
||||
)
|
||||
pretrained_model_name_or_path_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
pretrained_model_name_or_path_folder.click(
|
||||
get_folder_path,
|
||||
inputs=pretrained_model_name_or_path_input,
|
||||
outputs=pretrained_model_name_or_path_input,
|
||||
)
|
||||
model_list = gr.Dropdown(
|
||||
label='(Optional) Model Quick Pick',
|
||||
choices=[
|
||||
'custom',
|
||||
'stabilityai/stable-diffusion-2-1-base',
|
||||
'stabilityai/stable-diffusion-2-base',
|
||||
'stabilityai/stable-diffusion-2-1',
|
||||
'stabilityai/stable-diffusion-2',
|
||||
'runwayml/stable-diffusion-v1-5',
|
||||
'CompVis/stable-diffusion-v1-4',
|
||||
],
|
||||
)
|
||||
save_model_as_dropdown = gr.Dropdown(
|
||||
label='Save trained model as',
|
||||
choices=[
|
||||
'same as source model',
|
||||
'ckpt',
|
||||
'diffusers',
|
||||
'diffusers_safetensors',
|
||||
'safetensors',
|
||||
],
|
||||
value='same as source model',
|
||||
)
|
||||
|
||||
with gr.Row():
|
||||
v2_input = gr.Checkbox(label='v2', value=True)
|
||||
v_parameterization_input = gr.Checkbox(
|
||||
label='v_parameterization', value=False
|
||||
)
|
||||
model_list.change(
|
||||
set_pretrained_model_name_or_path_input,
|
||||
inputs=[model_list, v2_input, v_parameterization_input],
|
||||
outputs=[
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
],
|
||||
)
|
||||
with gr.Tab('Directories'):
|
||||
with gr.Row():
|
||||
train_dir_input = gr.Textbox(
|
||||
label='Training config folder',
|
||||
placeholder='folder where the training configuration files will be saved',
|
||||
)
|
||||
train_dir_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
train_dir_folder.click(
|
||||
get_folder_path, outputs=train_dir_input
|
||||
)
|
||||
|
||||
image_folder_input = gr.Textbox(
|
||||
label='Training Image folder',
|
||||
placeholder='folder where the training images are located',
|
||||
)
|
||||
image_folder_input_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
image_folder_input_folder.click(
|
||||
get_folder_path, outputs=image_folder_input
|
||||
)
|
||||
with gr.Row():
|
||||
output_dir_input = gr.Textbox(
|
||||
label='Output folder',
|
||||
placeholder='folder where the model will be saved',
|
||||
)
|
||||
output_dir_input_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
output_dir_input_folder.click(
|
||||
get_folder_path, outputs=output_dir_input
|
||||
)
|
||||
|
||||
logging_dir_input = gr.Textbox(
|
||||
label='Logging folder',
|
||||
placeholder='Optional: enable logging and output TensorBoard log to this folder',
|
||||
)
|
||||
logging_dir_input_folder = gr.Button(
|
||||
folder_symbol, elem_id='open_folder_small'
|
||||
)
|
||||
logging_dir_input_folder.click(
|
||||
get_folder_path, outputs=logging_dir_input
|
||||
)
|
||||
train_dir_input.change(
|
||||
remove_doublequote,
|
||||
inputs=[train_dir_input],
|
||||
outputs=[train_dir_input],
|
||||
)
|
||||
image_folder_input.change(
|
||||
remove_doublequote,
|
||||
inputs=[image_folder_input],
|
||||
outputs=[image_folder_input],
|
||||
)
|
||||
output_dir_input.change(
|
||||
remove_doublequote,
|
||||
inputs=[output_dir_input],
|
||||
outputs=[output_dir_input],
|
||||
)
|
||||
with gr.Tab('Training parameters'):
|
||||
with gr.Row():
|
||||
learning_rate_input = gr.Textbox(
|
||||
label='Learning rate', value=1e-6
|
||||
)
|
||||
lr_scheduler_input = gr.Dropdown(
|
||||
label='LR Scheduler',
|
||||
choices=[
|
||||
'constant',
|
||||
'constant_with_warmup',
|
||||
'cosine',
|
||||
'cosine_with_restarts',
|
||||
'linear',
|
||||
'polynomial',
|
||||
],
|
||||
value='constant',
|
||||
)
|
||||
lr_warmup_input = gr.Textbox(label='LR warmup', value=0)
|
||||
with gr.Row():
|
||||
dataset_repeats_input = gr.Textbox(
|
||||
label='Dataset repeats', value=40
|
||||
)
|
||||
train_batch_size_input = gr.Slider(
|
||||
minimum=1,
|
||||
maximum=32,
|
||||
label='Train batch size',
|
||||
value=1,
|
||||
step=1,
|
||||
)
|
||||
epoch_input = gr.Textbox(label='Epoch', value=1)
|
||||
save_every_n_epochs_input = gr.Textbox(
|
||||
label='Save every N epochs', value=1
|
||||
)
|
||||
with gr.Row():
|
||||
mixed_precision_input = gr.Dropdown(
|
||||
label='Mixed precision',
|
||||
choices=[
|
||||
'no',
|
||||
'fp16',
|
||||
'bf16',
|
||||
],
|
||||
value='fp16',
|
||||
)
|
||||
save_precision_input = gr.Dropdown(
|
||||
label='Save precision',
|
||||
choices=[
|
||||
'float',
|
||||
'fp16',
|
||||
'bf16',
|
||||
],
|
||||
value='fp16',
|
||||
)
|
||||
num_cpu_threads_per_process_input = gr.Slider(
|
||||
minimum=1,
|
||||
maximum=os.cpu_count(),
|
||||
step=1,
|
||||
label='Number of CPU threads per process',
|
||||
value=os.cpu_count(),
|
||||
)
|
||||
with gr.Row():
|
||||
seed_input = gr.Textbox(label='Seed', value=1234)
|
||||
max_resolution_input = gr.Textbox(
|
||||
label='Max resolution', value='512,512'
|
||||
)
|
||||
with gr.Row():
|
||||
caption_extention_input = gr.Textbox(
|
||||
label='Caption Extension',
|
||||
placeholder='(Optional) Extension for caption files. default: .txt',
|
||||
)
|
||||
train_text_encoder_input = gr.Checkbox(
|
||||
label='Train text encoder', value=True
|
||||
)
|
||||
with gr.Box():
|
||||
with gr.Row():
|
||||
create_caption = gr.Checkbox(
|
||||
label='Generate caption database', value=True
|
||||
)
|
||||
create_buckets = gr.Checkbox(
|
||||
label='Generate image buckets', value=True
|
||||
)
|
||||
train = gr.Checkbox(label='Train model', value=True)
|
||||
|
||||
button_run = gr.Button('Run')
|
||||
|
||||
button_run.click(
|
||||
train_model,
|
||||
inputs=[
|
||||
create_caption,
|
||||
create_buckets,
|
||||
train,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
)
|
||||
|
||||
button_open_config.click(
|
||||
open_config_file,
|
||||
inputs=[
|
||||
config_file_name,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
create_buckets,
|
||||
create_caption,
|
||||
train,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
outputs=[
|
||||
config_file_name,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
create_buckets,
|
||||
create_caption,
|
||||
train,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
)
|
||||
|
||||
button_save_config.click(
|
||||
save_configuration,
|
||||
inputs=[
|
||||
dummy_ft_false,
|
||||
config_file_name,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
create_buckets,
|
||||
create_caption,
|
||||
train,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
outputs=[config_file_name],
|
||||
)
|
||||
|
||||
button_save_as_config.click(
|
||||
save_configuration,
|
||||
inputs=[
|
||||
dummy_ft_true,
|
||||
config_file_name,
|
||||
pretrained_model_name_or_path_input,
|
||||
v2_input,
|
||||
v_parameterization_input,
|
||||
train_dir_input,
|
||||
image_folder_input,
|
||||
output_dir_input,
|
||||
logging_dir_input,
|
||||
max_resolution_input,
|
||||
learning_rate_input,
|
||||
lr_scheduler_input,
|
||||
lr_warmup_input,
|
||||
dataset_repeats_input,
|
||||
train_batch_size_input,
|
||||
epoch_input,
|
||||
save_every_n_epochs_input,
|
||||
mixed_precision_input,
|
||||
save_precision_input,
|
||||
seed_input,
|
||||
num_cpu_threads_per_process_input,
|
||||
train_text_encoder_input,
|
||||
create_buckets,
|
||||
create_caption,
|
||||
train,
|
||||
save_model_as_dropdown,
|
||||
caption_extention_input,
|
||||
],
|
||||
outputs=[config_file_name],
|
||||
)


if __name__ == '__main__':
    # torch.cuda.set_per_process_memory_fraction(0.48)
    parser = argparse.ArgumentParser()
    parser.add_argument("--username", type=str, default='', help="Username for authentication")
    parser.add_argument("--password", type=str, default='', help="Password for authentication")

    args = parser.parse_args()

    UI(username=args.username, password=args.password)
    # torch.cuda.set_per_process_memory_fraction(0.48)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--username', type=str, default='', help='Username for authentication'
    )
    parser.add_argument(
        '--password', type=str, default='', help='Password for authentication'
    )

    args = parser.parse_args()

    UI(username=args.username, password=args.password)
kohya_gui.py: 58 lines (new file)
@ -0,0 +1,58 @@
import gradio as gr
import os
import argparse
from dreambooth_gui import dreambooth_tab
from finetune_gui import finetune_tab
from library.utilities import utilities_tab


def UI(username, password):

    css = ''

    if os.path.exists('./style.css'):
        with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:
            print('Load CSS...')
            css += file.read() + '\n'

    interface = gr.Blocks(css=css)

    with interface:
        with gr.Tab('Dreambooth'):
            (
                train_data_dir_input,
                reg_data_dir_input,
                output_dir_input,
                logging_dir_input,
            ) = dreambooth_tab()
        with gr.Tab('Finetune'):
            finetune_tab()
        with gr.Tab('Utilities'):
            utilities_tab(
                train_data_dir_input=train_data_dir_input,
                reg_data_dir_input=reg_data_dir_input,
                output_dir_input=output_dir_input,
                logging_dir_input=logging_dir_input,
                enable_copy_info_button=True,
            )

    # Show the interface
    if not username == '':
        interface.launch(auth=(username, password))
    else:
        interface.launch()


if __name__ == '__main__':
    # torch.cuda.set_per_process_memory_fraction(0.48)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--username', type=str, default='', help='Username for authentication'
    )
    parser.add_argument(
        '--password', type=str, default='', help='Password for authentication'
    )

    args = parser.parse_args()

    UI(username=args.username, password=args.password)
@ -72,10 +72,13 @@ def get_saveasfile_path(file_path='', defaultextension='.json'):
def add_pre_postfix(
    folder='', prefix='', postfix='', caption_file_ext='.caption'
):
    if prefix == '' and postfix == '':
        return

    # set caption extension to default in case it was not provided
    if caption_file_ext == '':
        caption_file_ext = '.caption'

    files = [f for f in os.listdir(folder) if f.endswith(caption_file_ext)]
    if not prefix == '':
        prefix = f'{prefix} '
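A hedged usage sketch of the add_pre_postfix helper changed in the hunk above; the folder path, prefix and extension below are illustrative and not taken from the commit.

# Illustrative only: prepend a trigger phrase to every .txt caption in a folder
# with the add_pre_postfix helper shown above. Folder and prefix are made up.
add_pre_postfix(
    folder='./train_images',        # folder containing the caption files
    prefix='photo of sks person',   # text prepended to each caption
    caption_file_ext='.txt',        # extension of the caption files to touch
)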
@ -51,7 +51,12 @@ def dataset_balancing(concept_repeats, folder, insecure):
        if match:
            # Multiply the repeats value by the number inside the braces
            if not images == 0:
                repeats = max(1, round(concept_repeats / images * float(match.group(1))))
                repeats = max(
                    1,
                    round(
                        concept_repeats / images * float(match.group(1))
                    ),
                )
            else:
                repeats = 0
            subdir = subdir[match.end() :]
@ -95,7 +100,7 @@ def warning(insecure):


def gradio_dataset_balancing_tab():
    with gr.Tab('Dataset balancing'):
    with gr.Tab('Dreambooth Dataset balancing'):
        gr.Markdown(
            'This utility will ensure that each concept folder in the dataset folder is used equally during the training process of the dreambooth machine learning model, regardless of the number of images in each folder. It will do this by renaming the concept folders to indicate the number of times they should be repeated during training.'
        )
@ -68,31 +68,31 @@ def dreambooth_folder_preparation(
    print(f'Copy {util_training_images_dir_input} to {training_dir}...')
    shutil.copytree(util_training_images_dir_input, training_dir)

    # Create the regularization_dir path
    if (
        util_class_prompt_input == ''
        or not util_regularization_images_repeat_input > 0
    ):
        print(
            'Regularization images directory or repeats is missing... not copying regularisation images...'
        )
    else:
        regularization_dir = os.path.join(
            util_training_dir_output,
            f'reg/{int(util_regularization_images_repeat_input)}_{util_class_prompt_input}',
        )

        # Remove folders if they exist
        if os.path.exists(regularization_dir):
            print(f'Removing existing directory {regularization_dir}...')
            shutil.rmtree(regularization_dir)

        # Copy the regularisation images to their respective directories
        print(
            f'Copy {util_regularization_images_dir_input} to {regularization_dir}...'
        )
        shutil.copytree(
            util_regularization_images_dir_input, regularization_dir
        )
    if not util_regularization_images_dir_input == '':
        # Create the regularization_dir path
        if not util_regularization_images_repeat_input > 0:
            print('Repeats is missing... not copying regularisation images...')
        else:
            regularization_dir = os.path.join(
                util_training_dir_output,
                f'reg/{int(util_regularization_images_repeat_input)}_{util_class_prompt_input}',
            )

            # Remove folders if they exist
            if os.path.exists(regularization_dir):
                print(f'Removing existing directory {regularization_dir}...')
                shutil.rmtree(regularization_dir)

            # Copy the regularisation images to their respective directories
            print(
                f'Copy {util_regularization_images_dir_input} to {regularization_dir}...'
            )
            shutil.copytree(
                util_regularization_images_dir_input, regularization_dir
            )
    else:
        print(
            'Regularization images directory is missing... not copying regularisation images...'
        )

    # create log and model folder
@ -110,10 +110,11 @@ def dreambooth_folder_preparation(


def gradio_dreambooth_folder_creation_tab(
    train_data_dir_input,
    reg_data_dir_input,
    output_dir_input,
    logging_dir_input,
    train_data_dir_input=gr.Textbox(),
    reg_data_dir_input=gr.Textbox(),
    output_dir_input=gr.Textbox(),
    logging_dir_input=gr.Textbox(),
    enable_copy_info_button=bool(False),
):
    with gr.Tab('Dreambooth folder preparation'):
        gr.Markdown(
@ -191,16 +192,17 @@ def gradio_dreambooth_folder_creation_tab(
                util_training_dir_output,
            ],
        )
        button_copy_info_to_Directories_tab = gr.Button(
            'Copy info to Directories Tab'
        )
        button_copy_info_to_Directories_tab.click(
            copy_info_to_Directories_tab,
            inputs=[util_training_dir_output],
            outputs=[
                train_data_dir_input,
                reg_data_dir_input,
                output_dir_input,
                logging_dir_input,
            ],
        )
        if enable_copy_info_button:
            button_copy_info_to_Directories_tab = gr.Button(
                'Copy info to Directories Tab'
            )
            button_copy_info_to_Directories_tab.click(
                copy_info_to_Directories_tab,
                inputs=[util_training_dir_output],
                outputs=[
                    train_data_dir_input,
                    reg_data_dir_input,
                    output_dir_input,
                    logging_dir_input,
                ],
            )
library/utilities.py: 84 lines (new file)
@ -0,0 +1,84 @@
# v1: initial release
# v2: add open and save folder icons
# v3: Add new Utilities tab for Dreambooth folder preparation
# v3.1: Adding captioning of images to utilities

import gradio as gr
import os
import argparse
from library.dreambooth_folder_creation_gui import (
    gradio_dreambooth_folder_creation_tab,
)
from library.basic_caption_gui import gradio_basic_caption_gui_tab
from library.convert_model_gui import gradio_convert_model_tab
from library.blip_caption_gui import gradio_blip_caption_gui_tab
from library.wd14_caption_gui import gradio_wd14_caption_gui_tab
from library.dataset_balancing_gui import gradio_dataset_balancing_tab


def utilities_tab(
    train_data_dir_input=gr.Textbox(),
    reg_data_dir_input=gr.Textbox(),
    output_dir_input=gr.Textbox(),
    logging_dir_input=gr.Textbox(),
    enable_copy_info_button=bool(False),
    enable_dreambooth_tab=True,
):
    with gr.Tab('Captioning'):
        gradio_basic_caption_gui_tab()
        gradio_blip_caption_gui_tab()
        gradio_wd14_caption_gui_tab()
    if enable_dreambooth_tab:
        with gr.Tab('Dreambooth'):
            gr.Markdown('This section provides Dreambooth specific tools.')
            gradio_dreambooth_folder_creation_tab(
                train_data_dir_input=train_data_dir_input,
                reg_data_dir_input=reg_data_dir_input,
                output_dir_input=output_dir_input,
                logging_dir_input=logging_dir_input,
                enable_copy_info_button=enable_copy_info_button,
            )
            gradio_dataset_balancing_tab()
    gradio_convert_model_tab()

    return (
        train_data_dir_input,
        reg_data_dir_input,
        output_dir_input,
        logging_dir_input,
    )


def UI(username, password):
    css = ''

    if os.path.exists('./style.css'):
        with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:
            print('Load CSS...')
            css += file.read() + '\n'

    interface = gr.Blocks(css=css)

    with interface:
        utilities_tab()

    # Show the interface
    if not username == '':
        interface.launch(auth=(username, password))
    else:
        interface.launch()


if __name__ == '__main__':
    # torch.cuda.set_per_process_memory_fraction(0.48)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--username', type=str, default='', help='Username for authentication'
    )
    parser.add_argument(
        '--password', type=str, default='', help='Password for authentication'
    )

    args = parser.parse_args()

    UI(username=args.username, password=args.password)