Add authentication support

bmaltais 2022-12-21 09:05:06 -05:00
parent 1bc5089db8
commit aa5d13f9a7
3 changed files with 888 additions and 853 deletions

View File

@@ -10,4 +10,9 @@ You can find the dreambooth solution specific [Dreambooth README](README_dreamb
## Finetune
You can find the finetune solution specific [Finetune README](README_finetune.md)
## Change history
* 12/21 (v9.7) update:
- add optional GUI authentication support via: `python fine_tune.py --username=<name> --password=<password>`
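
For reference, the change itself is small: each GUI entry point gains optional `--username` / `--password` flags, and when a username is supplied the pair is forwarded to Gradio's built-in HTTP basic authentication. Below is a minimal sketch of that wiring, condensed from the diffs that follow; it assumes Gradio's `launch(auth=...)` parameter, which accepts a `(username, password)` tuple and prompts for credentials before serving the app.

```python
import argparse

import gradio as gr


def UI(username, password):
    # Build the (abbreviated) interface; the real GUIs add many tabs here.
    with gr.Blocks() as interface:
        gr.Markdown('Enter kohya finetuner parameter using this interface.')

    if not username == '':
        # Require a login before the app is served
        interface.launch(auth=(username, password))
    else:
        # Previous behaviour: no authentication
        interface.launch()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--username', type=str, default='', help='Username for authentication')
    parser.add_argument('--password', type=str, default='', help='Password for authentication')
    args = parser.parse_args()
    UI(username=args.username, password=args.password)
```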

View File

@@ -10,6 +10,7 @@ import os
import subprocess
import pathlib
import shutil
import argparse
from library.dreambooth_folder_creation_gui import (
gradio_dreambooth_folder_creation_tab,
)
@@ -472,491 +473,505 @@ def set_pretrained_model_name_or_path_input(value, v2, v_parameterization):
return value, v2, v_parameterization
def UI(username, password):
css = ''
if os.path.exists('./style.css'):
with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:
print('Load CSS...')
css += file.read() + '\n'
interface = gr.Blocks(css=css)
with interface:
dummy_true = gr.Label(value=True, visible=False)
dummy_false = gr.Label(value=False, visible=False)
with gr.Tab('Dreambooth'):
gr.Markdown('Enter kohya finetuner parameter using this interface.')
with gr.Accordion('Configuration file', open=False):
with gr.Row():
button_open_config = gr.Button('Open 📂', elem_id='open_folder')
button_save_config = gr.Button('Save 💾', elem_id='open_folder')
button_save_as_config = gr.Button(
'Save as... 💾', elem_id='open_folder'
)
config_file_name = gr.Textbox(
label='',
placeholder="type the configuration file path or use the 'Open' button above to select it...",
interactive=True,
)
# config_file_name.change(
# remove_doublequote,
# inputs=[config_file_name],
# outputs=[config_file_name],
# )
with gr.Tab('Source model'):
# Define the input elements
with gr.Row():
pretrained_model_name_or_path_input = gr.Textbox(
label='Pretrained model name or path',
placeholder='enter the path to custom model or name of pretrained model',
)
pretrained_model_name_or_path_fille = gr.Button(
document_symbol, elem_id='open_folder_small'
)
pretrained_model_name_or_path_fille.click(
get_file_path,
inputs=[pretrained_model_name_or_path_input],
outputs=pretrained_model_name_or_path_input,
)
pretrained_model_name_or_path_folder = gr.Button(
folder_symbol, elem_id='open_folder_small'
)
pretrained_model_name_or_path_folder.click(
get_folder_path,
outputs=pretrained_model_name_or_path_input,
)
model_list = gr.Dropdown(
label='(Optional) Model Quick Pick',
choices=[
'custom',
'stabilityai/stable-diffusion-2-1-base',
'stabilityai/stable-diffusion-2-base',
'stabilityai/stable-diffusion-2-1',
'stabilityai/stable-diffusion-2',
'runwayml/stable-diffusion-v1-5',
'CompVis/stable-diffusion-v1-4',
],
)
save_model_as_dropdown = gr.Dropdown(
label='Save trained model as',
choices=[
'same as source model',
'ckpt',
'diffusers',
'diffusers_safetensors',
'safetensors',
],
value='same as source model',
)
with gr.Row():
v2_input = gr.Checkbox(label='v2', value=True)
v_parameterization_input = gr.Checkbox(
label='v_parameterization', value=False
)
pretrained_model_name_or_path_input.change(
remove_doublequote,
inputs=[pretrained_model_name_or_path_input],
outputs=[pretrained_model_name_or_path_input],
)
model_list.change(
set_pretrained_model_name_or_path_input,
inputs=[model_list, v2_input, v_parameterization_input],
outputs=[
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
],
)
with gr.Tab('Directories'):
with gr.Row():
train_data_dir_input = gr.Textbox(
label='Image folder',
placeholder='Folder where the training folders containing the images are located',
)
train_data_dir_input_folder = gr.Button(
'📂', elem_id='open_folder_small'
)
train_data_dir_input_folder.click(
get_folder_path, outputs=train_data_dir_input
)
reg_data_dir_input = gr.Textbox(
label='Regularisation folder',
placeholder='(Optional) Folder where where the regularization folders containing the images are located',
)
reg_data_dir_input_folder = gr.Button(
'📂', elem_id='open_folder_small'
)
reg_data_dir_input_folder.click(
get_folder_path, outputs=reg_data_dir_input
)
with gr.Row():
output_dir_input = gr.Textbox(
label='Output folder',
placeholder='Folder to output trained model',
)
output_dir_input_folder = gr.Button(
'📂', elem_id='open_folder_small'
)
output_dir_input_folder.click(
get_folder_path, outputs=output_dir_input
)
logging_dir_input = gr.Textbox(
label='Logging folder',
placeholder='Optional: enable logging and output TensorBoard log to this folder',
)
logging_dir_input_folder = gr.Button(
'📂', elem_id='open_folder_small'
)
logging_dir_input_folder.click(
get_folder_path, outputs=logging_dir_input
)
train_data_dir_input.change(
remove_doublequote,
inputs=[train_data_dir_input],
outputs=[train_data_dir_input],
)
reg_data_dir_input.change(
remove_doublequote,
inputs=[reg_data_dir_input],
outputs=[reg_data_dir_input],
)
output_dir_input.change(
remove_doublequote,
inputs=[output_dir_input],
outputs=[output_dir_input],
)
logging_dir_input.change(
remove_doublequote,
inputs=[logging_dir_input],
outputs=[logging_dir_input],
)
with gr.Tab('Training parameters'):
with gr.Row():
learning_rate_input = gr.Textbox(
label='Learning rate', value=1e-6
)
lr_scheduler_input = gr.Dropdown(
label='LR Scheduler',
choices=[
'constant',
'constant_with_warmup',
'cosine',
'cosine_with_restarts',
'linear',
'polynomial',
],
value='constant',
)
lr_warmup_input = gr.Textbox(label='LR warmup', value=0)
with gr.Row():
train_batch_size_input = gr.Slider(
minimum=1,
maximum=32,
label='Train batch size',
value=1,
step=1,
)
epoch_input = gr.Textbox(label='Epoch', value=1)
save_every_n_epochs_input = gr.Textbox(
label='Save every N epochs', value=1
)
with gr.Row():
mixed_precision_input = gr.Dropdown(
label='Mixed precision',
choices=[
'no',
'fp16',
'bf16',
],
value='fp16',
)
save_precision_input = gr.Dropdown(
label='Save precision',
choices=[
'float',
'fp16',
'bf16',
],
value='fp16',
)
num_cpu_threads_per_process_input = gr.Slider(
minimum=1,
maximum=os.cpu_count(),
step=1,
label='Number of CPU threads per process',
value=os.cpu_count(),
)
with gr.Row():
seed_input = gr.Textbox(label='Seed', value=1234)
max_resolution_input = gr.Textbox(
label='Max resolution',
value='512,512',
placeholder='512,512',
)
with gr.Row():
caption_extention_input = gr.Textbox(
label='Caption Extension',
placeholder='(Optional) Extension for caption files. default: .caption',
)
stop_text_encoder_training_input = gr.Slider(
minimum=0,
maximum=100,
value=0,
step=1,
label='Stop text encoder training',
)
with gr.Row():
enable_bucket_input = gr.Checkbox(
label='Enable buckets', value=True
)
cache_latent_input = gr.Checkbox(
label='Cache latent', value=True
)
use_8bit_adam_input = gr.Checkbox(
label='Use 8bit adam', value=True
)
xformers_input = gr.Checkbox(label='Use xformers', value=True)
with gr.Accordion('Advanced Configuration', open=False):
with gr.Row():
full_fp16_input = gr.Checkbox(
label='Full fp16 training (experimental)', value=False
)
no_token_padding_input = gr.Checkbox(
label='No token padding', value=False
)
gradient_checkpointing_input = gr.Checkbox(
label='Gradient checkpointing', value=False
)
shuffle_caption = gr.Checkbox(
label='Shuffle caption', value=False
)
save_state = gr.Checkbox(label='Save training state', value=False)
with gr.Row():
resume = gr.Textbox(
label='Resume from saved training state',
placeholder='path to "last-state" state folder to resume from',
)
resume_button = gr.Button('📂', elem_id='open_folder_small')
resume_button.click(get_folder_path, outputs=resume)
prior_loss_weight = gr.Number(
label='Prior loss weight', value=1.0
)
button_run = gr.Button('Train model')
with gr.Tab('Utilities'):
with gr.Tab('Captioning'):
gradio_basic_caption_gui_tab()
gradio_blip_caption_gui_tab()
gradio_wd14_caption_gui_tab()
gradio_dreambooth_folder_creation_tab(
train_data_dir_input,
reg_data_dir_input,
output_dir_input,
logging_dir_input,
)
gradio_dataset_balancing_tab()
gradio_convert_model_tab()
button_open_config.click(
open_configuration,
inputs=[
config_file_name,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
logging_dir_input,
train_data_dir_input,
reg_data_dir_input,
output_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
cache_latent_input,
caption_extention_input,
enable_bucket_input,
gradient_checkpointing_input,
full_fp16_input,
no_token_padding_input,
stop_text_encoder_training_input,
use_8bit_adam_input,
xformers_input,
save_model_as_dropdown,
shuffle_caption,
save_state,
resume,
prior_loss_weight,
],
outputs=[
config_file_name,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
logging_dir_input,
train_data_dir_input,
reg_data_dir_input,
output_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
cache_latent_input,
caption_extention_input,
enable_bucket_input,
gradient_checkpointing_input,
full_fp16_input,
no_token_padding_input,
stop_text_encoder_training_input,
use_8bit_adam_input,
xformers_input,
save_model_as_dropdown,
shuffle_caption,
save_state,
resume,
prior_loss_weight,
],
)
save_as = True
not_save_as = False
button_save_config.click(
save_configuration,
inputs=[
dummy_false,
config_file_name,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
logging_dir_input,
train_data_dir_input,
reg_data_dir_input,
output_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
cache_latent_input,
caption_extention_input,
enable_bucket_input,
gradient_checkpointing_input,
full_fp16_input,
no_token_padding_input,
stop_text_encoder_training_input,
use_8bit_adam_input,
xformers_input,
save_model_as_dropdown,
shuffle_caption,
save_state,
resume,
prior_loss_weight,
],
outputs=[config_file_name],
)
button_save_as_config.click(
save_configuration,
inputs=[
dummy_true,
config_file_name,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
logging_dir_input,
train_data_dir_input,
reg_data_dir_input,
output_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
cache_latent_input,
caption_extention_input,
enable_bucket_input,
gradient_checkpointing_input,
full_fp16_input,
no_token_padding_input,
stop_text_encoder_training_input,
use_8bit_adam_input,
xformers_input,
save_model_as_dropdown,
shuffle_caption,
save_state,
resume,
prior_loss_weight,
],
outputs=[config_file_name],
)
button_run.click(
train_model,
inputs=[
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
logging_dir_input,
train_data_dir_input,
reg_data_dir_input,
output_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
cache_latent_input,
caption_extention_input,
enable_bucket_input,
gradient_checkpointing_input,
full_fp16_input,
no_token_padding_input,
stop_text_encoder_training_input,
use_8bit_adam_input,
xformers_input,
save_model_as_dropdown,
shuffle_caption,
save_state,
resume,
prior_loss_weight,
],
)
# Show the interface
if not username == '':
interface.launch(auth=(username, password))
else:
interface.launch()
if __name__ == '__main__':
# torch.cuda.set_per_process_memory_fraction(0.48)
parser = argparse.ArgumentParser()
parser.add_argument("--username", type=str, default='', help="Username for authentication")
parser.add_argument("--password", type=str, default='', help="Password for authentication")
args = parser.parse_args()
UI(username=args.username, password=args.password)

View File

@@ -5,6 +5,7 @@ import os
import subprocess
import pathlib
import shutil
import argparse
# from easygui import fileopenbox, filesavebox, diropenbox, msgbox
from library.basic_caption_gui import gradio_basic_caption_gui_tab
@@ -99,9 +100,8 @@
}
# Save the data to the selected file
# with open(file_path, 'w') as file:
# json.dump(variables, file)
# msgbox('File was saved...')
with open(file_path, 'w') as file:
json.dump(variables, file)
return file_path
@@ -386,410 +386,425 @@ def remove_doublequote(file_path):
return file_path
def UI(username, password):
css = ''
if os.path.exists('./style.css'):
with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:
print('Load CSS...')
css += file.read() + '\n'
interface = gr.Blocks(css=css)
with interface:
dummy_true = gr.Label(value=True, visible=False)
dummy_false = gr.Label(value=False, visible=False)
with gr.Tab('Finetuning'):
gr.Markdown('Enter kohya finetuner parameter using this interface.')
with gr.Accordion('Configuration File Load/Save', open=False):
with gr.Row():
button_open_config = gr.Button(
f'Open {folder_symbol}', elem_id='open_folder'
)
button_save_config = gr.Button(
f'Save {save_style_symbol}', elem_id='open_folder'
)
button_save_as_config = gr.Button(
f'Save as... {save_style_symbol}', elem_id='open_folder'
)
config_file_name = gr.Textbox(
label='', placeholder='type file path or use buttons...'
)
config_file_name.change(
remove_doublequote,
inputs=[config_file_name],
outputs=[config_file_name],
)
with gr.Tab('Source model'):
# Define the input elements
with gr.Row():
pretrained_model_name_or_path_input = gr.Textbox(
label='Pretrained model name or path',
placeholder='enter the path to custom model or name of pretrained model',
)
pretrained_model_name_or_path_file = gr.Button(
document_symbol, elem_id='open_folder_small'
)
pretrained_model_name_or_path_file.click(
get_file_path,
inputs=pretrained_model_name_or_path_input,
outputs=pretrained_model_name_or_path_input,
)
pretrained_model_name_or_path_folder = gr.Button(
folder_symbol, elem_id='open_folder_small'
)
pretrained_model_name_or_path_folder.click(
get_folder_path,
inputs=pretrained_model_name_or_path_input,
outputs=pretrained_model_name_or_path_input,
)
model_list = gr.Dropdown(
label='(Optional) Model Quick Pick',
choices=[
'custom',
'stabilityai/stable-diffusion-2-1-base',
'stabilityai/stable-diffusion-2-base',
'stabilityai/stable-diffusion-2-1',
'stabilityai/stable-diffusion-2',
'runwayml/stable-diffusion-v1-5',
'CompVis/stable-diffusion-v1-4',
],
)
save_model_as_dropdown = gr.Dropdown(
label='Save trained model as',
choices=[
'same as source model',
'ckpt',
'diffusers',
'diffusers_safetensors',
'safetensors',
],
value='same as source model',
)
with gr.Row():
v2_input = gr.Checkbox(label='v2', value=True)
v_parameterization_input = gr.Checkbox(
label='v_parameterization', value=False
)
model_list.change(
set_pretrained_model_name_or_path_input,
inputs=[model_list, v2_input, v_parameterization_input],
outputs=[
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
],
)
with gr.Tab('Directories'):
with gr.Row():
train_dir_input = gr.Textbox(
label='Training config folder',
placeholder='folder where the training configuration files will be saved',
)
train_dir_folder = gr.Button(
folder_symbol, elem_id='open_folder_small'
)
train_dir_folder.click(
get_folder_path, outputs=train_dir_input
)
image_folder_input = gr.Textbox(
label='Training Image folder',
placeholder='folder where the training images are located',
)
image_folder_input_folder = gr.Button(
folder_symbol, elem_id='open_folder_small'
)
image_folder_input_folder.click(
get_folder_path, outputs=image_folder_input
)
with gr.Row():
output_dir_input = gr.Textbox(
label='Output folder',
placeholder='folder where the model will be saved',
)
output_dir_input_folder = gr.Button(
folder_symbol, elem_id='open_folder_small'
)
output_dir_input_folder.click(
get_folder_path, outputs=output_dir_input
)
logging_dir_input = gr.Textbox(
label='Logging folder',
placeholder='Optional: enable logging and output TensorBoard log to this folder',
)
logging_dir_input_folder = gr.Button(
folder_symbol, elem_id='open_folder_small'
)
logging_dir_input_folder.click(
get_folder_path, outputs=logging_dir_input
)
train_dir_input.change(
remove_doublequote,
inputs=[train_dir_input],
outputs=[train_dir_input],
)
image_folder_input.change(
remove_doublequote,
inputs=[image_folder_input],
outputs=[image_folder_input],
)
output_dir_input.change(
remove_doublequote,
inputs=[output_dir_input],
outputs=[output_dir_input],
)
with gr.Tab('Training parameters'):
with gr.Row():
learning_rate_input = gr.Textbox(
label='Learning rate', value=1e-6
)
lr_scheduler_input = gr.Dropdown(
label='LR Scheduler',
choices=[
'constant',
'constant_with_warmup',
'cosine',
'cosine_with_restarts',
'linear',
'polynomial',
],
value='constant',
)
lr_warmup_input = gr.Textbox(label='LR warmup', value=0)
with gr.Row():
dataset_repeats_input = gr.Textbox(
label='Dataset repeats', value=40
)
train_batch_size_input = gr.Slider(
minimum=1,
maximum=32,
label='Train batch size',
value=1,
step=1,
)
epoch_input = gr.Textbox(label='Epoch', value=1)
save_every_n_epochs_input = gr.Textbox(
label='Save every N epochs', value=1
)
with gr.Row():
mixed_precision_input = gr.Dropdown(
label='Mixed precision',
choices=[
'no',
'fp16',
'bf16',
],
value='fp16',
)
save_precision_input = gr.Dropdown(
label='Save precision',
choices=[
'float',
'fp16',
'bf16',
],
value='fp16',
)
num_cpu_threads_per_process_input = gr.Slider(
minimum=1,
maximum=os.cpu_count(),
step=1,
label='Number of CPU threads per process',
value=os.cpu_count(),
)
with gr.Row():
seed_input = gr.Textbox(label='Seed', value=1234)
max_resolution_input = gr.Textbox(
label='Max resolution', value='512,512'
)
with gr.Row():
caption_extention_input = gr.Textbox(
label='Caption Extension',
placeholder='(Optional) Extension for caption files. default: .txt',
)
train_text_encoder_input = gr.Checkbox(
label='Train text encoder', value=True
)
with gr.Box():
with gr.Row():
create_caption = gr.Checkbox(
label='Generate caption database', value=True
)
create_buckets = gr.Checkbox(
label='Generate image buckets', value=True
)
train = gr.Checkbox(label='Train model', value=True)
button_run = gr.Button('Run')
button_run.click(
train_model,
inputs=[
create_caption,
create_buckets,
train,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
train_dir_input,
image_folder_input,
output_dir_input,
logging_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
dataset_repeats_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
train_text_encoder_input,
save_model_as_dropdown,
caption_extention_input,
],
)
button_open_config.click(
open_config_file,
inputs=[
config_file_name,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
train_dir_input,
image_folder_input,
output_dir_input,
logging_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
dataset_repeats_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
train_text_encoder_input,
create_buckets,
create_caption,
train,
save_model_as_dropdown,
caption_extention_input,
],
outputs=[
config_file_name,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
train_dir_input,
image_folder_input,
output_dir_input,
logging_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
dataset_repeats_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
train_text_encoder_input,
create_buckets,
create_caption,
train,
save_model_as_dropdown,
caption_extention_input,
],
)
button_save_config.click(
save_configuration,
inputs=[
dummy_false,
config_file_name,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
train_dir_input,
image_folder_input,
output_dir_input,
logging_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
dataset_repeats_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
train_text_encoder_input,
create_buckets,
create_caption,
train,
save_model_as_dropdown,
caption_extention_input,
],
outputs=[config_file_name],
)
button_save_as_config.click(
save_configuration,
inputs=[
dummy_true,
config_file_name,
pretrained_model_name_or_path_input,
v2_input,
v_parameterization_input,
train_dir_input,
image_folder_input,
output_dir_input,
logging_dir_input,
max_resolution_input,
learning_rate_input,
lr_scheduler_input,
lr_warmup_input,
dataset_repeats_input,
train_batch_size_input,
epoch_input,
save_every_n_epochs_input,
mixed_precision_input,
save_precision_input,
seed_input,
num_cpu_threads_per_process_input,
train_text_encoder_input,
create_buckets,
create_caption,
train,
save_model_as_dropdown,
caption_extention_input,
],
outputs=[config_file_name],
)
with gr.Tab('Utilities'):
gradio_basic_caption_gui_tab()
gradio_blip_caption_gui_tab()
gradio_wd14_caption_gui_tab()
gradio_convert_model_tab()
# Show the interface
if not username == '':
interface.launch(auth=(username, password))
else:
interface.launch()
if __name__ == '__main__':
# torch.cuda.set_per_process_memory_fraction(0.48)
parser = argparse.ArgumentParser()
parser.add_argument("--username", type=str, default='', help="Username for authentication")
parser.add_argument("--password", type=str, default='', help="Password for authentication")
args = parser.parse_args()
UI(username=args.username, password=args.password)
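
With these changes in place, either GUI can be launched with credentials, for example `python fine_tune.py --username=<name> --password=<password>`; the other changed GUI script accepts the same flags. Note that the guard checks only `username`, so passing `--password` alone still launches without authentication, and omitting both flags preserves the previous unauthenticated behaviour.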