Merge pull request #16 from bmaltais/dev

v18.8
This commit is contained in:
bmaltais 2022-12-23 07:57:23 -05:00 committed by GitHub
commit 2d58594daa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 48 additions and 76 deletions

View File

@ -14,6 +14,9 @@ You can find the finetune solution specific [Finetune README](README_finetune.m
## Change history
* 12/23 (v18.8) update:
- Fix for conversion tool issue when the source was an sd1.x diffuser model
- Other minor code and GUI fixes
* 12/22 (v18.7) update:
- Merge dreambooth and finetune into a common GUI
- General bug fixes and code improvements

View File

@ -67,12 +67,6 @@ python .\tools\cudann_1.8_install.py
When a new release comes out you can upgrade your repo with the following command:
```
.\upgrade.bat
```
alternatively you can do it manually with
```powershell
cd kohya_ss
git pull
@ -87,15 +81,8 @@ Once the commands have completed successfully you should be ready to use the new
There is now support for GUI based training using gradio. You can start the complete kohya training GUI interface by running:
```powershell
.\kohya.cmd
```
and select the Dreambooth tab.
Alternatively you can use the Dreambooth focus GUI with
```powershell
.\dreambooth.cmd
.\venv\Scripts\activate
.\kohya_gui.cmd
```
## CLI

View File

@ -67,12 +67,6 @@ python .\tools\cudann_1.8_install.py
When a new release comes out you can upgrade your repo with the following command:
```
.\upgrade.bat
```
or you can do it manually with
```powershell
cd kohya_ss
git pull
@ -110,15 +104,8 @@ You can also use the `Captioning` tool found under the `Utilities` tab in the GU
There is now support for GUI based training using gradio. You can start the complete kohya training GUI interface by running:
```powershell
.\kohya.cmd
```
and select the Finetune tab.
Alternatively you can use the Finetune focus GUI with
```powershell
.\finetune.cmd
.\venv\Scripts\activate
.\kohya_gui.cmd
```
## CLI

View File

@ -1 +0,0 @@
.\venv\Scripts\python.exe .\dreambooth_gui.py

View File

@ -17,6 +17,10 @@ from library.common_gui import (
get_file_path,
get_saveasfile_path,
)
from library.dreambooth_folder_creation_gui import (
gradio_dreambooth_folder_creation_tab,
)
from library.dataset_balancing_gui import gradio_dataset_balancing_tab
from library.utilities import utilities_tab
from easygui import msgbox
@ -509,7 +513,7 @@ def dreambooth_tab(
):
dummy_db_true = gr.Label(value=True, visible=False)
dummy_db_false = gr.Label(value=False, visible=False)
gr.Markdown('Enter kohya dreambooth training parameter using this interface.')
gr.Markdown('Train a custom model using kohya dreambooth python code...')
with gr.Accordion('Configuration file', open=False):
with gr.Row():
button_open_config = gr.Button('Open 📂', elem_id='open_folder')
@ -592,7 +596,7 @@ def dreambooth_tab(
],
)
with gr.Tab('Directories'):
with gr.Tab('Folders'):
with gr.Row():
train_data_dir_input = gr.Textbox(
label='Image folder',
@ -766,6 +770,15 @@ def dreambooth_tab(
prior_loss_weight = gr.Number(
label='Prior loss weight', value=1.0
)
with gr.Tab('Tools'):
gr.Markdown('This section provide Dreambooth tools to help setup your dataset...')
gradio_dreambooth_folder_creation_tab(
train_data_dir_input=train_data_dir_input,
reg_data_dir_input=reg_data_dir_input,
output_dir_input=output_dir_input,
logging_dir_input=logging_dir_input,
)
gradio_dataset_balancing_tab()
button_run = gr.Button('Train model')

View File

@ -1 +0,0 @@
.\venv\Scripts\python.exe .\finetune_gui.py

View File

@ -24,10 +24,13 @@ def main(args):
random.seed(seed)
if not os.path.exists("blip"):
args.train_data_dir = os.path.abspath(args.train_data_dir) # convert to absolute path
cwd = os.getcwd()
print('Current Working Directory is: ', cwd)
os.chdir('finetune')
print(f"load images from {args.train_data_dir}")
image_paths = glob.glob(os.path.join(args.train_data_dir, "*.jpg")) + \
glob.glob(os.path.join(args.train_data_dir, "*.png")) + glob.glob(os.path.join(args.train_data_dir, "*.webp"))
print(f"found {len(image_paths)} images.")
@ -105,4 +108,4 @@ if __name__ == '__main__':
if args.caption_extention is not None:
args.caption_extension = args.caption_extention
main(args)
main(args)

View File

@ -403,9 +403,9 @@ def finetune_tab():
dummy_ft_true = gr.Label(value=True, visible=False)
dummy_ft_false = gr.Label(value=False, visible=False)
gr.Markdown(
'Enter kohya finetune training parameter using this interface.'
'Train a custom model using kohya finetune python code...'
)
with gr.Accordion('Configuration File Load/Save', open=False):
with gr.Accordion('Configuration file', open=False):
with gr.Row():
button_open_config = gr.Button(
f'Open {folder_symbol}', elem_id='open_folder'
@ -486,7 +486,7 @@ def finetune_tab():
v_parameterization_input,
],
)
with gr.Tab('Directories'):
with gr.Tab('Folders'):
with gr.Row():
train_dir_input = gr.Textbox(
label='Training config folder',

View File

@ -1 +0,0 @@
.\venv\Scripts\python.exe .\kohya_gui.py

View File

@ -5,7 +5,7 @@ import shutil
import os
def copy_info_to_Directories_tab(training_folder):
def copy_info_to_Folders_tab(training_folder):
img_folder = os.path.join(training_folder, 'img')
if os.path.exists(os.path.join(training_folder, 'reg')):
reg_folder = os.path.join(training_folder, 'reg')
@ -114,7 +114,6 @@ def gradio_dreambooth_folder_creation_tab(
reg_data_dir_input=gr.Textbox(),
output_dir_input=gr.Textbox(),
logging_dir_input=gr.Textbox(),
enable_copy_info_button=bool(False),
):
with gr.Tab('Dreambooth folder preparation'):
gr.Markdown(
@ -192,17 +191,16 @@ def gradio_dreambooth_folder_creation_tab(
util_training_dir_output,
],
)
if enable_copy_info_button:
button_copy_info_to_Directories_tab = gr.Button(
'Copy info to Directories Tab'
)
button_copy_info_to_Directories_tab.click(
copy_info_to_Directories_tab,
inputs=[util_training_dir_output],
outputs=[
train_data_dir_input,
reg_data_dir_input,
output_dir_input,
logging_dir_input,
],
)
button_copy_info_to_Folders_tab = gr.Button(
'Copy info to Folders Tab'
)
button_copy_info_to_Folders_tab.click(
copy_info_to_Folders_tab,
inputs=[util_training_dir_output],
outputs=[
train_data_dir_input,
reg_data_dir_input,
output_dir_input,
logging_dir_input,
],
)

View File

@ -6,14 +6,10 @@
import gradio as gr
import os
import argparse
from library.dreambooth_folder_creation_gui import (
gradio_dreambooth_folder_creation_tab,
)
from library.basic_caption_gui import gradio_basic_caption_gui_tab
from library.convert_model_gui import gradio_convert_model_tab
from library.blip_caption_gui import gradio_blip_caption_gui_tab
from library.wd14_caption_gui import gradio_wd14_caption_gui_tab
from library.dataset_balancing_gui import gradio_dataset_balancing_tab
def utilities_tab(
@ -28,17 +24,6 @@ def utilities_tab(
gradio_basic_caption_gui_tab()
gradio_blip_caption_gui_tab()
gradio_wd14_caption_gui_tab()
if enable_dreambooth_tab:
with gr.Tab('Dreambooth'):
gr.Markdown('This section provide Dreambooth specific tools.')
gradio_dreambooth_folder_creation_tab(
train_data_dir_input=train_data_dir_input,
reg_data_dir_input=reg_data_dir_input,
output_dir_input=output_dir_input,
logging_dir_input=logging_dir_input,
enable_copy_info_button=enable_copy_info_button,
)
gradio_dataset_balancing_tab()
gradio_convert_model_tab()
return (

View File

@ -9,7 +9,7 @@ import os
import torch
from diffusers import StableDiffusionPipeline
from library import model_util as model_util
import library.model_util as model_util
def convert(args):
@ -48,7 +48,7 @@ def convert(args):
v2_model = unet.config.cross_attention_dim == 1024
print("checking model version: model is " + ('v2' if v2_model else 'v1'))
else:
v2_model = args.v1
v2_model = not args.v1
# 変換して保存する
msg = ("checkpoint" + ("" if save_dtype is None else f" in {save_dtype}")) if is_save_ckpt else "Diffusers"
@ -90,4 +90,4 @@ if __name__ == '__main__':
help="model to save: checkpoint (with extension) or Diffusers model's directory (without extension) / 変換後のモデル、拡張子がある場合はcheckpoint、ない場合はDiffusesモデルとして保存")
args = parser.parse_args()
convert(args)
convert(args)

View File

@ -1011,6 +1011,7 @@ def train(args):
if stop_text_encoder_training:
print(f"stop text encoder training at step {global_step}")
text_encoder.train(False)
text_encoder.requires_grad_(False)
with accelerator.accumulate(unet):
with torch.no_grad():
@ -1225,4 +1226,4 @@ if __name__ == '__main__':
help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数デフォルト0")
args = parser.parse_args()
train(args)
train(args)

View File

@ -1,2 +0,0 @@
git pull
.\venv\Scripts\python.exe -m pip install -U -r .\requirements.txt