2023-01-06 23:25:55 +00:00
|
|
|
import gradio as gr
|
|
|
|
from easygui import msgbox
|
|
|
|
import subprocess
|
|
|
|
import os
|
2023-02-06 01:07:00 +00:00
|
|
|
from .common_gui import (
|
|
|
|
get_saveasfilename_path,
|
|
|
|
get_any_file_path,
|
|
|
|
get_file_path,
|
|
|
|
)
|
2023-01-06 23:25:55 +00:00
|
|
|
|
|
|
|
# Emoji glyphs used as icon labels on small GUI buttons.
folder_symbol = '\U0001f4c2'   # 📂
refresh_symbol = '\U0001f504'   # 🔄
save_style_symbol = '\U0001f4be'   # 💾
document_symbol = '\U0001F4C4'   # 📄

# Interpreter used to launch the sd-scripts helpers: the system python3 on
# POSIX, otherwise the bundled Windows virtualenv interpreter.
PYTHON = 'python3' if os.name == 'posix' else './venv/Scripts/python.exe'
|
|
|
|
|
2023-01-06 23:25:55 +00:00
|
|
|
|
|
|
|
def extract_lora(
    model_tuned,
    model_org,
    save_to,
    save_precision,
    dim,
    v2,
    conv_dim,
    device,
):
    """Extract a LoRA network from a finetuned model.

    Validates the user-supplied paths, builds the command line for
    ``networks/extract_lora_from_models.py`` and runs it.

    Args:
        model_tuned: Path to the finetuned model to extract from.
        model_org: Path to the original (base) Stable Diffusion model.
        save_to: Output path for the extracted LoRA weights.
        save_precision: Precision for saved weights ('fp16', 'bf16', 'float').
        dim: LoRA network dimension (rank).
        v2: Whether the models are Stable Diffusion v2.
        conv_dim: Conv layer dimension; 0 disables the ``--conv_dim`` flag.
        device: Device to run extraction on ('cpu' or 'cuda').

    Returns:
        None. Shows a message box and aborts on invalid input.
    """
    # Validate inputs before building the command line.
    if model_tuned == '':
        msgbox('Invalid finetuned model file')
        return

    if model_org == '':
        msgbox('Invalid base model file')
        return

    # Robustness fix: previously an empty output path was passed straight
    # through to the extraction script; fail early with a clear message.
    if save_to == '':
        msgbox('Invalid output file path')
        return

    # Check that the source models exist on disk.
    if not os.path.isfile(model_tuned):
        msgbox('The provided finetuned model is not a file')
        return

    if not os.path.isfile(model_org):
        msgbox('The provided base model is not a file')
        return

    # NOTE(review): user-controlled paths are interpolated into a shell
    # string and executed via os.system on POSIX — a list-based
    # subprocess.run([...], shell=False) call would be safer; kept as-is
    # to match the project's command-execution convention.
    run_cmd = (
        f'{PYTHON} "{os.path.join("networks","extract_lora_from_models.py")}"'
    )
    run_cmd += f' --save_precision {save_precision}'
    run_cmd += f' --save_to "{save_to}"'
    run_cmd += f' --model_org "{model_org}"'
    run_cmd += f' --model_tuned "{model_tuned}"'
    run_cmd += f' --dim {dim}'
    run_cmd += f' --device {device}'
    # conv_dim == 0 means "no conv-layer extraction", so omit the flag.
    if conv_dim > 0:
        run_cmd += f' --conv_dim {conv_dim}'
    if v2:
        run_cmd += ' --v2'   # fixed: was an f-string with no placeholder

    print(run_cmd)

    # Run the command; os.system goes through the shell on POSIX while
    # subprocess.run receives the raw command string on Windows.
    if os.name == 'posix':
        os.system(run_cmd)
    else:
        subprocess.run(run_cmd)
|
2023-01-06 23:25:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
###
|
|
|
|
# Gradio UI
|
|
|
|
###
|
|
|
|
|
|
|
|
|
|
|
|
def gradio_extract_lora_tab():
    """Build the 'Extract LoRA' tab of the Gradio UI.

    Lays out the input widgets (model paths, output path, precision,
    dimensions, v2 flag, device) and wires the buttons: file-picker
    buttons open native dialogs via the ``common_gui`` helpers, and the
    extract button invokes :func:`extract_lora` with the widget values.
    """
    with gr.Tab('Extract LoRA'):
        gr.Markdown(
            'This utility can extract a LoRA network from a finetuned model.'
        )
        # Hidden textboxes: the file-picker helpers take the glob pattern
        # and dialog label as extra Gradio inputs, so they are stored in
        # invisible components rather than passed as plain strings.
        lora_ext = gr.Textbox(value='*.safetensors *.pt', visible=False)
        lora_ext_name = gr.Textbox(value='LoRA model types', visible=False)
        model_ext = gr.Textbox(value='*.ckpt *.safetensors', visible=False)
        model_ext_name = gr.Textbox(value='Model types', visible=False)

        with gr.Row():
            # Path to the finetuned model the LoRA will be extracted from.
            model_tuned = gr.Textbox(
                label='Finetuned model',
                placeholder='Path to the finetuned model to extract',
                interactive=True,
            )
            button_model_tuned_file = gr.Button(
                folder_symbol, elem_id='open_folder_small'
            )
            button_model_tuned_file.click(
                get_file_path,
                inputs=[model_tuned, model_ext, model_ext_name],
                outputs=model_tuned,
                show_progress=False,
            )

            # Path to the original (base) model the finetune started from.
            model_org = gr.Textbox(
                label='Stable Diffusion base model',
                placeholder='Stable Diffusion original model: ckpt or safetensors file',
                interactive=True,
            )
            button_model_org_file = gr.Button(
                folder_symbol, elem_id='open_folder_small'
            )
            button_model_org_file.click(
                get_file_path,
                inputs=[model_org, model_ext, model_ext_name],
                outputs=model_org,
                show_progress=False,
            )
        with gr.Row():
            # Output path for the extracted LoRA weights (save-as dialog).
            save_to = gr.Textbox(
                label='Save to',
                placeholder='path where to save the extracted LoRA model...',
                interactive=True,
            )
            button_save_to = gr.Button(
                folder_symbol, elem_id='open_folder_small'
            )
            button_save_to.click(
                get_saveasfilename_path,
                inputs=[save_to, lora_ext, lora_ext_name],
                outputs=save_to,
                show_progress=False,
            )
            save_precision = gr.Dropdown(
                label='Save precision',
                choices=['fp16', 'bf16', 'float'],
                value='float',
                interactive=True,
            )
        with gr.Row():
            # LoRA rank; max raised to 1024 upstream.
            dim = gr.Slider(
                minimum=4,
                maximum=1024,
                label='Network Dimension (Rank)',
                value=128,
                step=1,
                interactive=True,
            )
            # Conv-layer rank; 0 disables conv extraction (see extract_lora).
            conv_dim = gr.Slider(
                minimum=0,
                maximum=1024,
                label='Conv Dimension (Rank)',
                value=128,
                step=1,
                interactive=True,
            )
            v2 = gr.Checkbox(label='v2', value=False, interactive=True)
            device = gr.Dropdown(
                label='Device',
                choices=[
                    'cpu',
                    'cuda',
                ],
                value='cuda',
                interactive=True,
            )

        extract_button = gr.Button('Extract LoRA model')

        # Wire the button to the backend; input order must match the
        # extract_lora parameter order.
        extract_button.click(
            extract_lora,
            inputs=[
                model_tuned,
                model_org,
                save_to,
                save_precision,
                dim,
                v2,
                conv_dim,
                device
            ],
            show_progress=False,
        )
|