- Fix for a conversion tool issue when the source was an SD 1.x Diffusers model
- Other minor code and GUI fixes
This commit is contained in:
parent: 5e3f32f69c
commit: 2cdf4cf741
@@ -14,6 +14,9 @@ You can find the finetune solution specific [Finetune README](README_finetune.m
 
 ## Change history
 
+* 12/23 (v18.8) update:
+    - Fix for a conversion tool issue when the source was an SD 1.x Diffusers model
+    - Other minor code and GUI fixes
 * 12/22 (v18.7) update:
     - Merge dreambooth and finetune into a common GUI
     - General bug fixes and code improvements
@@ -67,12 +67,6 @@ python .\tools\cudann_1.8_install.py
 
-When a new release comes out you can upgrade your repo with the following command:
-
-```
-.\upgrade.bat
-```
-
 alternatively you can do it manually with
 
 ```powershell
 cd kohya_ss
 git pull
@@ -87,15 +81,8 @@ Once the commands have completed successfully you should be ready to use the new
 There is now support for GUI based training using gradio. You can start the complete kohya training GUI interface by running:
 
 ```powershell
-.\kohya.cmd
-```
-
-and select the Dreambooth tab.
-
-Alternatively you can use the Dreambooth focus GUI with
-
-```powershell
-.\dreambooth.cmd
+.\venv\Scripts\activate
+.\kohya_gui.cmd
 ```
 
 ## CLI
@@ -67,12 +67,6 @@ python .\tools\cudann_1.8_install.py
 
-When a new release comes out you can upgrade your repo with the following command:
-
-```
-.\upgrade.bat
-```
-
 or you can do it manually with
 
 ```powershell
 cd kohya_ss
 git pull
@@ -110,15 +104,8 @@ You can also use the `Captioning` tool found under the `Utilities` tab in the GU
 There is now support for GUI based training using gradio. You can start the complete kohya training GUI interface by running:
 
 ```powershell
-.\kohya.cmd
-```
-
-and select the Finetune tab.
-
-Alternatively you can use the Finetune focus GUI with
-
-```powershell
-.\finetune.cmd
+.\venv\Scripts\activate
+.\kohya_gui.cmd
 ```
 
 ## CLI
@@ -1 +0,0 @@
-.\venv\Scripts\python.exe .\dreambooth_gui.py
@@ -1 +0,0 @@
-.\venv\Scripts\python.exe .\finetune_gui.py
@@ -24,10 +24,13 @@ def main(args):
   random.seed(seed)
 
   if not os.path.exists("blip"):
+    args.train_data_dir = os.path.abspath(args.train_data_dir)  # convert to absolute path
+    cwd = os.getcwd()
+    print('Current Working Directory is: ', cwd)
     os.chdir('finetune')
 
   print(f"load images from {args.train_data_dir}")
   image_paths = glob.glob(os.path.join(args.train_data_dir, "*.jpg")) + \
       glob.glob(os.path.join(args.train_data_dir, "*.png")) + glob.glob(os.path.join(args.train_data_dir, "*.webp"))
   print(f"found {len(image_paths)} images.")
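A minimal stand-alone sketch of why the added lines matter (the helper name and structure here are hypothetical, simplified from the hunk above): converting `train_data_dir` to an absolute path before `os.chdir('finetune')` keeps the later `glob` calls pointing at the original image folder, since a relative path would be re-resolved against the new working directory.

```python
import glob
import os


def collect_images(train_data_dir: str) -> list:
    # Convert to an absolute path first: os.chdir() below changes the working
    # directory, so a relative train_data_dir would no longer resolve correctly.
    train_data_dir = os.path.abspath(train_data_dir)

    # The BLIP captioning code is expected to run from ./finetune
    # (assumes the kohya_ss repo layout).
    os.chdir('finetune')

    # Same extension filter as the script above.
    image_paths = []
    for pattern in ("*.jpg", "*.png", "*.webp"):
        image_paths += glob.glob(os.path.join(train_data_dir, pattern))
    return image_paths
```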
@@ -105,4 +108,4 @@ if __name__ == '__main__':
   if args.caption_extention is not None:
     args.caption_extension = args.caption_extention
 
-  main(args)
+  main(args)
@@ -9,7 +9,7 @@ import os
 import torch
 from diffusers import StableDiffusionPipeline
 
-from library import model_util as model_util
+import library.model_util as model_util
 
 
 def convert(args):
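For reference, `from library import model_util as model_util` and `import library.model_util as model_util` bind the same module object under the same name in the common case; the switch is a consistency cleanup rather than a behaviour change. A quick stand-alone illustration using a standard-library package as a stand-in (since `library.model_util` only exists inside this repo):

```python
import os.path as a
from os import path as b

# Both import forms yield the very same module object.
assert a is b
```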
@@ -48,7 +48,7 @@ def convert(args):
       v2_model = unet.config.cross_attention_dim == 1024
       print("checking model version: model is " + ('v2' if v2_model else 'v1'))
     else:
-      v2_model = args.v1
+      v2_model = not args.v1
 
   # 変換して保存する (convert and save)
   msg = ("checkpoint" + ("" if save_dtype is None else f" in {save_dtype}")) if is_save_ckpt else "Diffusers"
@@ -90,4 +90,4 @@ if __name__ == '__main__':
                       help="model to save: checkpoint (with extension) or Diffusers model's directory (without extension) / 変換後のモデル、拡張子がある場合はcheckpoint、ない場合はDiffusersモデルとして保存")
 
   args = parser.parse_args()
-  convert(args)
+  convert(args)
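The conversion fix above is easier to see in isolation. Below is a hypothetical sketch (the function name and flag handling are illustrative; the surrounding script's exact branching is not shown in the hunk) of the corrected logic: when the source is a Diffusers model, SD 2.x can be auto-detected from the UNet's `cross_attention_dim` (1024 for SD 2.x, 768 for SD 1.x), and when the user passes `--v1` explicitly, `v2_model` must be its negation; the old `v2_model = args.v1` inverted the flag, which broke converting SD 1.x Diffusers sources.

```python
def infer_v2_model(unet, v1_flag: bool, v2_flag: bool) -> bool:
    """Decide whether a source Diffusers model is SD 2.x (illustrative helper)."""
    if v1_flag == v2_flag:
        # Neither (or both) version flags given: auto-detect from the UNet.
        # SD 2.x conditions on 1024-dim text embeddings, SD 1.x on 768-dim.
        return unet.config.cross_attention_dim == 1024
    # An explicit flag was given. --v1 means "the source is SD 1.x",
    # so v2 is its negation (the fixed line: not args.v1).
    return not v1_flag
```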
@@ -1011,6 +1011,7 @@ def train(args):
       if stop_text_encoder_training:
         print(f"stop text encoder training at step {global_step}")
         text_encoder.train(False)
+        text_encoder.requires_grad_(False)
 
       with accelerator.accumulate(unet):
         with torch.no_grad():
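For context on the added `text_encoder.requires_grad_(False)`: in PyTorch, `module.train(False)` only toggles layer behaviour such as dropout; the parameters still receive gradients and keep being updated by the optimizer. A minimal stand-alone sketch (the stand-in model below is hypothetical) of how the two calls together actually freeze a module mid-training:

```python
import torch.nn as nn


def freeze(module: nn.Module) -> None:
    # train(False) switches dropout/batch-norm layers to eval behaviour,
    # but gradients would still flow into the parameters.
    module.train(False)
    # requires_grad_(False) stops gradient computation for every parameter,
    # so the optimizer no longer updates the module.
    module.requires_grad_(False)


# Stand-in for the text encoder, just to show the effect.
text_encoder = nn.Sequential(nn.Embedding(49408, 16), nn.Linear(16, 16))
freeze(text_encoder)
assert not text_encoder.training
assert all(not p.requires_grad for p in text_encoder.parameters())
```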
@@ -1225,4 +1226,4 @@ if __name__ == '__main__':
                       help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)")
 
   args = parser.parse_args()
-  train(args)
+  train(args)
@@ -1,2 +0,0 @@
-git pull
-.\venv\Scripts\python.exe -m pip install -U -r .\requirements.txt