Update README
Add example powershell code

parent 36b06d41bf
commit 202a416251

README.md
@@ -55,11 +55,11 @@ accelerate config:
Refer to the note to understand how to create the folder structure. In short, it should look like:

```
-<whatever top folder name>
-|- reg_<class>
-   |- <repeat count>_<prompt>
-|- train_<class>
-   |- <repeat count>_<prompt>
+<arbitrary folder name>
+|- <arbitrary class folder name>
+   |- <repeat count>_<class>
+|- <arbitrary training folder name>
+   |- <repeat count>_<token> <class>
```
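
A minimal PowerShell sketch of how a layout of this shape could be created. The base path, folder names, and repeat counts below are illustrative assumptions, not values taken from the repository:

```powershell
# Hypothetical example only: lay out a class (regularization) folder and a training folder
# following the <repeat count>_<class> and <repeat count>_<token> <class> pattern.
$base = "D:\dreambooth\sks_dog"                                          # assumed base path
New-Item -ItemType Directory -Force -Path "$base\reg_dog\1_dog"          # regularization images
New-Item -ItemType Directory -Force -Path "$base\train_dog\20_sks dog"   # training images
```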

Example for `sks dog`

examples/caption.ps1 (new file)
@@ -0,0 +1,10 @@
# This PowerShell script will create a text file for each file in the folder
#
# Useful to create base captions that will be augmented on a per-image basis

$folder = "D:\dreambooth\train_sylvia_ritter\raw_data\all-images\"
$file_pattern="*.*"
$text_fir_file="a digital painting of xxx, by silvery trait"

$files = Get-ChildItem $folder$file_pattern
foreach ($file in $files) {New-Item -ItemType file -Path $folder -Name "$($file.BaseName).txt" -Value $text_fir_file}
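
Note that the `*.*` pattern also matches any `.txt` files already in the folder, so re-running the script would create captions for the caption files themselves. A possible variant (an assumption on my part, not part of the original script) that skips existing captions and overwrites previous output:

```powershell
# Variant sketch (assumed behaviour, adjust as needed): ignore .txt files and overwrite old captions.
$files = Get-ChildItem $folder$file_pattern -Exclude "*.txt"
foreach ($file in $files) {
    New-Item -ItemType File -Path $folder -Name "$($file.BaseName).txt" -Value $text_fir_file -Force
}
```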

examples/kohya.ps1 (new file)
@@ -0,0 +1,154 @@
# This PowerShell script will create a model using the fine-tuning DreamBooth method. It will require landscape,
# portrait and square images.
#
# Adjust the script to your own needs

# Sylvia Ritter
# variable values
$pretrained_model_name_or_path = "D:\models\v1-5-pruned-mse-vae.ckpt"
$train_dir = "D:\dreambooth\train_sylvia_ritter\raw_data"

$landscape_image_num = 4
$portrait_image_num = 25
$square_image_num = 2

$learning_rate = 1e-6
$dataset_repeats = 120
$train_batch_size = 4
$epoch = 1
$save_every_n_epochs=1
$mixed_precision="fp16"
$num_cpu_threads_per_process=6

$landscape_folder_name = "landscape-pp"
$landscape_resolution = "832,512"
$portrait_folder_name = "portrait-pp"
$portrait_resolution = "448,896"
$square_folder_name = "square-pp"
$square_resolution = "512,512"

# You should not have to change values past this point

$landscape_data_dir = $train_dir + "\" + $landscape_folder_name
$portrait_data_dir = $train_dir + "\" + $portrait_folder_name
$square_data_dir = $train_dir + "\" + $square_folder_name
$landscape_output_dir = $train_dir + "\model-l"
$portrait_output_dir = $train_dir + "\model-lp"
$square_output_dir = $train_dir + "\model-lps"

$landscape_repeats = $landscape_image_num * $dataset_repeats
$portrait_repeats = $portrait_image_num * $dataset_repeats
$square_repeats = $square_image_num * $dataset_repeats

$landscape_mts = [Math]::Ceiling($landscape_repeats / $train_batch_size * $epoch)
$portrait_mts = [Math]::Ceiling($portrait_repeats / $train_batch_size * $epoch)
$square_mts = [Math]::Ceiling($square_repeats / $train_batch_size * $epoch)
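
# Worked example with the values above (assuming they are left unchanged):
#   landscape: 4 images * 120 repeats = 480 samples,  ceil(480 / 4 * 1)  = 120 max train steps
#   portrait: 25 images * 120 repeats = 3000 samples, ceil(3000 / 4 * 1) = 750 max train steps
#   square:    2 images * 120 repeats = 240 samples,  ceil(240 / 4 * 1)  =  60 max train steps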

# Write-Output $landscape_repeats

.\venv\Scripts\activate
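
# Note (added for clarity): the six launches below run three sequential passes, landscape then
# portrait then square, each resuming from the previous pass's last.ckpt, followed by a second
# round over the same three folders at half the dataset repeats and half the max train steps.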

accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db_fixed-ber.py `
  --pretrained_model_name_or_path=$pretrained_model_name_or_path `
  --train_data_dir=$landscape_data_dir `
  --output_dir=$landscape_output_dir `
  --resolution=$landscape_resolution `
  --train_batch_size=$train_batch_size `
  --learning_rate=$learning_rate `
  --max_train_steps=$landscape_mts `
  --use_8bit_adam `
  --xformers `
  --mixed_precision=$mixed_precision `
  --cache_latents `
  --save_every_n_epochs=$save_every_n_epochs `
  --fine_tuning `
  --dataset_repeats=$dataset_repeats `
  --save_half

accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db_fixed-ber.py `
  --pretrained_model_name_or_path=$landscape_output_dir"\last.ckpt" `
  --train_data_dir=$portrait_data_dir `
  --output_dir=$portrait_output_dir `
  --resolution=$portrait_resolution `
  --train_batch_size=$train_batch_size `
  --learning_rate=$learning_rate `
  --max_train_steps=$portrait_mts `
  --use_8bit_adam `
  --xformers `
  --mixed_precision=$mixed_precision `
  --cache_latents `
  --save_every_n_epochs=$save_every_n_epochs `
  --fine_tuning `
  --dataset_repeats=$dataset_repeats `
  --save_half

accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db_fixed-ber.py `
  --pretrained_model_name_or_path=$portrait_output_dir"\last.ckpt" `
  --train_data_dir=$square_data_dir `
  --output_dir=$square_output_dir `
  --resolution=$square_resolution `
  --train_batch_size=$train_batch_size `
  --learning_rate=$learning_rate `
  --max_train_steps=$square_mts `
  --use_8bit_adam `
  --xformers `
  --mixed_precision=$mixed_precision `
  --cache_latents `
  --save_every_n_epochs=$save_every_n_epochs `
  --fine_tuning `
  --dataset_repeats=$dataset_repeats `
  --save_half

# 2nd pass at half the dataset repeat value

accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db_fixed-ber.py `
  --pretrained_model_name_or_path=$square_output_dir"\last.ckpt" `
  --train_data_dir=$landscape_data_dir `
  --output_dir=$landscape_output_dir"2" `
  --resolution=$landscape_resolution `
  --train_batch_size=$train_batch_size `
  --learning_rate=$learning_rate `
  --max_train_steps=$([Math]::Ceiling($landscape_mts/2)) `
  --use_8bit_adam `
  --xformers `
  --mixed_precision=$mixed_precision `
  --cache_latents `
  --save_every_n_epochs=$save_every_n_epochs `
  --fine_tuning `
  --dataset_repeats=$([Math]::Ceiling($dataset_repeats/2)) `
  --save_half

accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db_fixed-ber.py `
  --pretrained_model_name_or_path=$landscape_output_dir"2\last.ckpt" `
  --train_data_dir=$portrait_data_dir `
  --output_dir=$portrait_output_dir"2" `
  --resolution=$portrait_resolution `
  --train_batch_size=$train_batch_size `
  --learning_rate=$learning_rate `
  --max_train_steps=$([Math]::Ceiling($portrait_mts/2)) `
  --use_8bit_adam `
  --xformers `
  --mixed_precision=$mixed_precision `
  --cache_latents `
  --save_every_n_epochs=$save_every_n_epochs `
  --fine_tuning `
  --dataset_repeats=$([Math]::Ceiling($dataset_repeats/2)) `
  --save_half

accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db_fixed-ber.py `
  --pretrained_model_name_or_path=$portrait_output_dir"2\last.ckpt" `
  --train_data_dir=$square_data_dir `
  --output_dir=$square_output_dir"2" `
  --resolution=$square_resolution `
  --train_batch_size=$train_batch_size `
  --learning_rate=$learning_rate `
  --max_train_steps=$([Math]::Ceiling($square_mts/2)) `
  --use_8bit_adam `
  --xformers `
  --mixed_precision=$mixed_precision `
  --cache_latents `
  --save_every_n_epochs=$save_every_n_epochs `
  --fine_tuning `
  --dataset_repeats=$([Math]::Ceiling($dataset_repeats/2)) `
  --save_half

examples/kohya_diffuser.ps1 (new file)
@@ -0,0 +1,72 @@
# This PowerShell script will create a model using the new "diffusers_fine_tuning" code

# Sylvia Ritter. AKA: by silvery trait

# variable values
$pretrained_model_name_or_path = "D:\models\v1-5-pruned-mse-vae.ckpt"
$train_dir = "D:\dreambooth\train_sylvia_ritter\raw_data"
$training_folder = "all-images"

$image_num = 117

$learning_rate = 5e-6
$dataset_repeats = 40
$train_batch_size = 6
$epoch = 3
$save_every_n_epochs=1
$mixed_precision="bf16"
$num_cpu_threads_per_process=6

$max_resolution = "768,576"

# You should not have to change values past this point

$ErrorActionPreference = "Stop"

$repeats = $image_num * $dataset_repeats

$max_train_set = [Math]::Ceiling($repeats / $train_batch_size * $epoch)
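
# Worked example with the values above (assuming they are left unchanged):
#   117 images * 40 repeats = 4680 samples, ceil(4680 / 6 * 3) = 2340 max train steps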

# Write-Output $landscape_repeats

# new method

.\venv\Scripts\activate
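
# The three steps below, as read from the script names (an interpretation, not documentation):
# merge the per-image .txt captions into meta_cap.json, bucket the images and cache their
# latents into meta_lat.json, then run the fine tuning itself from those metadata files.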

python D:\kohya_ss\diffusers_fine_tuning\merge_captions_to_metadata.py `
  --caption_extention ".txt" $train_dir"\"$training_folder $train_dir"\meta_cap.json"

python D:\kohya_ss\diffusers_fine_tuning\prepare_buckets_latents.py `
  $train_dir"\"$training_folder `
  $train_dir"\meta_cap.json" `
  $train_dir"\meta_lat.json" `
  $pretrained_model_name_or_path `
  --batch_size 4 --max_resolution $max_resolution --mixed_precision fp16

accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process D:\kohya_ss\diffusers_fine_tuning\fine_tune_v1-ber.py `
  --pretrained_model_name_or_path=$pretrained_model_name_or_path `
  --in_json $train_dir"\meta_lat.json" `
  --train_data_dir=$train_dir"\"$training_folder `
  --output_dir=$train_dir"\fine_tuned" `
  --train_batch_size=$train_batch_size `
  --dataset_repeats=$dataset_repeats `
  --learning_rate=$learning_rate `
  --max_train_steps=$max_train_set `
  --use_8bit_adam --xformers `
  --mixed_precision=$mixed_precision `
  --save_every_n_epochs=$save_every_n_epochs `
  --save_half

accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process D:\kohya_ss\diffusers_fine_tuning\fine_tune_v1-ber.py `
  --pretrained_model_name_or_path=$train_dir"\fine_tuned\last.ckpt" `
  --in_json $train_dir"\meta_lat.json" `
  --train_data_dir=$train_dir"\"$training_folder `
  --output_dir=$train_dir"\fine_tuned2" `
  --train_batch_size=$train_batch_size `
  --dataset_repeats=$([Math]::Ceiling($dataset_repeats / 2)) `
  --learning_rate=$learning_rate `
  --max_train_steps=$([Math]::Ceiling($max_train_set / 2)) `
  --use_8bit_adam --xformers `
  --mixed_precision=$mixed_precision `
  --save_every_n_epochs=$save_every_n_epochs `
  --save_half