Merge pull request #299 from bmaltais/dev

Fix issue 296
commit a1551fc4a8
bmaltais, 2023-03-04 17:47:09 -05:00, committed by GitHub (GPG key ID: 4AEE18F83AFDEB23)
8 changed files with 459 additions and 10 deletions


@@ -25,7 +25,7 @@ from library.common_gui import (
     gradio_config,
     gradio_source_model,
     set_legacy_8bitadam,
-    update_optimizer,
+    update_my_data,
 )
 from library.tensorboard_gui import (
     gradio_tensorboard,
@@ -214,7 +214,7 @@ def open_configuration(
             my_data = json.load(f)
             print('Loading config...')
             # Update values to fix deprecated use_8bit_adam checkbox and set appropriate optimizer if it is set to True
-            my_data = update_optimizer(my_data)
+            my_data = update_my_data(my_data)
     else:
         file_path = original_file_path  # In case a file_path was provided and the user decide to cancel the open action
         my_data = {}


@@ -19,7 +19,7 @@ from library.common_gui import (
     color_aug_changed,
     run_cmd_training,
     set_legacy_8bitadam,
-    update_optimizer,
+    update_my_data,
 )
 from library.tensorboard_gui import (
     gradio_tensorboard,
@@ -216,7 +216,7 @@ def open_config_file(
             my_data_db = json.load(f)
             print('Loading config...')
             # Update values to fix deprecated use_8bit_adam checkbox and set appropriate optimizer if it is set to True
-            my_data = update_optimizer(my_data)
+            my_data = update_my_data(my_data)
     else:
         file_path = original_file_path  # In case a file_path was provided and the user decide to cancel the open action
         my_data_db = {}


@@ -10,10 +10,14 @@ save_style_symbol = '\U0001f4be'  # 💾
 document_symbol = '\U0001F4C4'  # 📄


-def update_optimizer(my_data):
+def update_my_data(my_data):
     if my_data.get('use_8bit_adam', False):
         my_data['optimizer'] = 'AdamW8bit'
         my_data['use_8bit_adam'] = False
+    if my_data.get('model_list', 'custom') == []:
+        print('Old config with empty model list. Setting to custom...')
+        my_data['model_list'] = 'custom'
     return my_data
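
For illustration, a minimal sketch of what this migration helper does to an old-style config dict (the dict literal below is hypothetical, not taken from the repo):

    # hypothetical config saved by an older GUI version
    old_config = {'use_8bit_adam': True, 'model_list': []}
    migrated = update_my_data(old_config)
    # migrated == {'use_8bit_adam': False, 'optimizer': 'AdamW8bit', 'model_list': 'custom'}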


@@ -25,7 +25,7 @@ def caption_images(
         return
     print(f'GIT captioning files in {train_data_dir}...')
-    run_cmd = f'.\\venv\\Scripts\\python.exe "finetune/make_captions.py"'
+    run_cmd = f'.\\venv\\Scripts\\python.exe "finetune/make_captions_by_git.py"'
     if not model_id == '':
         run_cmd += f' --model_id="{model_id}"'
     run_cmd += f' --batch_size="{int(batch_size)}"'
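For illustration, with the corrected script path and hypothetical inputs model_id = 'microsoft/git-large-textcaps' and batch_size = 1 (both placeholders, not values taken from this diff), the assembled command would begin:

    .\venv\Scripts\python.exe "finetune/make_captions_by_git.py" --model_id="microsoft/git-large-textcaps" --batch_size="1" ...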


@@ -25,7 +25,7 @@ from library.common_gui import (
     gradio_source_model,
     run_cmd_training,
     set_legacy_8bitadam,
-    update_optimizer,
+    update_my_data,
 )
 from library.dreambooth_folder_creation_gui import (
     gradio_dreambooth_folder_creation_tab,
@@ -239,7 +239,7 @@ def open_configuration(
             my_data = json.load(f)
             print('Loading config...')
             # Update values to fix deprecated use_8bit_adam checkbox and set appropriate optimizer if it is set to True
-            my_data = update_optimizer(my_data)
+            my_data = update_my_data(my_data)
     else:
         file_path = original_file_path  # In case a file_path was provided and the user decide to cancel the open action
         my_data = {}


@@ -25,7 +25,7 @@ from library.common_gui import (
     gradio_config,
     gradio_source_model,
     set_legacy_8bitadam,
-    update_optimizer,
+    update_my_data,
 )
 from library.tensorboard_gui import (
     gradio_tensorboard,
@@ -226,7 +226,7 @@ def open_configuration(
             my_data_db = json.load(f)
             print('Loading config...')
             # Update values to fix deprecated use_8bit_adam checkbox and set appropriate optimizer if it is set to True
-            my_data = update_optimizer(my_data)
+            my_data = update_my_data(my_data)
     else:
         file_path = original_file_path  # In case a file_path was provided and the user decide to cancel the open action
         my_data_db = {}

tools/extract_locon.py (new file, 106 lines)

@@ -0,0 +1,106 @@
#
# From: https://raw.githubusercontent.com/KohakuBlueleaf/LoCon/main/extract_locon.py
#
import argparse


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "base_model", help="The model which use it to train the dreambooth model",
        default='', type=str
    )
    parser.add_argument(
        "db_model", help="the dreambooth model you want to extract the locon",
        default='', type=str
    )
    parser.add_argument(
        "output_name", help="the output model",
        default='./out.pt', type=str
    )
    parser.add_argument(
        "--is_v2", help="Your base/db model is sd v2 or not",
        default=False, action="store_true"
    )
    parser.add_argument(
        "--device", help="Which device you want to use to extract the locon",
        default='cpu', type=str
    )
    parser.add_argument(
        "--mode",
        help=(
            'extraction mode, can be "fixed", "threshold", "ratio", "percentile". '
            'If not "fixed", network_dim and conv_dim will be ignored'
        ),
        default='fixed', type=str
    )
    parser.add_argument(
        "--linear_dim", help="network dim for linear layer in fixed mode",
        default=1, type=int
    )
    parser.add_argument(
        "--conv_dim", help="network dim for conv layer in fixed mode",
        default=1, type=int
    )
    parser.add_argument(
        "--linear_threshold", help="singular value threshold for linear layer in threshold mode",
        default=0., type=float
    )
    parser.add_argument(
        "--conv_threshold", help="singular value threshold for conv layer in threshold mode",
        default=0., type=float
    )
    parser.add_argument(
        "--linear_ratio", help="singular ratio for linear layer in ratio mode",
        default=0., type=float
    )
    parser.add_argument(
        "--conv_ratio", help="singular ratio for conv layer in ratio mode",
        default=0., type=float
    )
    parser.add_argument(
        "--linear_percentile", help="singular value percentile for linear layer percentile mode",
        default=1., type=float
    )
    parser.add_argument(
        "--conv_percentile", help="singular value percentile for conv layer percentile mode",
        default=1., type=float
    )
    return parser.parse_args()


ARGS = get_args()

from locon.utils import extract_diff
from locon.kohya_model_utils import load_models_from_stable_diffusion_checkpoint
import torch


def main():
    args = ARGS
    base = load_models_from_stable_diffusion_checkpoint(args.is_v2, args.base_model)
    db = load_models_from_stable_diffusion_checkpoint(args.is_v2, args.db_model)

    linear_mode_param = {
        'fixed': args.linear_dim,
        'threshold': args.linear_threshold,
        'ratio': args.linear_ratio,
        'percentile': args.linear_percentile,
    }[args.mode]
    conv_mode_param = {
        'fixed': args.conv_dim,
        'threshold': args.conv_threshold,
        'ratio': args.conv_ratio,
        'percentile': args.conv_percentile,
    }[args.mode]

    state_dict = extract_diff(
        base, db,
        args.mode,
        linear_mode_param, conv_mode_param,
        args.device
    )
    torch.save(state_dict, args.output_name)


if __name__ == '__main__':
    main()
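
A plausible invocation of the new script, assuming the locon package is importable and using placeholder file names:

    python tools/extract_locon.py --device cuda --mode fixed --linear_dim 64 --conv_dim 32 base.ckpt db.ckpt locon_out.pt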

tools/resize_lora.py (new file, 339 lines)

@@ -0,0 +1,339 @@
#
# File from: https://raw.githubusercontent.com/mgz-dev/sd-scripts/main/networks/resize_lora.py
#
# Convert LoRA to different rank approximation (should only be used to go to lower rank)
# This code is based off the extract_lora_from_models.py file which is based on https://github.com/cloneofsimo/lora/blob/develop/lora_diffusion/cli_svd.py
# Thanks to cloneofsimo and kohya

import argparse
import torch
from safetensors.torch import load_file, save_file, safe_open
from tqdm import tqdm
from library import train_util, model_util
import numpy as np

MIN_SV = 1e-6


def load_state_dict(file_name, dtype):
    if model_util.is_safetensors(file_name):
        sd = load_file(file_name)
        with safe_open(file_name, framework="pt") as f:
            metadata = f.metadata()
    else:
        sd = torch.load(file_name, map_location='cpu')
        metadata = None

    for key in list(sd.keys()):
        if type(sd[key]) == torch.Tensor:
            sd[key] = sd[key].to(dtype)

    return sd, metadata


def save_to_file(file_name, model, state_dict, dtype, metadata):
    if dtype is not None:
        for key in list(state_dict.keys()):
            if type(state_dict[key]) == torch.Tensor:
                state_dict[key] = state_dict[key].to(dtype)

    if model_util.is_safetensors(file_name):
        save_file(model, file_name, metadata)
    else:
        torch.save(model, file_name)


def index_sv_cumulative(S, target):
    original_sum = float(torch.sum(S))
    cumulative_sums = torch.cumsum(S, dim=0)/original_sum
    index = int(torch.searchsorted(cumulative_sums, target)) + 1
    if index >= len(S):
        index = len(S) - 1

    return index


def index_sv_fro(S, target):
    S_squared = S.pow(2)
    s_fro_sq = float(torch.sum(S_squared))
    sum_S_squared = torch.cumsum(S_squared, dim=0)/s_fro_sq
    index = int(torch.searchsorted(sum_S_squared, target**2)) + 1
    if index >= len(S):
        index = len(S) - 1

    return index
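
# Illustrative note, not part of the upstream file: for S = [4.0, 2.0, 1.0, 0.5, 0.5]
# the normalized cumulative sums are [0.500, 0.750, 0.875, 0.9375, 1.0], so
# index_sv_cumulative(S, 0.9) returns 4 -- the smallest rank whose leading singular
# values retain at least 90% of sum(S). index_sv_fro runs the same search on S**2
# against target**2, i.e. it keeps enough values to retain the target fraction of
# the Frobenius norm.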


# Modified from Kohaku-blueleaf's extract/merge functions
def extract_conv(weight, lora_rank, dynamic_method, dynamic_param, device, scale=1):
    out_size, in_size, kernel_size, _ = weight.size()
    U, S, Vh = torch.linalg.svd(weight.reshape(out_size, -1).to(device))

    param_dict = rank_resize(S, lora_rank, dynamic_method, dynamic_param, scale)
    lora_rank = param_dict["new_rank"]

    U = U[:, :lora_rank]
    S = S[:lora_rank]
    U = U @ torch.diag(S)
    Vh = Vh[:lora_rank, :]

    param_dict["lora_down"] = Vh.reshape(lora_rank, in_size, kernel_size, kernel_size).cpu()
    param_dict["lora_up"] = U.reshape(out_size, lora_rank, 1, 1).cpu()
    del U, S, Vh, weight
    return param_dict


def extract_linear(weight, lora_rank, dynamic_method, dynamic_param, device, scale=1):
    out_size, in_size = weight.size()

    U, S, Vh = torch.linalg.svd(weight.to(device))

    param_dict = rank_resize(S, lora_rank, dynamic_method, dynamic_param, scale)
    lora_rank = param_dict["new_rank"]

    U = U[:, :lora_rank]
    S = S[:lora_rank]
    U = U @ torch.diag(S)
    Vh = Vh[:lora_rank, :]

    param_dict["lora_down"] = Vh.reshape(lora_rank, in_size).cpu()
    param_dict["lora_up"] = U.reshape(out_size, lora_rank).cpu()
    del U, S, Vh, weight
    return param_dict


def merge_conv(lora_down, lora_up, device):
    in_rank, in_size, kernel_size, k_ = lora_down.shape
    out_size, out_rank, _, _ = lora_up.shape
    assert in_rank == out_rank and kernel_size == k_, f"rank {in_rank} {out_rank} or kernel {kernel_size} {k_} mismatch"

    lora_down = lora_down.to(device)
    lora_up = lora_up.to(device)

    merged = lora_up.reshape(out_size, -1) @ lora_down.reshape(in_rank, -1)
    weight = merged.reshape(out_size, in_size, kernel_size, kernel_size)
    del lora_up, lora_down
    return weight


def merge_linear(lora_down, lora_up, device):
    in_rank, in_size = lora_down.shape
    out_size, out_rank = lora_up.shape
    assert in_rank == out_rank, f"rank {in_rank} {out_rank} mismatch"

    lora_down = lora_down.to(device)
    lora_up = lora_up.to(device)

    weight = lora_up @ lora_down
    del lora_up, lora_down
    return weight


def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1):
    param_dict = {}

    if dynamic_method=="sv_ratio":
        # Calculate new dim and alpha based off ratio
        max_sv = S[0]
        min_sv = max_sv/dynamic_param
        new_rank = max(torch.sum(S > min_sv).item(), 1)
        new_alpha = float(scale*new_rank)

    elif dynamic_method=="sv_cumulative":
        # Calculate new dim and alpha based off cumulative sum
        new_rank = index_sv_cumulative(S, dynamic_param)
        new_rank = max(new_rank, 1)
        new_alpha = float(scale*new_rank)

    elif dynamic_method=="sv_fro":
        # Calculate new dim and alpha based off sqrt sum of squares
        new_rank = index_sv_fro(S, dynamic_param)
        new_rank = min(max(new_rank, 1), len(S)-1)
        new_alpha = float(scale*new_rank)
    else:
        new_rank = rank
        new_alpha = float(scale*new_rank)

    if S[0] <= MIN_SV:  # Zero matrix, set dim to 1
        new_rank = 1
        new_alpha = float(scale*new_rank)
    elif new_rank > rank:  # cap max rank at rank
        new_rank = rank
        new_alpha = float(scale*new_rank)

    # Calculate resize info
    s_sum = torch.sum(torch.abs(S))
    s_rank = torch.sum(torch.abs(S[:new_rank]))

    S_squared = S.pow(2)
    s_fro = torch.sqrt(torch.sum(S_squared))
    s_red_fro = torch.sqrt(torch.sum(S_squared[:new_rank]))
    fro_percent = float(s_red_fro/s_fro)

    param_dict["new_rank"] = new_rank
    param_dict["new_alpha"] = new_alpha
    param_dict["sum_retained"] = (s_rank)/s_sum
    param_dict["fro_retained"] = fro_percent
    param_dict["max_ratio"] = S[0]/S[new_rank]

    return param_dict


def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dynamic_param, verbose):
    network_alpha = None
    network_dim = None
    verbose_str = "\n"
    fro_list = []

    # Extract loaded lora dim and alpha
    for key, value in lora_sd.items():
        if network_alpha is None and 'alpha' in key:
            network_alpha = value
        if network_dim is None and 'lora_down' in key and len(value.size()) == 2:
            network_dim = value.size()[0]
        if network_alpha is not None and network_dim is not None:
            break
        if network_alpha is None:
            network_alpha = network_dim

    scale = network_alpha/network_dim

    if dynamic_method:
        print(f"Dynamically determining new alphas and dims based off {dynamic_method}: {dynamic_param}, max rank is {new_rank}")

    lora_down_weight = None
    lora_up_weight = None

    o_lora_sd = lora_sd.copy()
    block_down_name = None
    block_up_name = None

    with torch.no_grad():
        for key, value in tqdm(lora_sd.items()):
            if 'lora_down' in key:
                block_down_name = key.split(".")[0]
                lora_down_weight = value
            if 'lora_up' in key:
                block_up_name = key.split(".")[0]
                lora_up_weight = value

            weights_loaded = (lora_down_weight is not None and lora_up_weight is not None)

            if (block_down_name == block_up_name) and weights_loaded:

                conv2d = (len(lora_down_weight.size()) == 4)

                if conv2d:
                    full_weight_matrix = merge_conv(lora_down_weight, lora_up_weight, device)
                    param_dict = extract_conv(full_weight_matrix, new_rank, dynamic_method, dynamic_param, device, scale)
                else:
                    full_weight_matrix = merge_linear(lora_down_weight, lora_up_weight, device)
                    param_dict = extract_linear(full_weight_matrix, new_rank, dynamic_method, dynamic_param, device, scale)

                if verbose:
                    max_ratio = param_dict['max_ratio']
                    sum_retained = param_dict['sum_retained']
                    fro_retained = param_dict['fro_retained']
                    if not np.isnan(fro_retained):
                        fro_list.append(float(fro_retained))

                    verbose_str+=f"{block_down_name:75} | "
                    verbose_str+=f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}"

                if verbose and dynamic_method:
                    verbose_str+=f", dynamic | dim: {param_dict['new_rank']}, alpha: {param_dict['new_alpha']}\n"
                else:
                    verbose_str+=f"\n"

                new_alpha = param_dict['new_alpha']
                o_lora_sd[block_down_name + "." + "lora_down.weight"] = param_dict["lora_down"].to(save_dtype).contiguous()
                o_lora_sd[block_up_name + "." + "lora_up.weight"] = param_dict["lora_up"].to(save_dtype).contiguous()
                o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(param_dict['new_alpha']).to(save_dtype)

                block_down_name = None
                block_up_name = None
                lora_down_weight = None
                lora_up_weight = None
                weights_loaded = False
                del param_dict

    if verbose:
        print(verbose_str)
        print(f"Average Frobenius norm retention: {np.mean(fro_list):.2%} | std: {np.std(fro_list):0.3f}")
    print("resizing complete")
    return o_lora_sd, network_dim, new_alpha


def resize(args):

    def str_to_dtype(p):
        if p == 'float':
            return torch.float
        if p == 'fp16':
            return torch.float16
        if p == 'bf16':
            return torch.bfloat16
        return None

    if args.dynamic_method and not args.dynamic_param:
        raise Exception("If using dynamic_method, then dynamic_param is required")

    merge_dtype = str_to_dtype('float')  # matmul method above only seems to work in float32
    save_dtype = str_to_dtype(args.save_precision)
    if save_dtype is None:
        save_dtype = merge_dtype

    print("loading Model...")
    lora_sd, metadata = load_state_dict(args.model, merge_dtype)

    print("Resizing Lora...")
    state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device, args.dynamic_method, args.dynamic_param, args.verbose)

    # update metadata
    if metadata is None:
        metadata = {}

    comment = metadata.get("ss_training_comment", "")

    if not args.dynamic_method:
        metadata["ss_training_comment"] = f"dimension is resized from {old_dim} to {args.new_rank}; {comment}"
        metadata["ss_network_dim"] = str(args.new_rank)
        metadata["ss_network_alpha"] = str(new_alpha)
    else:
        metadata["ss_training_comment"] = f"Dynamic resize with {args.dynamic_method}: {args.dynamic_param} from {old_dim}; {comment}"
        metadata["ss_network_dim"] = 'Dynamic'
        metadata["ss_network_alpha"] = 'Dynamic'

    model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata)
    metadata["sshs_model_hash"] = model_hash
    metadata["sshs_legacy_hash"] = legacy_hash

    print(f"saving model to: {args.save_to}")
    save_to_file(args.save_to, state_dict, state_dict, save_dtype, metadata)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument("--save_precision", type=str, default=None,
                        choices=[None, "float", "fp16", "bf16"], help="precision in saving, float if omitted / 保存時の精度、未指定時はfloat")
    parser.add_argument("--new_rank", type=int, default=4,
                        help="Specify rank of output LoRA / 出力するLoRAのrank (dim)")
    parser.add_argument("--save_to", type=str, default=None,
                        help="destination file name: ckpt or safetensors file / 保存先のファイル名、ckptまたはsafetensors")
    parser.add_argument("--model", type=str, default=None,
                        help="LoRA model to resize at to new rank: ckpt or safetensors file / 読み込むLoRAモデル、ckptまたはsafetensors")
    parser.add_argument("--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う")
    parser.add_argument("--verbose", action="store_true",
                        help="Display verbose resizing information / rank変更時の詳細情報を出力する")
    parser.add_argument("--dynamic_method", type=str, default=None, choices=[None, "sv_ratio", "sv_fro", "sv_cumulative"],
                        help="Specify dynamic resizing method, --new_rank is used as a hard limit for max rank")
    parser.add_argument("--dynamic_param", type=float, default=None,
                        help="Specify target for dynamic reduction")

    args = parser.parse_args()
    resize(args)
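
A plausible invocation of the new script (file names are placeholders): with sv_fro and a dynamic_param of 0.9, each block keeps the smallest rank that retains roughly 90% of its Frobenius norm, capped at --new_rank:

    python tools/resize_lora.py --model my_lora.safetensors --save_to my_lora_resized.safetensors --new_rank 32 --dynamic_method sv_fro --dynamic_param 0.9 --device cuda --verbose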