From c90aa2cc612894f41042f1f404050bcfb99f9a67 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 19 Dec 2022 09:22:52 -0500 Subject: [PATCH] - Fix file/folder opening behind the browser window - Add WD14 and BLIP captioning to utilities - Improve overall GUI layout --- .gitignore | 7 +- BLIP_caption/configs/med_config.json | 21 + BLIP_caption/make_captions.py | 115 +++ BLIP_caption/models/blip.py | 238 +++++ BLIP_caption/models/med.py | 955 ++++++++++++++++++ BLIP_caption/models/vit.py | 305 ++++++ ...tfevents.1670941180.DESKTOP-4M9DSE4.4208.0 | Bin 40 -> 0 bytes ...fevents.1670947635.DESKTOP-4M9DSE4.10236.0 | Bin 15962 -> 0 bytes ...fevents.1670954444.DESKTOP-4M9DSE4.13888.0 | Bin 15962 -> 0 bytes ...tfevents.1670954967.DESKTOP-4M9DSE4.8248.0 | Bin 15962 -> 0 bytes ...fevents.1670955184.DESKTOP-4M9DSE4.20268.0 | Bin 15962 -> 0 bytes ...fevents.1670955508.DESKTOP-4M9DSE4.17076.0 | Bin 15962 -> 0 bytes ...fevents.1670956233.DESKTOP-4M9DSE4.11580.0 | Bin 15962 -> 0 bytes ...fevents.1670956729.DESKTOP-4M9DSE4.23776.0 | Bin 10706 -> 0 bytes ...fevents.1670957586.DESKTOP-4M9DSE4.10508.0 | Bin 15962 -> 0 bytes ...fevents.1670961310.DESKTOP-4M9DSE4.20624.0 | Bin 15962 -> 0 bytes ...fevents.1670971669.DESKTOP-4M9DSE4.26752.0 | Bin 696 -> 0 bytes ...fevents.1670971772.DESKTOP-4M9DSE4.19112.0 | Bin 40 -> 0 bytes ...tfevents.1670974130.DESKTOP-4M9DSE4.3624.0 | Bin 32090 -> 0 bytes ...fevents.1670981976.DESKTOP-4M9DSE4.27240.0 | Bin 32090 -> 0 bytes README.md | 4 + dreambooth_gui.py | 498 ++++----- .../{caption_gui.py => basic_caption_gui.py} | 8 +- library/blip_caption_gui.py | 111 ++ library/common_gui.py | 56 +- library/convert_model_gui.py | 10 +- library/wd14_caption_gui.py | 73 ++ mytraining.ps | 609 +++++++++++ requirements.txt | 9 + tools/convert_diffusers20_original_sd.py | 9 +- 30 files changed, 2757 insertions(+), 271 deletions(-) create mode 100644 BLIP_caption/configs/med_config.json create mode 100644 BLIP_caption/make_captions.py create mode 100644 BLIP_caption/models/blip.py create mode 100644 BLIP_caption/models/med.py create mode 100644 BLIP_caption/models/vit.py delete mode 100644 None/20221213091923/dreambooth/events.out.tfevents.1670941180.DESKTOP-4M9DSE4.4208.0 delete mode 100644 None/20221213110658/dreambooth/events.out.tfevents.1670947635.DESKTOP-4M9DSE4.10236.0 delete mode 100644 None/20221213130026/dreambooth/events.out.tfevents.1670954444.DESKTOP-4M9DSE4.13888.0 delete mode 100644 None/20221213130909/dreambooth/events.out.tfevents.1670954967.DESKTOP-4M9DSE4.8248.0 delete mode 100644 None/20221213131248/dreambooth/events.out.tfevents.1670955184.DESKTOP-4M9DSE4.20268.0 delete mode 100644 None/20221213131812/dreambooth/events.out.tfevents.1670955508.DESKTOP-4M9DSE4.17076.0 delete mode 100644 None/20221213133017/dreambooth/events.out.tfevents.1670956233.DESKTOP-4M9DSE4.11580.0 delete mode 100644 None/20221213133831/dreambooth/events.out.tfevents.1670956729.DESKTOP-4M9DSE4.23776.0 delete mode 100644 None/20221213135249/dreambooth/events.out.tfevents.1670957586.DESKTOP-4M9DSE4.10508.0 delete mode 100644 None/20221213145447/dreambooth/events.out.tfevents.1670961310.DESKTOP-4M9DSE4.20624.0 delete mode 100644 None/20221213174728/dreambooth/events.out.tfevents.1670971669.DESKTOP-4M9DSE4.26752.0 delete mode 100644 None/20221213174915/dreambooth/events.out.tfevents.1670971772.DESKTOP-4M9DSE4.19112.0 delete mode 100644 None/20221213182833/dreambooth/events.out.tfevents.1670974130.DESKTOP-4M9DSE4.3624.0 delete mode 100644 
None/20221213203916/dreambooth/events.out.tfevents.1670981976.DESKTOP-4M9DSE4.27240.0 rename library/{caption_gui.py => basic_caption_gui.py} (91%) create mode 100644 library/blip_caption_gui.py create mode 100644 library/wd14_caption_gui.py create mode 100644 mytraining.ps diff --git a/.gitignore b/.gitignore index cc18568..705a9f3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,8 @@ venv -venv1 -mytraining.ps __pycache__ +*.txt +cudnn_windows .vscode *.egg-info -build \ No newline at end of file +build +wd14_tagger_model \ No newline at end of file diff --git a/BLIP_caption/configs/med_config.json b/BLIP_caption/configs/med_config.json new file mode 100644 index 0000000..0ffad0a --- /dev/null +++ b/BLIP_caption/configs/med_config.json @@ -0,0 +1,21 @@ +{ + "architectures": [ + "BertModel" + ], + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-12, + "max_position_embeddings": 512, + "model_type": "bert", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 0, + "type_vocab_size": 2, + "vocab_size": 30524, + "encoder_width": 768, + "add_cross_attention": true +} diff --git a/BLIP_caption/make_captions.py b/BLIP_caption/make_captions.py new file mode 100644 index 0000000..59272ff --- /dev/null +++ b/BLIP_caption/make_captions.py @@ -0,0 +1,115 @@ +# このスクリプトのライセンスは、Apache License 2.0とします +# (c) 2022 Kohya S. @kohya_ss + +import argparse +import glob +import os +import json + +from PIL import Image +from tqdm import tqdm +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms.functional import InterpolationMode +from models.blip import blip_decoder +# from Salesforce_BLIP.models.blip import blip_decoder + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + +def main(args): + cwd = os.getcwd() + print('Current Working Directory is: ', cwd) + + os.chdir('.\BLIP_caption') + + image_paths = glob.glob(os.path.join(args.train_data_dir, "*.jpg")) + \ + glob.glob(os.path.join(args.train_data_dir, "*.png")) + glob.glob(os.path.join(args.train_data_dir, "*.webp")) + print(f"found {len(image_paths)} images.") + + print(f"loading BLIP caption: {args.caption_weights}") + # image_size = 384 + # model = blip_decoder(pretrained=args.caption_weights, image_size=image_size, vit='large', med_config='configs/med_config.json') + # model.eval() + # model = model.to(device) + + image_size = 384 + transform = transforms.Compose([ + transforms.Resize((image_size,image_size),interpolation=InterpolationMode.BICUBIC), + transforms.ToTensor(), + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + ]) + + model_url = args.caption_weights # 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth' + + model = blip_decoder(pretrained=model_url, image_size=384, vit='large') + model.eval() + model = model.to(device) + print("BLIP loaded") + # 正方形でいいのか? 
という気がするがソースがそうなので + # transform = transforms.Compose([ + # transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), + # transforms.ToTensor(), + # transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + # ]) + + # captioningする + def run_batch(path_imgs): + imgs = torch.stack([im for _, im in path_imgs]).to(device) + + with torch.no_grad(): + if args.beam_search: + captions = model.generate(imgs, sample=False, num_beams=args.num_beams, + max_length=args.max_length, min_length=args.min_length) + else: + captions = model.generate(imgs, sample=True, top_p=args.top_p, max_length=args.max_length, min_length=args.min_length) + + for (image_path, _), caption in zip(path_imgs, captions): + with open(os.path.splitext(image_path)[0] + args.caption_extension, "wt", encoding='utf-8') as f: + f.write(caption + "\n") + if args.debug: + print(image_path, caption) + + b_imgs = [] + for image_path in tqdm(image_paths, smoothing=0.0): + raw_image = Image.open(image_path) + if raw_image.mode != "RGB": + print(f"convert image mode {raw_image.mode} to RGB: {image_path}") + raw_image = raw_image.convert("RGB") + + image = transform(raw_image) + b_imgs.append((image_path, image)) + if len(b_imgs) >= args.batch_size: + run_batch(b_imgs) + b_imgs.clear() + if len(b_imgs) > 0: + run_batch(b_imgs) + + print("done!") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ") + parser.add_argument("caption_weights", type=str, + help="BLIP caption weights (model_large_caption.pth) / BLIP captionの重みファイル(model_large_caption.pth)") + parser.add_argument("--caption_extention", type=str, default=None, + help="extension of caption file (for backward compatibility) / 出力されるキャプションファイルの拡張子(スペルミスしていたのを残してあります)") + parser.add_argument("--caption_extension", type=str, default=".caption", help="extension of caption file / 出力されるキャプションファイルの拡張子") + parser.add_argument("--beam_search", action="store_true", + help="use beam search (default Nucleus sampling) / beam searchを使う(このオプション未指定時はNucleus sampling)") + parser.add_argument("--batch_size", type=int, default=1, help="batch size in inference / 推論時のバッチサイズ") + parser.add_argument("--num_beams", type=int, default=1, help="num of beams in beam search /beam search時のビーム数(多いと精度が上がるが時間がかかる)") + parser.add_argument("--top_p", type=float, default=0.9, help="top_p in Nucleus sampling / Nucleus sampling時のtop_p") + parser.add_argument("--max_length", type=int, default=75, help="max length of caption / captionの最大長") + parser.add_argument("--min_length", type=int, default=5, help="min length of caption / captionの最小長") + parser.add_argument("--debug", action="store_true", help="debug mode") + + args = parser.parse_args() + + # スペルミスしていたオプションを復元する + if args.caption_extention is not None: + args.caption_extension = args.caption_extention + + main(args) diff --git a/BLIP_caption/models/blip.py b/BLIP_caption/models/blip.py new file mode 100644 index 0000000..38678f6 --- /dev/null +++ b/BLIP_caption/models/blip.py @@ -0,0 +1,238 @@ +''' + * Copyright (c) 2022, salesforce.com, inc. + * All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li +''' +import warnings +warnings.filterwarnings("ignore") + +from models.vit import VisionTransformer, interpolate_pos_embed +from models.med import BertConfig, BertModel, BertLMHeadModel +from transformers import BertTokenizer + +import torch +from torch import nn +import torch.nn.functional as F + +import os +from urllib.parse import urlparse +from timm.models.hub import download_cached_file + +class BLIP_Base(nn.Module): + def __init__(self, + med_config = 'configs/med_config.json', + image_size = 224, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + + self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) + self.tokenizer = init_tokenizer() + med_config = BertConfig.from_json_file(med_config) + med_config.encoder_width = vision_width + self.text_encoder = BertModel(config=med_config, add_pooling_layer=False) + + + def forward(self, image, caption, mode): + + assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal" + text = self.tokenizer(caption, return_tensors="pt").to(image.device) + + if mode=='image': + # return image features + image_embeds = self.visual_encoder(image) + return image_embeds + + elif mode=='text': + # return text features + text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask, + return_dict = True, mode = 'text') + return text_output.last_hidden_state + + elif mode=='multimodal': + # return multimodel features + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + text.input_ids[:,0] = self.tokenizer.enc_token_id + output = self.text_encoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True, + ) + return output.last_hidden_state + + + +class BLIP_Decoder(nn.Module): + def __init__(self, + med_config = 'configs/med_config.json', + image_size = 384, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + prompt = 'a picture of ', + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + + self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) + self.tokenizer = init_tokenizer() + med_config = BertConfig.from_json_file(med_config) + med_config.encoder_width = vision_width + self.text_decoder = BertLMHeadModel(config=med_config) + + self.prompt = prompt + self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1 + + + def forward(self, image, caption): + + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device) + + text.input_ids[:,0] = self.tokenizer.bos_token_id + + decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100) + 
decoder_targets[:,:self.prompt_length] = -100 + + decoder_output = self.text_decoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + labels = decoder_targets, + return_dict = True, + ) + loss_lm = decoder_output.loss + + return loss_lm + + def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0): + image_embeds = self.visual_encoder(image) + + if not sample: + image_embeds = image_embeds.repeat_interleave(num_beams,dim=0) + + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts} + + prompt = [self.prompt] * image.size(0) + input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device) + input_ids[:,0] = self.tokenizer.bos_token_id + input_ids = input_ids[:, :-1] + + if sample: + #nucleus sampling + outputs = self.text_decoder.generate(input_ids=input_ids, + max_length=max_length, + min_length=min_length, + do_sample=True, + top_p=top_p, + num_return_sequences=1, + eos_token_id=self.tokenizer.sep_token_id, + pad_token_id=self.tokenizer.pad_token_id, + repetition_penalty=1.1, + **model_kwargs) + else: + #beam search + outputs = self.text_decoder.generate(input_ids=input_ids, + max_length=max_length, + min_length=min_length, + num_beams=num_beams, + eos_token_id=self.tokenizer.sep_token_id, + pad_token_id=self.tokenizer.pad_token_id, + repetition_penalty=repetition_penalty, + **model_kwargs) + + captions = [] + for output in outputs: + caption = self.tokenizer.decode(output, skip_special_tokens=True) + captions.append(caption[len(self.prompt):]) + return captions + + +def blip_decoder(pretrained='',**kwargs): + model = BLIP_Decoder(**kwargs) + if pretrained: + model,msg = load_checkpoint(model,pretrained) + assert(len(msg.missing_keys)==0) + return model + +def blip_feature_extractor(pretrained='',**kwargs): + model = BLIP_Base(**kwargs) + if pretrained: + model,msg = load_checkpoint(model,pretrained) + assert(len(msg.missing_keys)==0) + return model + +def init_tokenizer(): + tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + tokenizer.add_special_tokens({'bos_token':'[DEC]'}) + tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) + tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] + return tokenizer + + +def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): + + assert vit in ['base', 'large'], "vit parameter must be base or large" + if vit=='base': + vision_width = 768 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, + num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0 or drop_path_rate + ) + elif vit=='large': + vision_width = 1024 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, + num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0.1 or drop_path_rate + ) + return visual_encoder, vision_width + +def is_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + +def load_checkpoint(model,url_or_filename): + if is_url(url_or_filename): + cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) + checkpoint = 
torch.load(cached_file, map_location='cpu') + elif os.path.isfile(url_or_filename): + checkpoint = torch.load(url_or_filename, map_location='cpu') + else: + raise RuntimeError('checkpoint url or path is invalid') + + state_dict = checkpoint['model'] + + state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) + if 'visual_encoder_m.pos_embed' in model.state_dict().keys(): + state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], + model.visual_encoder_m) + for key in model.state_dict().keys(): + if key in state_dict.keys(): + if state_dict[key].shape!=model.state_dict()[key].shape: + del state_dict[key] + + msg = model.load_state_dict(state_dict,strict=False) + print('load checkpoint from %s'%url_or_filename) + return model,msg + diff --git a/BLIP_caption/models/med.py b/BLIP_caption/models/med.py new file mode 100644 index 0000000..572d39d --- /dev/null +++ b/BLIP_caption/models/med.py @@ -0,0 +1,955 @@ +''' + * Copyright (c) 2022, salesforce.com, inc. + * All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li + * Based on huggingface code base + * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert +''' + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from torch import Tensor, device, dtype, nn +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss +import torch.nn.functional as F + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + ModelOutput, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging +from transformers.models.bert.configuration_bert import BertConfig + + +logger = logging.get_logger(__name__) + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if input_ids is not None: + 
input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads) + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = 
self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if self.config.add_cross_attention: + self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + mode=None, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + if mode=='multimodal': + assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" + + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multimodal', + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warn( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + mode=mode, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + mode=mode, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = BertConfig + base_model_prefix = "bert" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """ Initialize the weights """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertModel(BertPreTrainedModel): + """ + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in `Attention is + all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an + input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + + def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, + device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + 
return_logits=False, + is_decoder=True, + reduction='mean', + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + if reduction=='none': + lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": encoder_hidden_states, + "encoder_attention_mask": encoder_attention_mask, + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/BLIP_caption/models/vit.py b/BLIP_caption/models/vit.py new file mode 100644 index 0000000..cec3d8e --- /dev/null +++ b/BLIP_caption/models/vit.py @@ -0,0 +1,305 @@ +''' + * Copyright (c) 2022, salesforce.com, inc. + * All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li + * Based on timm code base + * https://github.com/rwightman/pytorch-image-models/tree/master/timm +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.models.vision_transformer import _cfg, PatchEmbed +from timm.models.registry import register_model +from timm.models.layers import trunc_normal_, DropPath +from timm.models.helpers import named_apply, adapt_input_conv + +from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.attn_gradients = None + self.attention_map = None + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def forward(self, x, register_hook=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + if register_hook: + self.save_attention_map(attn) + attn.register_hook(self.save_attn_gradients) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if use_grad_checkpointing: + self.attn = checkpoint_wrapper(self.attn) + self.mlp = checkpoint_wrapper(self.mlp) + + def forward(self, x, register_hook=False): + x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - + https://arxiv.org/abs/2010.11929 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, + use_grad_checkpointing=False, ckpt_layer=0): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer) + ) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def forward(self, x, register_blk=-1): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + x = x + self.pos_embed[:,:x.size(1),:] + x 
= self.pos_drop(x) + + for i,blk in enumerate(self.blocks): + x = blk(x, register_blk==i) + x = self.norm(x) + + return x + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) +# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: +# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) +# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) +# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: +# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) +# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + 
+        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
+        block.attn.qkv.weight.copy_(torch.cat([
+            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
+        block.attn.qkv.bias.copy_(torch.cat([
+            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
+        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
+        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
+        for r in range(2):
+            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
+            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
+        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
+        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))
+
+
+def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
+    # interpolate position embedding
+    embedding_size = pos_embed_checkpoint.shape[-1]
+    num_patches = visual_encoder.patch_embed.num_patches
+    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
+    # height (== width) for the checkpoint position embedding
+    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+    # height (== width) for the new position embedding
+    new_size = int(num_patches ** 0.5)
+
+    if orig_size != new_size:
+        # class_token and dist_token are kept unchanged
+        extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+        # only the position tokens are interpolated
+        pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+        pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
+        pos_tokens = torch.nn.functional.interpolate(
+            pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
+        pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+        new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+        print('reshape position embedding from %d to %d' % (orig_size ** 2, new_size ** 2))
+
+        return new_pos_embed
+    else:
+        return pos_embed_checkpoint
\ No newline at end of file
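interpolate_pos_embed is what allows ViT weights pretrained at one input resolution to be reused at another (the BLIP captioner runs its ViT at 384x384). A minimal usage sketch follows; the import path, checkpoint filename, and the 'pos_embed' key are illustrative assumptions and are not part of this patch.

from functools import partial

import torch
import torch.nn as nn

from models.vit import VisionTransformer, interpolate_pos_embed  # assumed import path

# Build a ViT at the new working resolution (384x384 input, 16x16 patches).
visual_encoder = VisionTransformer(img_size=384, patch_size=16, embed_dim=768, depth=12,
                                   num_heads=12, mlp_ratio=4, qkv_bias=True,
                                   norm_layer=partial(nn.LayerNorm, eps=1e-6))

# Hypothetical checkpoint saved at a different image size (e.g. 224x224).
state_dict = torch.load('vit_checkpoint.pth', map_location='cpu')

# Resize the stored position embedding to the new patch grid, then load the rest as-is.
state_dict['pos_embed'] = interpolate_pos_embed(state_dict['pos_embed'], visual_encoder)
visual_encoder.load_state_dict(state_dict, strict=False)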
diff --git a/None/20221213091923/dreambooth/events.out.tfevents.1670941180.DESKTOP-4M9DSE4.4208.0 b/None/20221213091923/dreambooth/events.out.tfevents.1670941180.DESKTOP-4M9DSE4.4208.0
deleted file mode 100644
index 93e903255e7efdbb64b993ab00ef01ada5d9807b..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213110658/dreambooth/events.out.tfevents.1670947635.DESKTOP-4M9DSE4.10236.0 b/None/20221213110658/dreambooth/events.out.tfevents.1670947635.DESKTOP-4M9DSE4.10236.0
deleted file mode 100644
index 8a8fe2414d4de11b34687dc64280f0bf07bcda50..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213130026/dreambooth/events.out.tfevents.1670954444.DESKTOP-4M9DSE4.13888.0 b/None/20221213130026/dreambooth/events.out.tfevents.1670954444.DESKTOP-4M9DSE4.13888.0
deleted file mode 100644
index f009137eece3e4daebf3d83b035f5811a421c399..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213131812/dreambooth/events.out.tfevents.1670955508.DESKTOP-4M9DSE4.17076.0 b/None/20221213131812/dreambooth/events.out.tfevents.1670955508.DESKTOP-4M9DSE4.17076.0
deleted file mode 100644
index 9da53e8357ee42d53ddcb308059cee88c32eaa41..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213133831/dreambooth/events.out.tfevents.1670956729.DESKTOP-4M9DSE4.23776.0 b/None/20221213133831/dreambooth/events.out.tfevents.1670956729.DESKTOP-4M9DSE4.23776.0
deleted file mode 100644
index 15a31121c3c3462855718859e89ef3cdcef2aea9..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213145447/dreambooth/events.out.tfevents.1670961310.DESKTOP-4M9DSE4.20624.0 b/None/20221213145447/dreambooth/events.out.tfevents.1670961310.DESKTOP-4M9DSE4.20624.0
deleted file mode 100644
index e674c635563e265ac6daa603ca7c69a00047e5f5..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213174728/dreambooth/events.out.tfevents.1670971669.DESKTOP-4M9DSE4.26752.0 b/None/20221213174728/dreambooth/events.out.tfevents.1670971669.DESKTOP-4M9DSE4.26752.0
deleted file mode 100644
index faeb705298b75db120ae0149b05bce44f70e1265..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213174915/dreambooth/events.out.tfevents.1670971772.DESKTOP-4M9DSE4.19112.0 b/None/20221213174915/dreambooth/events.out.tfevents.1670971772.DESKTOP-4M9DSE4.19112.0
deleted file mode 100644
index e8f711d41abd0a9a1f8f07ec6483884a1c1c497b..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213182833/dreambooth/events.out.tfevents.1670974130.DESKTOP-4M9DSE4.3624.0 b/None/20221213182833/dreambooth/events.out.tfevents.1670974130.DESKTOP-4M9DSE4.3624.0
deleted file mode 100644
index fc65723ae8b9fa169465a59c1febee31e7a1058f..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/None/20221213203916/dreambooth/events.out.tfevents.1670981976.DESKTOP-4M9DSE4.27240.0 b/None/20221213203916/dreambooth/events.out.tfevents.1670981976.DESKTOP-4M9DSE4.27240.0
deleted file mode 100644
index 940401174d4e7a94944a2d19c6685ff4e6c7942b..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/README.md b/README.md
index b94dd2e..5eec22e 100644
--- a/README.md
+++ b/README.md
@@ -130,6 +130,10 @@ Drop by the discord server for support: https://discord.com/channels/10415185624
 
 ## Change history
 
+* 12/19 (v18.2) update:
+    - Fix file/folder opening behind the browser window
+    - Add WD14 and BLIP captioning to utilities
+    - Improve overall GUI layout
 * 12/18 (v18.1) update:
     - Add Stable Diffusion model conversion utility. Make sure to run `pip install -U -r requirements.txt` after updating to this release, as it introduces new pip requirements.
* 12/17 (v18) update: diff --git a/dreambooth_gui.py b/dreambooth_gui.py index cfa132a..f5cba87 100644 --- a/dreambooth_gui.py +++ b/dreambooth_gui.py @@ -11,15 +11,18 @@ import subprocess import pathlib import shutil from library.dreambooth_folder_creation_gui import gradio_dreambooth_folder_creation_tab -from library.caption_gui import gradio_caption_gui_tab +from library.basic_caption_gui import gradio_basic_caption_gui_tab +from library.convert_model_gui import gradio_convert_model_tab +from library.blip_caption_gui import gradio_blip_caption_gui_tab +from library.wd14_caption_gui import gradio_wd14_caption_gui_tab from library.dataset_balancing_gui import gradio_dataset_balancing_tab from library.common_gui import ( get_folder_path, remove_doublequote, get_file_path, + get_saveasfile_path ) -from library.convert_model_gui import gradio_convert_model_tab -from easygui import filesavebox, msgbox +from easygui import msgbox folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 @@ -65,19 +68,21 @@ def save_configuration( if save_as_bool: print('Save as...') - file_path = filesavebox( - 'Select the config file to save', - default='finetune.json', - filetypes='*.json', - ) + # file_path = filesavebox( + # 'Select the config file to save', + # default='finetune.json', + # filetypes='*.json', + # ) + file_path = get_saveasfile_path(file_path) else: print('Save...') if file_path == None or file_path == '': - file_path = filesavebox( - 'Select the config file to save', - default='finetune.json', - filetypes='*.json', - ) + # file_path = filesavebox( + # 'Select the config file to save', + # default='finetune.json', + # filetypes='*.json', + # ) + file_path = get_saveasfile_path(file_path) if file_path == None: return original_file_path # In case a file_path was provided and the user decide to cancel the open action @@ -455,261 +460,258 @@ interface = gr.Blocks(css=css) with interface: dummy_true = gr.Label(value=True, visible=False) dummy_false = gr.Label(value=False, visible=False) - gr.Markdown('Enter kohya finetuner parameter using this interface.') - with gr.Accordion('Configuration File Load/Save', open=False): - with gr.Row(): - button_open_config = gr.Button('Open 📂', elem_id='open_folder') - button_save_config = gr.Button('Save 💾', elem_id='open_folder') - button_save_as_config = gr.Button( - 'Save as... 💾', elem_id='open_folder' + with gr.Tab('Dreambooth'): + gr.Markdown('Enter kohya finetuner parameter using this interface.') + with gr.Accordion('Configuration File Load/Save', open=False): + with gr.Row(): + button_open_config = gr.Button('Open 📂', elem_id='open_folder') + button_save_config = gr.Button('Save 💾', elem_id='open_folder') + button_save_as_config = gr.Button( + 'Save as... 
💾', elem_id='open_folder' + ) + config_file_name = gr.Textbox( + label='', + placeholder="type the configuration file path or use the 'Open' button above to select it...", ) - config_file_name = gr.Textbox( - label='', - placeholder="type the configuration file path or use the 'Open' button above to select it...", - ) - config_file_name.change( - remove_doublequote, - inputs=[config_file_name], - outputs=[config_file_name], - ) - with gr.Tab('Source model'): - # Define the input elements - with gr.Row(): - pretrained_model_name_or_path_input = gr.Textbox( - label='Pretrained model name or path', - placeholder='enter the path to custom model or name of pretrained model', + config_file_name.change( + remove_doublequote, + inputs=[config_file_name], + outputs=[config_file_name], ) - pretrained_model_name_or_path_fille = gr.Button( - document_symbol, elem_id='open_folder_small' + with gr.Tab('Source model'): + # Define the input elements + with gr.Row(): + pretrained_model_name_or_path_input = gr.Textbox( + label='Pretrained model name or path', + placeholder='enter the path to custom model or name of pretrained model', + ) + pretrained_model_name_or_path_fille = gr.Button( + document_symbol, elem_id='open_folder_small' + ) + pretrained_model_name_or_path_fille.click( + get_file_path, inputs=[pretrained_model_name_or_path_input], outputs=pretrained_model_name_or_path_input + ) + pretrained_model_name_or_path_folder = gr.Button( + folder_symbol, elem_id='open_folder_small' + ) + pretrained_model_name_or_path_folder.click( + get_folder_path, outputs=pretrained_model_name_or_path_input + ) + model_list = gr.Dropdown( + label='(Optional) Model Quick Pick', + choices=[ + 'custom', + 'stabilityai/stable-diffusion-2-1-base', + 'stabilityai/stable-diffusion-2-base', + 'stabilityai/stable-diffusion-2-1', + 'stabilityai/stable-diffusion-2', + 'runwayml/stable-diffusion-v1-5', + 'CompVis/stable-diffusion-v1-4', + ], + ) + save_model_as_dropdown = gr.Dropdown( + label='Save trained model as', + choices=[ + 'same as source model', + 'ckpt', + 'diffusers', + "diffusers_safetensors", + 'safetensors', + ], + value='same as source model' + ) + with gr.Row(): + v2_input = gr.Checkbox(label='v2', value=True) + v_parameterization_input = gr.Checkbox( + label='v_parameterization', value=False + ) + pretrained_model_name_or_path_input.change( + remove_doublequote, + inputs=[pretrained_model_name_or_path_input], + outputs=[pretrained_model_name_or_path_input], ) - pretrained_model_name_or_path_fille.click( - get_file_path, inputs=[pretrained_model_name_or_path_input], outputs=pretrained_model_name_or_path_input - ) - pretrained_model_name_or_path_folder = gr.Button( - folder_symbol, elem_id='open_folder_small' - ) - pretrained_model_name_or_path_folder.click( - get_folder_path, outputs=pretrained_model_name_or_path_input - ) - model_list = gr.Dropdown( - label='(Optional) Model Quick Pick', - choices=[ - 'custom', - 'stabilityai/stable-diffusion-2-1-base', - 'stabilityai/stable-diffusion-2-base', - 'stabilityai/stable-diffusion-2-1', - 'stabilityai/stable-diffusion-2', - 'runwayml/stable-diffusion-v1-5', - 'CompVis/stable-diffusion-v1-4', + model_list.change( + set_pretrained_model_name_or_path_input, + inputs=[model_list, v2_input, v_parameterization_input], + outputs=[ + pretrained_model_name_or_path_input, + v2_input, + v_parameterization_input, ], ) - save_model_as_dropdown = gr.Dropdown( - label='Save trained model as', - choices=[ - 'same as source model', - 'ckpt', - 'diffusers', - "diffusers_safetensors", - 
'safetensors', - ], - value='same as source model' - ) - with gr.Row(): - v2_input = gr.Checkbox(label='v2', value=True) - v_parameterization_input = gr.Checkbox( - label='v_parameterization', value=False - ) - pretrained_model_name_or_path_input.change( - remove_doublequote, - inputs=[pretrained_model_name_or_path_input], - outputs=[pretrained_model_name_or_path_input], - ) - model_list.change( - set_pretrained_model_name_or_path_input, - inputs=[model_list, v2_input, v_parameterization_input], - outputs=[ - pretrained_model_name_or_path_input, - v2_input, - v_parameterization_input, - ], - ) - - with gr.Tab('Directories'): - with gr.Row(): - train_data_dir_input = gr.Textbox( - label='Image folder', - placeholder='Folder where the training folders containing the images are located', - ) - train_data_dir_input_folder = gr.Button( - '📂', elem_id='open_folder_small' - ) - train_data_dir_input_folder.click( - get_folder_path, outputs=train_data_dir_input - ) - reg_data_dir_input = gr.Textbox( - label='Regularisation folder', - placeholder='(Optional) Folder where where the regularization folders containing the images are located', - ) - reg_data_dir_input_folder = gr.Button( - '📂', elem_id='open_folder_small' - ) - reg_data_dir_input_folder.click( - get_folder_path, outputs=reg_data_dir_input - ) - with gr.Row(): - output_dir_input = gr.Textbox( - label='Output folder', - placeholder='Folder to output trained model', - ) - output_dir_input_folder = gr.Button( - '📂', elem_id='open_folder_small' - ) - output_dir_input_folder.click( - get_folder_path, outputs=output_dir_input - ) - logging_dir_input = gr.Textbox( - label='Logging folder', - placeholder='Optional: enable logging and output TensorBoard log to this folder', - ) - logging_dir_input_folder = gr.Button( - '📂', elem_id='open_folder_small' - ) - logging_dir_input_folder.click( - get_folder_path, outputs=logging_dir_input - ) - train_data_dir_input.change( - remove_doublequote, - inputs=[train_data_dir_input], - outputs=[train_data_dir_input], - ) - reg_data_dir_input.change( - remove_doublequote, - inputs=[reg_data_dir_input], - outputs=[reg_data_dir_input], - ) - output_dir_input.change( - remove_doublequote, - inputs=[output_dir_input], - outputs=[output_dir_input], - ) - logging_dir_input.change( - remove_doublequote, - inputs=[logging_dir_input], - outputs=[logging_dir_input], - ) - with gr.Tab('Training parameters'): - with gr.Row(): - learning_rate_input = gr.Textbox(label='Learning rate', value=1e-6) - lr_scheduler_input = gr.Dropdown( - label='LR Scheduler', - choices=[ - 'constant', - 'constant_with_warmup', - 'cosine', - 'cosine_with_restarts', - 'linear', - 'polynomial', - ], - value='constant', - ) - lr_warmup_input = gr.Textbox(label='LR warmup', value=0) - with gr.Row(): - train_batch_size_input = gr.Slider( - minimum=1, - maximum=32, - label='Train batch size', - value=1, - step=1, - ) - epoch_input = gr.Textbox(label='Epoch', value=1) - save_every_n_epochs_input = gr.Textbox( - label='Save every N epochs', value=1 - ) - with gr.Row(): - mixed_precision_input = gr.Dropdown( - label='Mixed precision', - choices=[ - 'no', - 'fp16', - 'bf16', - ], - value='fp16', - ) - save_precision_input = gr.Dropdown( - label='Save precision', - choices=[ - 'float', - 'fp16', - 'bf16', - ], - value='fp16', - ) - num_cpu_threads_per_process_input = gr.Slider( - minimum=1, - maximum=os.cpu_count(), - step=1, - label='Number of CPU threads per process', - value=os.cpu_count(), - ) - with gr.Row(): - seed_input = gr.Textbox(label='Seed', 
value=1234) - max_resolution_input = gr.Textbox( - label='Max resolution', value='512,512', placeholder='512,512' - ) - with gr.Row(): - caption_extention_input = gr.Textbox( - label='Caption Extension', - placeholder='(Optional) Extension for caption files. default: .caption', - ) - stop_text_encoder_training_input = gr.Slider( - minimum=0, - maximum=100, - value=0, - step=1, - label='Stop text encoder training', - ) - with gr.Row(): - full_fp16_input = gr.Checkbox( - label='Full fp16 training (experimental)', value=False - ) - no_token_padding_input = gr.Checkbox( - label='No token padding', value=False - ) - gradient_checkpointing_input = gr.Checkbox( - label='Gradient checkpointing', value=False + with gr.Tab('Directories'): + with gr.Row(): + train_data_dir_input = gr.Textbox( + label='Image folder', + placeholder='Folder where the training folders containing the images are located', + ) + train_data_dir_input_folder = gr.Button( + '📂', elem_id='open_folder_small' + ) + train_data_dir_input_folder.click( + get_folder_path, outputs=train_data_dir_input + ) + reg_data_dir_input = gr.Textbox( + label='Regularisation folder', + placeholder='(Optional) Folder where where the regularization folders containing the images are located', + ) + reg_data_dir_input_folder = gr.Button( + '📂', elem_id='open_folder_small' + ) + reg_data_dir_input_folder.click( + get_folder_path, outputs=reg_data_dir_input + ) + with gr.Row(): + output_dir_input = gr.Textbox( + label='Output folder', + placeholder='Folder to output trained model', + ) + output_dir_input_folder = gr.Button( + '📂', elem_id='open_folder_small' + ) + output_dir_input_folder.click( + get_folder_path, outputs=output_dir_input + ) + logging_dir_input = gr.Textbox( + label='Logging folder', + placeholder='Optional: enable logging and output TensorBoard log to this folder', + ) + logging_dir_input_folder = gr.Button( + '📂', elem_id='open_folder_small' + ) + logging_dir_input_folder.click( + get_folder_path, outputs=logging_dir_input + ) + train_data_dir_input.change( + remove_doublequote, + inputs=[train_data_dir_input], + outputs=[train_data_dir_input], ) - with gr.Row(): - enable_bucket_input = gr.Checkbox( - label='Enable buckets', value=True + reg_data_dir_input.change( + remove_doublequote, + inputs=[reg_data_dir_input], + outputs=[reg_data_dir_input], ) - cache_latent_input = gr.Checkbox(label='Cache latent', value=True) - use_8bit_adam_input = gr.Checkbox( - label='Use 8bit adam', value=True + output_dir_input.change( + remove_doublequote, + inputs=[output_dir_input], + outputs=[output_dir_input], ) - xformers_input = gr.Checkbox(label='Use xformers', value=True) + logging_dir_input.change( + remove_doublequote, + inputs=[logging_dir_input], + outputs=[logging_dir_input], + ) + with gr.Tab('Training parameters'): + with gr.Row(): + learning_rate_input = gr.Textbox(label='Learning rate', value=1e-6) + lr_scheduler_input = gr.Dropdown( + label='LR Scheduler', + choices=[ + 'constant', + 'constant_with_warmup', + 'cosine', + 'cosine_with_restarts', + 'linear', + 'polynomial', + ], + value='constant', + ) + lr_warmup_input = gr.Textbox(label='LR warmup', value=0) + with gr.Row(): + train_batch_size_input = gr.Slider( + minimum=1, + maximum=32, + label='Train batch size', + value=1, + step=1, + ) + epoch_input = gr.Textbox(label='Epoch', value=1) + save_every_n_epochs_input = gr.Textbox( + label='Save every N epochs', value=1 + ) + with gr.Row(): + mixed_precision_input = gr.Dropdown( + label='Mixed precision', + choices=[ + 'no', + 'fp16', 
+ 'bf16', + ], + value='fp16', + ) + save_precision_input = gr.Dropdown( + label='Save precision', + choices=[ + 'float', + 'fp16', + 'bf16', + ], + value='fp16', + ) + num_cpu_threads_per_process_input = gr.Slider( + minimum=1, + maximum=os.cpu_count(), + step=1, + label='Number of CPU threads per process', + value=os.cpu_count(), + ) + with gr.Row(): + seed_input = gr.Textbox(label='Seed', value=1234) + max_resolution_input = gr.Textbox( + label='Max resolution', value='512,512', placeholder='512,512' + ) + with gr.Row(): + caption_extention_input = gr.Textbox( + label='Caption Extension', + placeholder='(Optional) Extension for caption files. default: .caption', + ) + stop_text_encoder_training_input = gr.Slider( + minimum=0, + maximum=100, + value=0, + step=1, + label='Stop text encoder training', + ) + with gr.Row(): + full_fp16_input = gr.Checkbox( + label='Full fp16 training (experimental)', value=False + ) + no_token_padding_input = gr.Checkbox( + label='No token padding', value=False + ) + + gradient_checkpointing_input = gr.Checkbox( + label='Gradient checkpointing', value=False + ) + with gr.Row(): + enable_bucket_input = gr.Checkbox( + label='Enable buckets', value=True + ) + cache_latent_input = gr.Checkbox(label='Cache latent', value=True) + use_8bit_adam_input = gr.Checkbox( + label='Use 8bit adam', value=True + ) + xformers_input = gr.Checkbox(label='Use xformers', value=True) + + button_run = gr.Button('Train model') with gr.Tab('Utilities'): - # Dreambooth folder creation tab + with gr.Tab('Captioning'): + gradio_basic_caption_gui_tab() + gradio_blip_caption_gui_tab() + gradio_wd14_caption_gui_tab() gradio_dreambooth_folder_creation_tab( train_data_dir_input, reg_data_dir_input, output_dir_input, logging_dir_input, ) - # Captionning tab - gradio_caption_gui_tab() gradio_dataset_balancing_tab() gradio_convert_model_tab() - # with gr.Tab('Model conversion'): - # convert_to_safetensors_input = gr.Checkbox( - # label='Convert to SafeTensors', value=True - # ) - # convert_to_ckpt_input = gr.Checkbox( - # label='Convert to CKPT', value=False - # ) - button_run = gr.Button('Train model') + button_open_config.click( open_configuration, diff --git a/library/caption_gui.py b/library/basic_caption_gui.py similarity index 91% rename from library/caption_gui.py rename to library/basic_caption_gui.py index de3808d..377a4b3 100644 --- a/library/caption_gui.py +++ b/library/basic_caption_gui.py @@ -41,10 +41,10 @@ def caption_images( ### -def gradio_caption_gui_tab(): - with gr.Tab('Captioning'): +def gradio_basic_caption_gui_tab(): + with gr.Tab('Basic Captioning'): gr.Markdown( - 'This utility will allow the creation of caption files for each images in a folder.' + 'This utility will allow the creation of simple caption files for each images in a folder.' 
) with gr.Row(): caption_text_input = gr.Textbox( @@ -64,7 +64,7 @@ def gradio_caption_gui_tab(): ) with gr.Row(): images_dir_input = gr.Textbox( - label='Image forder to caption', + label='Image folder to caption', placeholder='Directory containing the images to caption', interactive=True, ) diff --git a/library/blip_caption_gui.py b/library/blip_caption_gui.py new file mode 100644 index 0000000..fe33ca7 --- /dev/null +++ b/library/blip_caption_gui.py @@ -0,0 +1,111 @@ +import gradio as gr +from easygui import msgbox +import subprocess +from .common_gui import get_folder_path + + +def caption_images( + train_data_dir, + caption_file_ext, + batch_size, + num_beams, + top_p, + max_length, + min_length, + beam_search, +): + # Check for caption_text_input + # if caption_text_input == "": + # msgbox("Caption text is missing...") + # return + + # Check for images_dir_input + if train_data_dir == '': + msgbox('Image folder is missing...') + return + + print(f'Captioning files in {train_data_dir}...') + run_cmd = f'.\\venv\\Scripts\\python.exe "./BLIP_caption/make_captions.py"' + run_cmd += f' --batch_size="{int(batch_size)}"' + run_cmd += f' --num_beams="{int(num_beams)}"' + run_cmd += f' --top_p="{top_p}"' + run_cmd += f' --max_length="{int(max_length)}"' + run_cmd += f' --min_length="{int(min_length)}"' + if beam_search: + run_cmd += f' --beam_search' + if caption_file_ext != '': + run_cmd += f' --caption_extension="{caption_file_ext}"' + run_cmd += f' "{train_data_dir}"' + run_cmd += f' "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth"' + + print(run_cmd) + + # Run the command + subprocess.run(run_cmd) + + print('...captioning done') + + +### +# Gradio UI +### + + +def gradio_blip_caption_gui_tab(): + with gr.Tab('BLIP Captioning'): + gr.Markdown( + 'This utility will use BLIP to caption files for each images in a folder.' 
+ ) + with gr.Row(): + train_data_dir = gr.Textbox( + label='Image folder to caption', + placeholder='Directory containing the images to caption', + interactive=True, + ) + button_train_data_dir_input = gr.Button( + '📂', elem_id='open_folder_small' + ) + button_train_data_dir_input.click( + get_folder_path, outputs=train_data_dir + ) + + caption_file_ext = gr.Textbox( + label='Caption file extension', + placeholder='(Optional) Default: .caption', + interactive=True, + ) + + batch_size = gr.Number( + value=1, label='Batch size', interactive=True + ) + + with gr.Row(): + beam_search = gr.Checkbox( + label='Use beam search', interactive=True, value=True + ) + num_beams = gr.Number( + value=1, label='Number of beams', interactive=True + ) + top_p = gr.Number(value=0.9, label='Top p', interactive=True) + max_length = gr.Number( + value=75, label='Max length', interactive=True + ) + min_length = gr.Number( + value=5, label='Min length', interactive=True + ) + + caption_button = gr.Button('Caption images') + + caption_button.click( + caption_images, + inputs=[ + train_data_dir, + caption_file_ext, + batch_size, + num_beams, + top_p, + max_length, + min_length, + beam_search, + ], + ) diff --git a/library/common_gui.py b/library/common_gui.py index 9c90417..7e34057 100644 --- a/library/common_gui.py +++ b/library/common_gui.py @@ -1,16 +1,17 @@ -from easygui import diropenbox, fileopenbox +from tkinter import filedialog, Tk - -def get_folder_path(): - folder_path = diropenbox('Select the directory to use') - - return folder_path - - -def get_file_path(file_path): - file_path = fileopenbox( - 'Select the config file to load', default=file_path, filetypes='*.json', - ) +def get_file_path(file_path='', defaultextension='.json'): + current_file_path = file_path + # print(f'current file path: {current_file_path}') + + root = Tk() + root.wm_attributes('-topmost', 1) + root.withdraw() + file_path = filedialog.askopenfilename(filetypes = (("Config files", "*.json"), ("All files", "*")), defaultextension=defaultextension) + root.destroy() + + if file_path == '': + file_path = current_file_path return file_path @@ -20,3 +21,34 @@ def remove_doublequote(file_path): file_path = file_path.replace('"', '') return file_path + + +def get_folder_path(folder_path=''): + current_folder_path = folder_path + + root = Tk() + root.wm_attributes('-topmost', 1) + root.withdraw() + folder_path = filedialog.askdirectory() + root.destroy() + + if folder_path == '': + folder_path = current_folder_path + + return folder_path + +def get_saveasfile_path(file_path='', defaultextension='.json'): + current_file_path = file_path + # print(f'current file path: {current_file_path}') + + root = Tk() + root.wm_attributes('-topmost', 1) + root.withdraw() + file_path = filedialog.asksaveasfile(filetypes = (("Config files", "*.json"), ("All files", "*")), defaultextension=defaultextension) + root.destroy() + + file_path = file_path.name + if file_path == '': + file_path = current_file_path + + return file_path \ No newline at end of file diff --git a/library/convert_model_gui.py b/library/convert_model_gui.py index fed761e..8f6751a 100644 --- a/library/convert_model_gui.py +++ b/library/convert_model_gui.py @@ -50,12 +50,15 @@ def convert_model(source_model_input, source_model_type, target_model_folder_inp if not target_save_precision_type == 'unspecified': run_cmd += f' --{target_save_precision_type}' - if target_model_type == "diffuser": + if target_model_type == "diffuser" or target_model_type == "diffuser_safetensors": run_cmd += f' 
--reference_model="{source_model_type}"' + if target_model_type == 'diffuser_safetensors': + run_cmd += ' --use_safetensors' + run_cmd += f' "{source_model_input}"' - if target_model_type == "diffuser": + if target_model_type == "diffuser" or target_model_type == "diffuser_safetensors": target_model_path = os.path.join(target_model_folder_input, target_model_name_input) run_cmd += f' "{target_model_path}"' else: @@ -67,7 +70,7 @@ def convert_model(source_model_input, source_model_type, target_model_folder_inp # Run the command subprocess.run(run_cmd) - if not target_model_type == "diffuser": + if not target_model_type == "diffuser" or target_model_type == "diffuser_safetensors": v2_models = ['stabilityai/stable-diffusion-2-1-base', 'stabilityai/stable-diffusion-2-base',] @@ -171,6 +174,7 @@ def gradio_convert_model_tab(): ) target_model_type = gr.Dropdown(label="Target model type", choices=[ 'diffuser', + 'diffuser_safetensors', 'ckpt', 'safetensors', ],) diff --git a/library/wd14_caption_gui.py b/library/wd14_caption_gui.py new file mode 100644 index 0000000..b575ec2 --- /dev/null +++ b/library/wd14_caption_gui.py @@ -0,0 +1,73 @@ +import gradio as gr +from easygui import msgbox +import subprocess +from .common_gui import get_folder_path + + +def caption_images(train_data_dir, caption_extension, batch_size, thresh): + # Check for caption_text_input + # if caption_text_input == "": + # msgbox("Caption text is missing...") + # return + + # Check for images_dir_input + if train_data_dir == '': + msgbox('Image folder is missing...') + return + + print(f'Captioning files in {train_data_dir}...') + run_cmd = f'accelerate launch "./script/tag_images_by_wd14_tagger.py"' + run_cmd += f' --batch_size="{int(batch_size)}"' + run_cmd += f' --thresh="{thresh}"' + if caption_extension != '': + run_cmd += f' --caption_extension="{caption_extension}"' + run_cmd += f' "{train_data_dir}"' + + print(run_cmd) + + # Run the command + subprocess.run(run_cmd) + + print('...captioning done') + + +### +# Gradio UI +### + + +def gradio_wd14_caption_gui_tab(): + with gr.Tab('WD14 Captioning'): + gr.Markdown( + 'This utility will use WD14 to caption files for each images in a folder.' 
+ ) + with gr.Row(): + train_data_dir = gr.Textbox( + label='Image folder to caption', + placeholder='Directory containing the images to caption', + interactive=True, + ) + button_train_data_dir_input = gr.Button( + '📂', elem_id='open_folder_small' + ) + button_train_data_dir_input.click( + get_folder_path, outputs=train_data_dir + ) + + caption_extension = gr.Textbox( + label='Caption file extension', + placeholder='(Optional) Default: .caption', + interactive=True, + ) + thresh = gr.Number(value=0.35, label='Threshold') + + batch_size = gr.Number( + value=1, label='Batch size', interactive=True + ) + + caption_button = gr.Button('Caption images') + + caption_button.click( + caption_images, + inputs=[train_data_dir, caption_extension, batch_size, thresh], + ) diff --git a/mytraining.ps b/mytraining.ps new file mode 100644 index 0000000..296d96c --- /dev/null +++ b/mytraining.ps @@ -0,0 +1,609 @@ +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned.ckpt" ` + --train_data_dir="D:\dreambooth\train_bernard\train_man" ` + --reg_data_dir="D:\dreambooth\train_bernard\reg_man" ` + --output_dir="D:\dreambooth\train_bernard" ` + --prior_loss_weight=1.0 ` + --resolution="512,512" ` + --train_batch_size=1 ` + --learning_rate=1e-6 ` + --max_train_steps=3000 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --gradient_checkpointing ` + --save_every_n_epochs=1 + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6.py ` + --pretrained_model_name_or_path="D:\models\bernard\asd man-3000-remgb-sd15.ckpt" ` + --train_data_dir="D:\dreambooth\train_bernard\train_man" ` + --reg_data_dir="D:\dreambooth\train_bernard\reg_man" ` + --output_dir="D:\dreambooth\train_bernard" ` + --prior_loss_weight=1.0 ` + --resolution="512,512" ` + --train_batch_size=1 ` + --learning_rate=1e-6 ` + --max_train_steps=1500 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --gradient_checkpointing ` + --save_every_n_epochs=1 + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\train_bernard\train_man" ` + --reg_data_dir="D:\dreambooth\train_bernard\reg_man" ` + --output_dir="D:\dreambooth\train_bernard" ` + --prior_loss_weight=1.0 ` + --resolution="512,512" ` + --train_batch_size=1 ` + --learning_rate=1e-6 ` + --max_train_steps=4500 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --gradient_checkpointing ` + --no_token_padding ` + --save_every_n_epochs=1 + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\source\alex\train" ` + --output_dir="D:\dreambooth\train_alex" ` + --prior_loss_weight=1.0 ` + --resolution="448,640" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=4500 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --shuffle_caption + +# -fine_tuning + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\source\alex\train\50_portrait-pp" ` + --output_dir="D:\dreambooth\train_alex" ` + --resolution="448,640" ` + --train_batch_size=8 ` + 
--learning_rate=1e-6 ` + --max_train_steps=4500 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --shuffle_caption + +Resume: + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\source\alet_et_bernard\landscape-pp" ` + --output_dir="D:\dreambooth\train_alex_and_bernard" ` + --resolution="640,448" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=550 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=200 ` + --seed=23 ` + --save_half + +# Mollie Monger + +e1: + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\train_mollie_monger\landscape-pp" ` + --output_dir="D:\dreambooth\train_mollie_monger\output" ` + --resolution="640,448" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=625 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=200 ` + --seed=23 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\mollie_monger-kohya-l-200-sd15.ckpt" ` + --train_data_dir="D:\dreambooth\train_mollie_monger\portrait-pp" ` + --output_dir="D:\dreambooth\train_mollie_monger\output" ` + --resolution="448,640" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=1275 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=200 ` + --seed=23 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\mollie_monger-kohya-l+p-200-sd15.ckpt" ` + --train_data_dir="D:\dreambooth\train_mollie_monger\square-pp" ` + --output_dir="D:\dreambooth\train_mollie_monger\output" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=500 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=200 ` + --seed=23 ` + --save_half + +e2: + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\mollie_monger\mollie_monger-kohya-l+p+s-r200-e1-sd15.ckpt" ` + --train_data_dir="D:\dreambooth\train_mollie_monger\landscape-pp" ` + --output_dir="D:\dreambooth\train_mollie_monger\output" ` + --resolution="640,448" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=625 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=200 ` + --seed=23 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\mollie_monger\last.ckpt" ` + --train_data_dir="D:\dreambooth\train_mollie_monger\portrait-pp" ` + --output_dir="D:\dreambooth\train_mollie_monger\output" ` + --resolution="448,640" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=1275 ` + 
--use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=200 ` + --seed=23 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\mollie_monger\last.ckpt" ` + --train_data_dir="D:\dreambooth\train_mollie_monger\square-pp" ` + --output_dir="D:\dreambooth\train_mollie_monger\output" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=500 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=200 ` + --seed=23 ` + --save_half + + + Midjourney images download: + + https://storage.googleapis.com/dream-machines-output/2932e6e4-ddef-410e-947b-2a6275e31f35/0_3.png + + # Midjourney + + accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\train_midjourney_v4\all data" ` + --output_dir="D:\dreambooth\train_midjourney_v4\model" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=528 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=12 ` + --seed=23 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\midjourney_v4-khoya-r100-e1-sd15.ckpt" ` + --train_data_dir="D:\dreambooth\train_midjourney_v4\data2" ` + --output_dir="D:\dreambooth\train_midjourney_v4\model" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=850 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=100 ` + --seed=23 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\models\midjourney_v4_finetune\epoch-000001.ckpt" ` + --train_data_dir="D:\dreambooth\train_midjourney_v4\newdata3" ` + --output_dir="D:\dreambooth\train_midjourney_v4\model" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=159 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=24 ` + --seed=23 ` + --save_half + +# train n + + # Midjourney + + accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v6-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\train_childrens_drawings\model\last2.ckpt" ` + --train_data_dir="D:\dreambooth\train_childrens_drawings\data2-pp" ` + --output_dir="D:\dreambooth\train_childrens_drawings\model" ` + --resolution="704,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=312 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --fine_tuning_repeat=48 ` + --seed=23 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\train_childrens_drawings\model\last2.ckpt" ` + --train_data_dir="D:\dreambooth\train_childrens_drawings\data2-pp" ` + 
--output_dir="D:\dreambooth\train_childrens_drawings\model" ` + --resolution="704,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=312 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=48 ` + --seed=23 ` + --save_half + +# twq + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\source\bernardv2-ft" ` + --output_dir="D:\dreambooth\train_bernard\model" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=720 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=48 ` + --save_half + +# the white queen + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\landscape-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l" ` + --resolution="704,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=520 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\portrait-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p" ` + --resolution="512,704" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=260 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l+p\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\square-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p+s" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=220 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --seed=23 ` + --save_half + +# the white queen slow progress init phase + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\landscape-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l" ` + --resolution="704,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=260 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=80 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l\last.ckpt" ` + 
--train_data_dir="D:\dreambooth\training_twq\the_white_queen\portrait-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p" ` + --resolution="512,704" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=130 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=80 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l+p\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\square-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p+s" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=90 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=80 ` + --seed=23 ` + --save_half + +# the white queen slow progress extra steps phase + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l+p+s\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\landscape-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l" ` + --resolution="704,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=130 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\portrait-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p" ` + --resolution="512,704" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=65 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l+p\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\square-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p+s" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=45 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --seed=23 ` + --save_half + +# the queen of heart init phase + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\models\v1-5-pruned-mse-vae.ckpt" ` + --train_data_dir="D:\dreambooth\train_qoh\landscape-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l" ` + --resolution="704,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=260 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=80 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 
train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\portrait-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p" ` + --resolution="512,704" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=130 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=80 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l+p\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\square-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p+s" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=90 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=80 ` + --seed=23 ` + --save_half + +# the white queen slow progress extra steps phase + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l+p+s\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\landscape-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l" ` + --resolution="704,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=130 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\portrait-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p" ` + --resolution="512,704" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=65 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --save_half + +accelerate launch --num_cpu_threads_per_process 6 train_db_fixed_v7-ber.py ` + --pretrained_model_name_or_path="D:\dreambooth\training_twq\the_white_queen\model+l+p\last.ckpt" ` + --train_data_dir="D:\dreambooth\training_twq\the_white_queen\square-ft" ` + --output_dir="D:\dreambooth\training_twq\the_white_queen\model+l+p+s" ` + --resolution="512,512" ` + --train_batch_size=8 ` + --learning_rate=1e-6 ` + --max_train_steps=45 ` + --use_8bit_adam ` + --xformers ` + --mixed_precision="fp16" ` + --cache_latents ` + --save_every_n_epochs=1 ` + --fine_tuning ` + --dataset_repeats=40 ` + --seed=23 ` + --save_half \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index dde1116..eae6693 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,13 @@ safetensors==0.2.6 gradio altair easygui +tkinter +# for BLIP captioning +requests +timm +fairscale +# for WD14 captioning +tensorflow<2.11 +huggingface-hub +# for kohya_ss library . 
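\ No newline at end of file

Note on the `tkinter` entry above: tkinter ships with the standard CPython installer rather than being installed from PyPI, and it is what the new file/folder pickers in library/common_gui.py are built on. The snippet below is a minimal, self-contained sketch of that dialog pattern (a hidden, always-on-top Tk root so the picker cannot open behind the browser window hosting the Gradio UI); it is illustrative only and not taken verbatim from the patch.

from tkinter import Tk, filedialog


def pick_folder(current_path: str = '') -> str:
    root = Tk()
    root.wm_attributes('-topmost', 1)  # keep the dialog above other windows, including the browser
    root.withdraw()                    # hide the empty Tk root window itself
    folder_path = filedialog.askdirectory()
    root.destroy()

    # fall back to the previous value if the user cancels the dialog
    return folder_path if folder_path else current_path


if __name__ == '__main__':
    print(pick_folder())
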
diff --git a/tools/convert_diffusers20_original_sd.py b/tools/convert_diffusers20_original_sd.py
index 652b196..d7f2c1e 100644
--- a/tools/convert_diffusers20_original_sd.py
+++ b/tools/convert_diffusers20_original_sd.py
@@ -1,12 +1,17 @@
 # convert Diffusers v1.x/v2.0 model to original Stable Diffusion
 # v1: initial version
+# v2: support safetensors
+# v3: fix to support another format
+# v4: support safetensors in Diffusers
 
 import argparse
 import os
 import torch
 from diffusers import StableDiffusionPipeline
+
 from library import model_util as model_util
 
+
 def convert(args):
   # check the arguments
   load_dtype = torch.float16 if args.fp16 else None
@@ -56,7 +61,7 @@ def convert(args):
     print(f"model saved. total converted state_dict keys: {key_count}")
   else:
     print(f"copy scheduler/tokenizer config from: {args.reference_model}")
-    model_util.save_diffusers_checkpoint(v2_model, args.model_to_save, text_encoder, unet, args.reference_model, vae)
+    model_util.save_diffusers_checkpoint(v2_model, args.model_to_save, text_encoder, unet, args.reference_model, vae, args.use_safetensors)
 
   print(f"model saved.")
 
@@ -76,6 +81,8 @@ if __name__ == '__main__':
                       help='global_step to write to checkpoint / checkpointに記録するglobal_stepの値')
   parser.add_argument("--reference_model", type=str, default=None,
                       help="reference model for scheduler/tokenizer, required in saving Diffusers, copy scheduler/tokenizer from this / scheduler/tokenizerのコピー元のDiffusersモデル、Diffusers形式で保存するときに必要")
+  parser.add_argument("--use_safetensors", action='store_true',
+                      help="use safetensors format to save Diffusers model (checkpoint depends on the file extension) / Diffusersモデルをsafetensors形式で保存する(checkpointは拡張子で自動判定)")
   parser.add_argument("model_to_load", type=str, default=None,
                       help="model to load: checkpoint file or Diffusers model's directory / 読み込むモデル、checkpointかDiffusers形式モデルのディレクトリ")
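
For reference, a hedged example of invoking the converter with the new --use_safetensors flag, following the same command-string-plus-subprocess pattern the GUI tabs in this patch use. The model paths and reference model below are placeholders, not values taken from the patch, and a Windows environment is assumed (as elsewhere in this repository).

import subprocess

# Placeholder paths for illustration only.
source_model = r'D:\models\example-v2-model.ckpt'
target_folder = r'D:\models\example-v2-model-diffusers'

run_cmd = 'python "tools/convert_diffusers20_original_sd.py"'
run_cmd += ' --fp16'
run_cmd += ' --reference_model="stabilityai/stable-diffusion-2-1-base"'
run_cmd += ' --use_safetensors'  # new in this patch: save the Diffusers model as safetensors
run_cmd += f' "{source_model}"'
run_cmd += f' "{target_folder}"'

print(run_cmd)
# Pass the full command string, as the GUI tabs in this patch do (works on Windows).
subprocess.run(run_cmd)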