infer styles from prompts, and an option to control the behavior
commit f98f4f73aa (parent 30bbb8bce3)
@@ -265,6 +265,14 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
         else:
             prompt += ("" if prompt == "" else "\n") + line

+    if shared.opts.infotext_styles != "Ignore":
+        found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
+
+        if shared.opts.infotext_styles == "Apply":
+            res["Styles array"] = found_styles
+        elif shared.opts.infotext_styles == "Apply if any" and found_styles:
+            res["Styles array"] = found_styles
+
     res["Prompt"] = prompt
     res["Negative prompt"] = negative_prompt

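For reference, a standalone condensation of the new branch, with shared.opts and the style database stubbed out; only the control flow is taken from the hunk above, and the function name is invented for illustration:

def resolve_styles_field(setting, found_styles, res):
    # "Apply" always overwrites the styles dropdown, even with an empty list.
    if setting == "Apply":
        res["Styles array"] = found_styles
    # "Apply if any" overwrites only when at least one saved style matched.
    elif setting == "Apply if any" and found_styles:
        res["Styles array"] = found_styles
    # "Ignore" and "Discard" leave the dropdown alone; in the real code "Ignore"
    # additionally skips the extraction entirely, so the prompt text stays untouched.
    return res

print(resolve_styles_field("Apply", [], {}))                    # {'Styles array': []}
print(resolve_styles_field("Apply if any", [], {}))             # {}
print(resolve_styles_field("Apply if any", ["cinematic"], {}))  # {'Styles array': ['cinematic']}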
@@ -260,6 +260,10 @@ class OptionInfo:
         self.comment_after += f"<span class='info'>({info})</span>"
         return self

+    def html(self, html):
+        self.comment_after += html
+        return self
+
     def needs_restart(self):
         self.comment_after += " <span class='info'>(requires restart)</span>"
         return self
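The new html() helper follows the same fluent pattern as the surrounding info() and needs_restart() helpers: each appends to comment_after and returns self so calls can be chained when an option is declared. A minimal standalone mimic of that pattern (not the real OptionInfo class, just an illustration of why returning self matters):

class FluentOption:
    """Toy stand-in for OptionInfo, reduced to the chaining helpers."""

    def __init__(self, label):
        self.label = label
        self.comment_after = ""

    def info(self, text):
        # mirrors OptionInfo.info(): wrap the note in a styled span
        self.comment_after += f"<span class='info'>({text})</span>"
        return self

    def html(self, markup):
        # mirrors the new OptionInfo.html(): append raw markup as-is
        self.comment_after += markup
        return self

opt = FluentOption("Example label").info("short note").html("<ul><li>extra help</li></ul>")
print(opt.comment_after)
# <span class='info'>(short note)</span><ul><li>extra help</li></ul>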
@@ -488,7 +492,14 @@ options_templates.update(options_section(('infotext', "Infotext"), {
     "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
     "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
     "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
-    "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
+    "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
+    "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""<ul style='margin-left: 1.5em'>
+<li>Ignore: keep prompt and styles dropdown as it is.</li>
+<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
+<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
+<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
+</ul>"""),
+
 }))

 options_templates.update(options_section(('ui', "Live previews"), {
@@ -1,6 +1,7 @@
 import csv
 import os
 import os.path
+import re
 import typing
 import shutil

@@ -28,6 +29,44 @@ def apply_styles_to_prompt(prompt, styles):
     return prompt


+re_spaces = re.compile(" +")
+
+
+def extract_style_text_from_prompt(style_text, prompt):
+    stripped_prompt = re.sub(re_spaces, " ", prompt.strip())
+    stripped_style_text = re.sub(re_spaces, " ", style_text.strip())
+    if "{prompt}" in stripped_style_text:
+        left, right = stripped_style_text.split("{prompt}", 2)
+        if stripped_prompt.startswith(left) and stripped_prompt.endswith(right):
+            prompt = stripped_prompt[len(left):len(stripped_prompt)-len(right)]
+            return True, prompt
+    else:
+        if stripped_prompt.endswith(stripped_style_text):
+            prompt = stripped_prompt[:len(stripped_prompt)-len(stripped_style_text)]
+
+            if prompt.endswith(', '):
+                prompt = prompt[:-2]
+
+            return True, prompt
+
+    return False, prompt
+
+
+def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt):
+    if not style.prompt and not style.negative_prompt:
+        return False, prompt, negative_prompt
+
+    match_positive, extracted_positive = extract_style_text_from_prompt(style.prompt, prompt)
+    if not match_positive:
+        return False, prompt, negative_prompt
+
+    match_negative, extracted_negative = extract_style_text_from_prompt(style.negative_prompt, negative_prompt)
+    if not match_negative:
+        return False, prompt, negative_prompt
+
+    return True, extracted_positive, extracted_negative
+
+
 class StyleDatabase:
     def __init__(self, path: str):
         self.no_style = PromptStyle("None", "", "")
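A short usage sketch of the new helper, assuming the webui repository root is on sys.path so modules.styles is importable; the style texts and prompts are made up for illustration:

from modules.styles import extract_style_text_from_prompt

# Suffix form: the whole style text must sit at the end of the prompt;
# a trailing ", " left over after removal is trimmed as well.
print(extract_style_text_from_prompt("masterpiece, best quality", "a cat, masterpiece, best quality"))
# (True, 'a cat')

# Placeholder form: "{prompt}" splits the style into a required prefix and suffix.
print(extract_style_text_from_prompt("photo of {prompt}, 35mm", "photo of a cat, 35mm"))
# (True, 'a cat')

# No match: the prompt comes back unchanged.
print(extract_style_text_from_prompt("oil painting", "a cat"))
# (False, 'a cat')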
@@ -67,10 +106,34 @@ class StyleDatabase:
         if os.path.exists(path):
             shutil.copy(path, f"{path}.bak")

-        fd = os.open(path, os.O_RDWR|os.O_CREAT)
+        fd = os.open(path, os.O_RDWR | os.O_CREAT)
         with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file:
             # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple,
             # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict()
             writer = csv.DictWriter(file, fieldnames=PromptStyle._fields)
             writer.writeheader()
             writer.writerows(style._asdict() for k, style in self.styles.items())
+
+    def extract_styles_from_prompt(self, prompt, negative_prompt):
+        extracted = []
+
+        applicable_styles = list(self.styles.values())
+
+        while True:
+            found_style = None
+
+            for style in applicable_styles:
+                is_match, new_prompt, new_neg_prompt = extract_style_from_prompts(style, prompt, negative_prompt)
+                if is_match:
+                    found_style = style
+                    prompt = new_prompt
+                    negative_prompt = new_neg_prompt
+                    break
+
+            if not found_style:
+                break
+
+            applicable_styles.remove(found_style)
+            extracted.append(found_style.name)
+
+        return list(reversed(extracted)), prompt, negative_prompt
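A sketch of the whole extraction loop, again assuming the repository is importable; the styles.csv path is hypothetical and never written to, and the styles are filled in by hand so the example stays self-contained:

from modules.styles import PromptStyle, StyleDatabase

db = StyleDatabase("styles-example.csv")  # hypothetical path, does not need to exist
db.styles["cinematic"] = PromptStyle("cinematic", "{prompt}, cinematic lighting", "blurry")
db.styles["portrait"] = PromptStyle("portrait", "close-up portrait of {prompt}", "")

found, prompt, negative = db.extract_styles_from_prompt(
    "close-up portrait of a cat, cinematic lighting", "blurry")

print(found)     # ['portrait', 'cinematic']
print(prompt)    # a cat
print(negative)  # (empty string)
# The extracted names come back reversed, so re-applying the styles in the
# returned order reproduces the pasted prompt.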
@@ -621,6 +621,7 @@ def create_ui():
             (subseed_strength, "Variation seed strength"),
             (seed_resize_from_w, "Seed resize from-1"),
             (seed_resize_from_h, "Seed resize from-2"),
+            (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
             (denoising_strength, "Denoising strength"),
             (enable_hr, lambda d: "Denoising strength" in d),
             (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
@@ -1036,6 +1037,7 @@ def create_ui():
             (subseed_strength, "Variation seed strength"),
             (seed_resize_from_w, "Seed resize from-1"),
             (seed_resize_from_h, "Seed resize from-2"),
+            (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
             (denoising_strength, "Denoising strength"),
             (mask_blur, "Mask blur"),
             *modules.scripts.scripts_img2img.infotext_fields
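The lambda registered for the styles dropdown in both paste-field tables only fires when the parsed infotext actually carries a "Styles array" list; otherwise it returns gr.update(), Gradio's empty update, so the dropdown keeps its current value. A standalone sketch of that behaviour with the Gradio sentinel stubbed out:

NO_CHANGE = object()  # stand-in for gr.update()

def styles_paste_value(parsed):
    # Mirrors the lambda above: use the parsed list when present,
    # otherwise leave the styles dropdown as it is.
    value = parsed.get("Styles array")
    return value if isinstance(value, list) else NO_CHANGE

print(styles_paste_value({"Styles array": ["cinematic", "portrait"]}))  # ['cinematic', 'portrait']
print(styles_paste_value({"Prompt": "a cat"}) is NO_CHANGE)             # True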