WebUI/webui.py


import os
import signal
import threading
import modules.codeformer_model as codeformer
import modules.extras
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img
import modules.lowvram
import modules.paths
import modules.scripts
import modules.sd_hijack
import modules.sd_models
import modules.shared as shared
import modules.txt2img
import modules.ui
from modules import devices
from modules import modelloader
from modules.paths import script_path
from modules.shared import cmd_opts
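
# One-time startup: tidy the model folders, then set up checkpoint, CodeFormer and GFPGAN model handling.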
modelloader.cleanup_models()
modules.sd_models.setup_model(cmd_opts.ckpt_dir)
codeformer.setup_model(cmd_opts.codeformer_models_path)
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
modelloader.load_upscalers()
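
# A single lock so only one generation/postprocessing job touches the GPU at a time.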
queue_lock = threading.Lock()


def wrap_queued_call(func):
    def f(*args, **kwargs):
        with queue_lock:
            res = func(*args, **kwargs)

        return res

    return f
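

# Resets shared.state progress fields, runs the wrapped handler under queue_lock,
# and frees GPU memory before and after via devices.torch_gc().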
def wrap_gradio_gpu_call(func):
    def f(*args, **kwargs):
        devices.torch_gc()

        shared.state.sampling_step = 0
        shared.state.job_count = -1
        shared.state.job_no = 0
        shared.state.job_timestamp = shared.state.get_job_timestamp()
        shared.state.current_latent = None
        shared.state.current_image = None
        shared.state.current_image_sampling_step = 0
        shared.state.interrupted = False

        with queue_lock:
            res = func(*args, **kwargs)

        shared.state.job = ""
        shared.state.job_count = 0

        devices.torch_gc()

        return res

    return modules.ui.wrap_gradio_call(f)
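

# Load user scripts and the initial checkpoint; reload the weights in place whenever
# the "sd_model_checkpoint" option changes, serialized through the same queue lock.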
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
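

# Build the Gradio UI, wire in the GPU-wrapped handlers, and launch the web server.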
def webui():
    # make the program just exit at ctrl+c without waiting for anything
    def sigint_handler(sig, frame):
        print(f'Interrupted with signal {sig} in {frame}')
        os._exit(0)

    signal.signal(signal.SIGINT, sigint_handler)

    demo = modules.ui.create_ui(
        txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
        img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
        run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
        run_pnginfo=modules.extras.run_pnginfo,
        run_modelmerger=modules.extras.run_modelmerger
    )

    demo.launch(
        share=cmd_opts.share,
        server_name="0.0.0.0" if cmd_opts.listen else None,
        server_port=cmd_opts.port,
        debug=cmd_opts.gradio_debug,
        # cmd_opts.gradio_auth holds "user:pass" pairs separated by commas
        auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
        inbrowser=cmd_opts.autolaunch,
    )


if __name__ == "__main__":
    webui()