initial prototype by borrowing contracts

This commit is contained in:
arcticfaded 2022-10-17 06:58:42 +00:00 committed by AUTOMATIC1111
parent cccc5a20fc
commit 60251c9456
4 changed files with 102 additions and 31 deletions

60
modules/api/api.py Normal file
View File

@ -0,0 +1,60 @@
from modules.api.processing import StableDiffusionProcessingAPI
from modules.processing import StableDiffusionProcessingTxt2Img, process_images
import modules.shared as shared
import uvicorn
from fastapi import FastAPI, Body, APIRouter
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field, Json
import json
import io
import base64
# Module-level FastAPI application; Api.__init__ registers routes on it and
# Api.launch serves it with uvicorn.
app = FastAPI()
class TextToImageResponse(BaseModel):
    # Response body for POST /v1/txt2img (see Api.text2imgapi).
    images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")  # base64-encoded PNG images
    parameters: Json  # the original request, serialized via json.dumps(vars(txt2imgreq))
    info: Json  # generation info, serialized via json.dumps(processed.info)
class Api:
    """Minimal REST API exposing the txt2img pipeline over HTTP.

    Routes are registered on the module-level FastAPI ``app``. The handler
    callables passed to the constructor are kept for the endpoints that are
    not implemented yet.
    """

    def __init__(self, txt2img, img2img, run_extras, run_pnginfo):
        # Store the pipeline entry points so the stub endpoints below can
        # dispatch to them once implemented (they were previously dropped).
        self.txt2img = txt2img
        self.img2img = img2img
        self.run_extras = run_extras
        self.run_pnginfo = run_pnginfo
        self.router = APIRouter()
        app.add_api_route("/v1/txt2img", self.text2imgapi, methods=["POST"])

    def text2imgapi(self, txt2imgreq: StableDiffusionProcessingAPI):
        """Run a txt2img generation and return base64-encoded PNG images.

        The request model's fields are splatted directly into
        StableDiffusionProcessingTxt2Img; the currently loaded checkpoint
        from ``shared.sd_model`` is attached before processing.
        """
        p = StableDiffusionProcessingTxt2Img(**vars(txt2imgreq))
        p.sd_model = shared.sd_model
        processed = process_images(p)

        b64images = []
        for image in processed.images:
            buffer = io.BytesIO()
            image.save(buffer, format="png")
            # b64encode returns bytes; decode so the value matches the
            # declared list[str] field on TextToImageResponse.
            b64images.append(base64.b64encode(buffer.getvalue()).decode("utf-8"))

        return TextToImageResponse(images=b64images,
                                   parameters=json.dumps(vars(txt2imgreq)),
                                   info=json.dumps(processed.info))

    # NOTE(review): "endoint" is a typo for "endpoint" in the three method
    # names below; kept unchanged so any external callers keep working.
    def img2imgendoint(self):
        raise NotImplementedError

    def extrasendoint(self):
        raise NotImplementedError

    def pnginfoendoint(self):
        raise NotImplementedError

    def launch(self, server_name, port):
        """Mount the router on the shared app and serve it (blocking call)."""
        app.include_router(self.router)
        uvicorn.run(app, host=server_name, port=port)

View File

@ -723,4 +723,4 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        del x
        devices.torch_gc()
        return samples

View File

@ -74,7 +74,7 @@ parser.add_argument("--disable-console-progressbars", action='store_true', help=
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the api instead of the webui")
cmd_opts = parser.parse_args()
restricted_opts = [

View File

@ -97,40 +97,51 @@ def webui():
        os._exit(0)
    signal.signal(signal.SIGINT, sigint_handler)

    if cmd_opts.api:
        from modules.api.api import Api
        api = Api(txt2img=modules.txt2img.txt2img,
                  img2img=modules.img2img.img2img,
                  run_extras=modules.extras.run_extras,
                  run_pnginfo=modules.extras.run_pnginfo)
        api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1",
                   port=cmd_opts.port if cmd_opts.port else 7861)
    else:
        while 1:
            demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)

            app, local_url, share_url = demo.launch(
                share=cmd_opts.share,
                server_name="0.0.0.0" if cmd_opts.listen else None,
                server_port=cmd_opts.port,
                debug=cmd_opts.gradio_debug,
                auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
                inbrowser=cmd_opts.autolaunch,
                prevent_thread_lock=True
            )

            app.add_middleware(GZipMiddleware, minimum_size=1000)

            while 1:
                time.sleep(0.5)
                if getattr(demo, 'do_restart', False):
                    time.sleep(0.5)
                    demo.close()
                    time.sleep(0.5)
                    break

            sd_samplers.set_samplers()

            print('Reloading Custom Scripts')
            modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
            print('Reloading modules: modules.ui')
            importlib.reload(modules.ui)
            print('Refreshing Model List')
            modules.sd_models.list_models()
            print('Restarting Gradio')

if __name__ == "__main__":