【ComfyUI】python调用生图API,实现批量出图
官方给的示例:
https://github.com/comfyanonymous/ComfyUI/blob/master/script_examples/websockets_api_example.py
一、环境准备
1.1 comfyui环境安装参考
【本地运行AI绘画】ComfyUI的安装与使用(一)(windows+1660ti 6G显存)
1.2 调用可能需要安装的python包
对应 import websocket
pip install websocket-client
(json 是 Python 内置标准库,无需 pip 安装)
1.3 comfyui打开开发者模式,可以直接下载任意工作流 api文件
1.3.1 在英文最新界面打开
1.3.2 中文老界面打开
1.4 加载默认工作流,下载api,看看
保存的结果是 API 格式的 json 文件,和普通工作流文件很相似,注意不要弄混了
二、 代码
2.1 指定任意模型进行生图(找到名字和在api中的位置)
局部代码
def text_to_image(prompt_text,
                  model_name="SD1.X/dreamshaper_8.safetensors",
                  seed=6,
                  steps=20,
                  width=512,
                  height=256,
                  batch_size=2,
                  local_save_dir='./output',
                  api_file='api_demo.json'
                  ):
    """Excerpt: patch the node inputs of the exported API-format workflow.

    The string keys ("3", "4", ...) are node ids taken from api_demo.json;
    they must match the nodes of the workflow you exported.
    """
    prompt = read_json(api_file)
    prompt["3"]["inputs"]["seed"] = seed  # change the seed to get a different image
    prompt["3"]["inputs"]["steps"] = steps  # number of sampling steps
    prompt["4"]["inputs"]["ckpt_name"] = model_name
    prompt["5"]["inputs"]["width"] = width
    prompt["5"]["inputs"]["height"] = height
    prompt["5"]["inputs"]["batch_size"] = batch_size
    prompt["6"]["inputs"]["text"] = prompt_text
2.1.1 运行结果
2.1.2 参数解析
在comfyui的界面里确认有的模型名免得传错
打开保存的 api_demo.json,找到相应的代码改写
2.1.3 完整代码
#This is an example that uses the websockets api to know when a prompt execution is done
#Once the prompt execution is done it downloads the images using the /history endpoint
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client) # pip install websocket-client
import uuid
import json
import urllib.request
import urllib.parse
import os
server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())
def queue_prompt(prompt):
    """POST a workflow dict to the ComfyUI /prompt endpoint.

    Returns the server's parsed JSON reply (contains 'prompt_id').
    """
    payload = json.dumps({"prompt": prompt, "client_id": client_id}).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=payload)
    # Close the HTTP response deterministically — the original leaked it.
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())
def get_image(filename, subfolder, folder_type):
    """Download one generated image from the /view endpoint; return raw bytes."""
    query = urllib.parse.urlencode({
        "filename": filename,
        "subfolder": subfolder,
        "type": folder_type,
    })
    with urllib.request.urlopen("http://{}/view?{}".format(server_address, query)) as response:
        return response.read()
def get_history(prompt_id):
    """Fetch the execution-history record for *prompt_id* as a dict."""
    url = "http://{}/history/{}".format(server_address, prompt_id)
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read())
def get_images(ws, prompt):
    """Queue *prompt*, wait on the websocket until execution finishes,
    then download every output image via the /history endpoint.

    Returns a dict mapping output node id -> list of raw image bytes.
    """
    prompt_id = queue_prompt(prompt)['prompt_id']

    # Block until the server reports that no node is executing for our prompt.
    while True:
        frame = ws.recv()
        if not isinstance(frame, str):
            # Binary frames carry latent previews; skip them.
            # (To decode one: Image.open(BytesIO(frame[8:])) yields a PIL image.)
            continue
        message = json.loads(frame)
        if message['type'] != 'executing':
            continue
        data = message['data']
        if data['node'] is None and data['prompt_id'] == prompt_id:
            break  # execution is done

    history = get_history(prompt_id)[prompt_id]
    output_images = {}
    for node_id, node_output in history['outputs'].items():
        output_images[node_id] = [
            get_image(img['filename'], img['subfolder'], img['type'])
            for img in node_output.get('images', [])
        ]
    return output_images
def read_json(api_file="controlnet_api.json"):
    """Load an API-format workflow JSON file and return it as a dict."""
    with open(api_file, "r", encoding="utf-8") as handle:
        return json.load(handle)
def text_to_image(prompt_text,
                  model_name="SD1.X/dreamshaper_8.safetensors",
                  seed=6,
                  steps=20,
                  width=512,
                  height=256,
                  batch_size=2,
                  local_save_dir='./output',
                  api_file='api_demo.json'
                  ):
    """Run a text-to-image workflow on the ComfyUI server and save the results.

    Loads the exported API-format workflow from *api_file*, patches its node
    inputs (seed, steps, checkpoint, size, batch size, positive prompt), waits
    for execution over the websocket, and writes each output image under
    *local_save_dir* as PNG.
    """
    # Hoisted out of the inner loop (the original re-imported per image).
    import io
    from PIL import Image  # pip install pillow

    prompt = read_json(api_file)
    # Node ids ("3", "4", ...) come from the exported api_file and must
    # match the workflow you saved.
    prompt["3"]["inputs"]["seed"] = seed  # change the seed to get a different image
    prompt["3"]["inputs"]["steps"] = steps  # number of sampling steps
    prompt["4"]["inputs"]["ckpt_name"] = model_name
    prompt["5"]["inputs"]["width"] = width
    prompt["5"]["inputs"]["height"] = height
    prompt["5"]["inputs"]["batch_size"] = batch_size
    prompt["6"]["inputs"]["text"] = prompt_text

    ws = websocket.WebSocket()
    ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
    try:
        images = get_images(ws, prompt)
    finally:
        # The original leaked the socket if get_images raised.
        ws.close()

    os.makedirs(local_save_dir, exist_ok=True)
    # Running counter: the original restarted the index per output node,
    # so images from different nodes overwrote each other on disk.
    index = 0
    for node_id in images:
        for image_data in images[node_id]:
            image = Image.open(io.BytesIO(image_data))
            # NOTE(review): prompt_text may contain characters that are
            # illegal in filenames (e.g. '/' or ':') — confirm/sanitize.
            save_path = f"{local_save_dir}/{prompt_text[:20]}_{index}.png"
            image.save(save_path)
            print(f"Saved image to {save_path}")
            index += 1
if __name__ == "__main__":
    # Example: generate images for a single prompt using a previously
    # exported API-format workflow file (adjust the path to your own).
    text_to_image(
        prompt_text="a boy",
        api_file=r'D:\deploy2\api_demo.json')