本文档总结了从零开始部署 ComfyUI、下载模型、配置 API 工作流,并通过 Python 调用 API 自动生成图像的完整流程。
# 克隆仓库(使用国内镜像加速)
git clone https://gitclone.com/github.com/Comfy-Org/ComfyUI.git
# 或直接使用官方地址
git clone https://github.com/Comfy-Org/ComfyUI.git
cd ComfyUI
python -m venv venv
source venv/bin/activate
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124
pip install -r requirements.txt
# 限制使用 GPU 0 和 1,开启外部访问
CUDA_VISIBLE_DEVICES=0,1 python main.py --listen 0.0.0.0 --port 32794
如果出现
RuntimeError: The NVIDIA driver on your system is too old,请升级驱动到 550+ 或安装 CUDA 12.1 版本的 PyTorch。
export HF_ENDPOINT=https://hf-mirror.com
# 永久生效
echo 'export HF_ENDPOINT=https://hf-mirror.com' >> ~/.bashrc
source ~/.bashrc
cd ~/comfyUI/ComfyUI
mkdir -p models/diffusion_models models/text_encoders models/vae
# 主模型
wget -c https://hf-mirror.com/Comfy-Org/z_image_turbo/resolve/main/split_files/diffusion_models/z_image_turbo_bf16.safetensors -P models/diffusion_models/
# 文本编码器
wget -c https://hf-mirror.com/Comfy-Org/z_image_turbo/resolve/main/split_files/text_encoders/qwen_3_4b.safetensors -P models/text_encoders/
# VAE
wget -c https://hf-mirror.com/Comfy-Org/z_image_turbo/resolve/main/split_files/vae/ae.safetensors -P models/vae/
mkdir -p models/loras
# 主模型
wget -c https://hf-mirror.com/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_2512_fp8_e4m3fn.safetensors -P models/diffusion_models/
# LoRA
wget -c https://hf-mirror.com/lightx2v/Qwen-Image-2512-Lightning/resolve/main/Qwen-Image-2512-Lightning-4steps-V1.0-fp32.safetensors -P models/loras/
# 文本编码器
wget -c https://hf-mirror.com/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors -P models/text_encoders/
# VAE
wget -c https://hf-mirror.com/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors -P models/vae/
# DreamShaper XL(单文件版)
wget -c https://hf-mirror.com/jayparmr/DreamShaper_XL1_0_Alpha2/resolve/main/DreamShaper_XL1_0_Alpha2.safetensors -P models/checkpoints/
在 ComfyUI 界面中启用开发者模式后,使用 “Save (API Format)” 导出 workflow_api.json。普通 “Save” 保存的文件包含界面布局信息,不适合 API 调用。
import json
import time
import random
import requests
from urllib.parse import urljoin
def find_node_by_class(workflow, class_type):
    """Return the id of the first node whose class_type matches, else None.

    workflow is an API-format workflow dict mapping node-id strings to
    node dicts; dict insertion order decides which match wins.
    """
    return next(
        (nid for nid, spec in workflow.items()
         if spec.get("class_type") == class_type),
        None,
    )
def queue_prompt(workflow, server_url):
    """POST a workflow to the ComfyUI /prompt endpoint.

    Returns the server-assigned prompt id; raises requests.HTTPError
    on a non-2xx response.
    """
    endpoint = urljoin(server_url, "/prompt")
    response = requests.post(endpoint, json={"prompt": workflow})
    response.raise_for_status()
    return response.json()["prompt_id"]
def get_history(prompt_id, server_url, max_wait=120):
    """Poll /history/<prompt_id> until the task finishes or times out.

    Checks once per second; returns the history entry dict for
    prompt_id, or None if it never appears within max_wait seconds.
    """
    url = urljoin(server_url, f"/history/{prompt_id}")
    deadline = time.time() + max_wait
    while time.time() < deadline:
        response = requests.get(url)
        if response.status_code == 200:
            payload = response.json()
            # The entry only appears once the prompt has completed.
            if prompt_id in payload:
                return payload[prompt_id]
        time.sleep(1)
    return None
def save_images(history, server_url, output_prefix):
    """Download every output image recorded in a finished history entry.

    history: the per-prompt history dict returned by get_history().
    server_url: base URL of the ComfyUI server.
    output_prefix: local filename prefix for saved images.
    Returns the list of local file names written.
    """
    saved = []
    for node_output in history.get("outputs", {}).values():
        for image_info in node_output.get("images", []):
            filename = image_info["filename"]
            # ComfyUI's /view endpoint needs subfolder and type in
            # addition to filename to locate the file on disk; passing
            # them via `params` also URL-encodes the values safely.
            params = {
                "filename": filename,
                "subfolder": image_info.get("subfolder", ""),
                "type": image_info.get("type", "output"),
            }
            resp = requests.get(urljoin(server_url, "/view"), params=params)
            resp.raise_for_status()  # fail loudly instead of saving an error page
            local_name = f"{output_prefix}_{filename}"
            with open(local_name, "wb") as f:
                f.write(resp.content)
            print(f"已保存: {local_name}")
            saved.append(local_name)
    return saved
def main():
    """CLI entry point: patch a workflow JSON with CLI args and run it."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--prompt", required=True)
    parser.add_argument("--seed", type=int, default=None)
    parser.add_argument("--steps", type=int, default=None)
    parser.add_argument("--width", type=int, default=None)
    parser.add_argument("--height", type=int, default=None)
    parser.add_argument("--server", default="http://127.0.0.1:32794")
    parser.add_argument("--workflow", default="workflow_api.json")
    parser.add_argument("--output", default="output")
    args = parser.parse_args()

    with open(args.workflow, "r") as fh:
        workflow = json.load(fh)

    # Inject the positive prompt text.
    text_node = find_node_by_class(workflow, "CLIPTextEncode")
    if text_node:
        workflow[text_node]["inputs"]["text"] = args.prompt

    # Configure the sampler: random seed unless one was given.
    sampler_node = (
        find_node_by_class(workflow, "KSampler")
        or find_node_by_class(workflow, "SamplerCustom")
    )
    if sampler_node:
        sampler_inputs = workflow[sampler_node]["inputs"]
        sampler_inputs["seed"] = args.seed or random.randint(1, 10**9)
        if args.steps:
            sampler_inputs["steps"] = args.steps

    # Configure the latent image dimensions when requested.
    latent_node = (
        find_node_by_class(workflow, "EmptyLatentImage")
        or find_node_by_class(workflow, "EmptySD3LatentImage")
    )
    if latent_node and (args.width or args.height):
        latent_inputs = workflow[latent_node]["inputs"]
        if args.width:
            latent_inputs["width"] = args.width
        if args.height:
            latent_inputs["height"] = args.height

    prompt_id = queue_prompt(workflow, args.server)
    print(f"任务 ID: {prompt_id}")
    history = get_history(prompt_id, args.server)
    if history:
        save_images(history, args.server, args.output)
    else:
        print("超时或生成失败")


if __name__ == "__main__":
    main()
# 基本生成
python comfyui_api.py -p "a cute cat" --server http://111.2.199.31:32794 --workflow 文生图.json
# 完整参数
python comfyui_api.py \
-p "Traditional Chinese landscape, misty mountains" \
--seed 88888888 \
--steps 8 \
--width 1024 --height 1024 \
--output landscape \
--server http://111.2.199.31:32794
cd ~/comfyUI/ComfyUI/custom_nodes
git clone https://github.com/ltdrdata/ComfyUI-Manager.git
重启 ComfyUI 时添加 --enable-manager 参数。
若遇到 security_level must be normal or below,修改配置文件:
nano ~/comfyUI/ComfyUI/user/default/ComfyUI-Manager/config.ini
添加:
[default]
security_level = normal-
network_mode = personal_cloud
重启 ComfyUI。
cd custom_nodes
git clone https://github.com/Fannovel16/comfyui_controlnet_aux.git
git clone https://github.com/cubiq/ComfyUI_IPAdapter_plus.git
git clone https://github.com/chflame163/ComfyUI_LayerStyle.git
git clone https://github.com/ssitu/ComfyUI_UltimateSDUpscale.git
安装后重启 ComfyUI。
| 问题 | 解决方法 |
|---|---|
| 连接不上 ComfyUI 服务 | 确认启动时加了 --listen 0.0.0.0,防火墙放行端口 |
| 模型找不到 | 检查 models/ 目录结构,使用 extra_model_paths.yaml 添加外部路径 |
| 节点报红 | 通过 Manager 安装缺失节点,或手动 git clone |
| 生成图片全黑/报错 | 查看 ComfyUI 终端日志,通常是模型路径错误或显存不足 |
| 下载模型慢 | 使用 hf-mirror.com 镜像,或 aria2c 多线程下载 |
通过上述步骤,你已经能够完成 ComfyUI 的部署、模型下载、API 工作流导出,以及通过 Python 脚本自动提交任务并保存生成的图像。
现在你可以将这套流程集成到电商自动化系统中,实现“输入提示词 → 自动生成产品图”的完整链路。如有任何问题,欢迎继续交流。
本文作者:苏皓明
本文链接:
版权声明:本博客所有文章除特别声明外,均采用 BY-NC-SA 许可协议。转载请注明出处!