# Dockerfile (forked from blib-la/runpod-worker-comfy)

# Stage 1: Base image with common dependencies
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04 AS base

# Prevents prompts from packages asking for user input during installation
ENV DEBIAN_FRONTEND=noninteractive
# Prefer binary wheels over source distributions for faster pip installations
ENV PIP_PREFER_BINARY=1
# Ensures output from python is printed immediately to the terminal without buffering
ENV PYTHONUNBUFFERED=1
# Install Python, git and other necessary tools, and clean up apt caches in the
# same layer so the cleanup actually reduces the image size
RUN apt-get update && apt-get install -y \
    python3.10 \
    python3-pip \
    git \
    wget \
    && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/*
# Clone ComfyUI repository
RUN git clone https://github.com/comfyanonymous/ComfyUI.git /comfyui
# Change working directory to ComfyUI
WORKDIR /comfyui
# Install ComfyUI dependencies
# Note: the base image ships CUDA 11.8 while these wheels target CUDA 12.1; the
# cu121 wheels bundle their own CUDA runtime so this generally works with a
# recent host driver, but a cu118 index (or a CUDA 12.x base image) would be
# more consistent.
RUN pip3 install --upgrade --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121 \
    && pip3 install --upgrade --no-cache-dir -r requirements.txt
# Install runpod
RUN pip3 install runpod requests
# Add the extra model paths config so ComfyUI can also load models from the network volume
COPY src/extra_model_paths.yaml ./
# Go back to the root
WORKDIR /
# Add the start script, the RunPod handler, and a sample test input
COPY src/start.sh src/rp_handler.py test_input.json ./
RUN chmod +x /start.sh
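# start.sh is expected to launch the ComfyUI server and then hand control to the
# RunPod handler (rp_handler.py); see the repository's src/ directory for details.
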
# Stage 2: Download models
FROM base AS downloader
# Build arguments: a Hugging Face access token (required for the gated SD3 and
# FLUX.1-dev downloads below) and the model set to bake into the image
ARG HUGGINGFACE
ARG MODEL_TYPE
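# Example build invocations (illustrative; image tags and the token are placeholders):
#   docker build --build-arg MODEL_TYPE=sdxl -t runpod-worker-comfy:sdxl .
#   docker build --build-arg MODEL_TYPE=flux1-dev --build-arg HUGGINGFACE=<hf_token> -t runpod-worker-comfy:flux1-dev .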
# Change working directory to ComfyUI
WORKDIR /comfyui
# Download checkpoints/VAEs/text encoders to bake into the image, based on MODEL_TYPE
RUN if [ "$MODEL_TYPE" = "sdxl" ]; then \
      wget -O models/checkpoints/sd_xl_base_1.0.safetensors https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors && \
      wget -O models/vae/sdxl_vae.safetensors https://huggingface.co/stabilityai/sdxl-vae/resolve/main/sdxl_vae.safetensors && \
      wget -O models/vae/sdxl-vae-fp16-fix.safetensors https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl_vae.safetensors; \
    elif [ "$MODEL_TYPE" = "sd3" ]; then \
      wget --header="Authorization: Bearer ${HUGGINGFACE}" -O models/checkpoints/sd3_medium_incl_clips_t5xxlfp8.safetensors https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips_t5xxlfp8.safetensors; \
    elif [ "$MODEL_TYPE" = "flux1-schnell" ]; then \
      wget -O models/unet/flux1-schnell.safetensors https://huggingface.co/black-forest-labs/FLUX.1-schnell/resolve/main/flux1-schnell.safetensors && \
      wget -O models/clip/clip_l.safetensors https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors && \
      wget -O models/clip/t5xxl_fp8_e4m3fn.safetensors https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors && \
      wget -O models/vae/ae.safetensors https://huggingface.co/black-forest-labs/FLUX.1-schnell/resolve/main/ae.safetensors; \
    elif [ "$MODEL_TYPE" = "flux1-dev" ]; then \
      wget --header="Authorization: Bearer ${HUGGINGFACE}" -O models/unet/flux1-dev.safetensors https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/flux1-dev.safetensors && \
      wget -O models/clip/clip_l.safetensors https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors && \
      wget -O models/clip/t5xxl_fp8_e4m3fn.safetensors https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors && \
      wget --header="Authorization: Bearer ${HUGGINGFACE}" -O models/vae/ae.safetensors https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/ae.safetensors; \
    fi
# Clone IDM-VTON into the custom_nodes directory
RUN git clone https://github.com/TemryL/ComfyUI-IDM-VTON.git custom_nodes/ComfyUI-IDM-VTON
# Navigate to the IDM-VTON directory and install dependencies
WORKDIR /comfyui/custom_nodes/ComfyUI-IDM-VTON
RUN python3 install.py
# Clone comfyui_controlnet_aux
WORKDIR /comfyui/custom_nodes
RUN git clone https://github.com/Fannovel16/comfyui_controlnet_aux/
# Install requirements
WORKDIR /comfyui/custom_nodes/comfyui_controlnet_aux
RUN pip3 install --no-cache-dir -r requirements.txt
# Set appropriate permissions
RUN chmod -R 755 /comfyui/custom_nodes/comfyui_controlnet_aux
# Download SAM model
RUN mkdir -p /comfyui/models/sams && \
wget -O /comfyui/models/sams/sam_vit_l_0b3195.pth https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth
# Download GroundingDINO config and model
RUN mkdir -p /comfyui/models/grounding-dino && \
wget -O /comfyui/models/grounding-dino/GroundingDINO_SwinB.cfg.py https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/GroundingDINO_SwinB.cfg.py && \
wget -O /comfyui/models/grounding-dino/groundingdino_swinb_cogcoor.pth https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swinb_cogcoor.pth
# Clone comfyui-mixlab-nodes
WORKDIR /comfyui/custom_nodes
RUN git clone https://github.com/shadowcz007/comfyui-mixlab-nodes.git
# Install requirements
WORKDIR /comfyui/custom_nodes/comfyui-mixlab-nodes
RUN pip3 install --no-cache-dir -r requirements.txt
# Set appropriate permissions
RUN chmod -R 755 /comfyui/custom_nodes/comfyui-mixlab-nodes
# # Download bert-base-uncased model (optional)
# RUN mkdir -p /comfyui/models/bert-base-uncased && \
# wget -O /comfyui/models/bert-base-uncased/config.json https://huggingface.co/google-bert/bert-base-uncased/resolve/main/config.json && \
# wget -O /comfyui/models/bert-base-uncased/model.safetensors https://huggingface.co/google-bert/bert-base-uncased/resolve/main/model.safetensors && \
# wget -O /comfyui/models/bert-base-uncased/tokenizer_config.json https://huggingface.co/google-bert/bert-base-uncased/resolve/main/tokenizer_config.json && \
# wget -O /comfyui/models/bert-base-uncased/tokenizer.json https://huggingface.co/google-bert/bert-base-uncased/resolve/main/tokenizer.json && \
# wget -O /comfyui/models/bert-base-uncased/vocab.txt https://huggingface.co/google-bert/bert-base-uncased/resolve/main/vocab.txt
# Stage 3: Final image
FROM base AS final

# Copy models and custom nodes from the downloader stage into the final image
COPY --from=downloader /comfyui/models /comfyui/models
COPY --from=downloader /comfyui/custom_nodes /comfyui/custom_nodes
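# Note: Python packages installed while building the downloader stage are not
# carried over by COPY, so the custom nodes' dependencies must also be present
# in this stage. A minimal sketch, assuming the requirements files and
# install.py sit at the paths cloned above:
RUN pip3 install --no-cache-dir \
      -r /comfyui/custom_nodes/comfyui_controlnet_aux/requirements.txt \
      -r /comfyui/custom_nodes/comfyui-mixlab-nodes/requirements.txt \
    && cd /comfyui/custom_nodes/ComfyUI-IDM-VTON && python3 install.py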
# Start the container
CMD ["/start.sh"]
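# To smoke-test the built image locally (illustrative; requires an NVIDIA GPU and
# the NVIDIA Container Toolkit on the host):
#   docker run --rm --gpus all runpod-worker-comfy:sdxl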