diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 1a40e407..d5e58a7f 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -9,12 +9,12 @@ env: UBUNTU_VERSION: 22.04 BUILDX_NO_DEFAULT_ATTESTATIONS: 1 # Conservative defaults for cloud providers - LATEST_CUDA: "pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04" - LATEST_CUDA_JUPYTER: "jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04" - LATEST_ROCM: "pytorch-2.1.1-py3.10-rocm-5.6-runtime-22.04" - LATEST_ROCM_JUPYTER: "jupyter-pytorch-2.1.1-py3.10-rocm-5.6-runtime-22.04" - LATEST_CPU: "pytorch-2.1.1-py3.10-cpu-22.04" - LATEST_CPU_JUPYTER: "jupyter-pytorch-2.1.1-py3.10-cpu-22.04" + LATEST_CUDA: "pytorch-2.2.0-py3.10-cuda-11.8.0-runtime-22.04" + LATEST_CUDA_JUPYTER: "jupyter-pytorch-2.2.0-py3.10-cuda-11.8.0-runtime-22.04" + LATEST_ROCM: "pytorch-2.2.0-py3.10-rocm-5.7-runtime-22.04" + LATEST_ROCM_JUPYTER: "jupyter-pytorch-2.2.0-py3.10-rocm-5.7-runtime-22.04" + LATEST_CPU: "pytorch-2.2.0-py3.10-cpu-22.04" + LATEST_CPU_JUPYTER: "jupyter-pytorch-2.2.0-py3.10-cpu-22.04" jobs: cpu-base: @@ -28,17 +28,20 @@ jobs: python: - "3.10" pytorch: - - "2.0.1" - "2.1.1" + - "2.1.2" + - "2.2.0" steps: - name: Free Space run: | df -h - rm -rf /usr/share/dotnet - rm -rf /opt/ghc - rm -rf /usr/local/share/boost - rm -rf "$AGENT_TOOLSDIRECTORY" + sudo rm -rf /usr/share/dotnet + sudo rm -rf /opt/ghc + sudo rm -rf /usr/local/.ghcup + sudo rm -rf /usr/local/share/boost + sudo rm -rf /usr/local/lib/android + sudo rm -rf "$AGENT_TOOLSDIRECTORY" df -h - name: Env Setter @@ -91,25 +94,25 @@ jobs: python: - "3.10" pytorch: - - "2.0.1" - "2.1.1" + - "2.1.2" + - "2.2.0" cuda: - "11.8.0" - "12.1.0" level: - - "base" - exclude: - - cuda: "12.1.0" - pytorch: "2.0.1" + - "runtime" steps: - name: Free Space run: | df -h - rm -rf /usr/share/dotnet - rm -rf /opt/ghc - rm -rf /usr/local/share/boost - rm -rf "$AGENT_TOOLSDIRECTORY" + sudo rm -rf /usr/share/dotnet + sudo rm -rf /opt/ghc + 
sudo rm -rf /usr/local/.ghcup + sudo rm -rf /usr/local/share/boost + sudo rm -rf /usr/local/lib/android + sudo rm -rf "$AGENT_TOOLSDIRECTORY" df -h - name: Env Setter @@ -162,28 +165,33 @@ jobs: python: - "3.10" pytorch: - - "2.0.1" - "2.1.1" + - "2.1.2" + - "2.2.0" rocm: - - "5.4.2" - - "5.6" + - "5.6" + - "5.7" level: - "runtime" exclude: - - rocm: "5.4.2" - pytorch: "2.1.1" - rocm: "5.6" - pytorch: "2.0.1" + pytorch: "2.2.0" + - rocm: "5.7" + pytorch: "2.1.2" + - rocm: "5.7" + pytorch: "2.1.1" steps: - name: Free Space run: | df -h - rm -rf /usr/share/dotnet - rm -rf /opt/ghc - rm -rf /usr/local/share/boost - rm -rf "$AGENT_TOOLSDIRECTORY" + sudo rm -rf /usr/share/dotnet + sudo rm -rf /opt/ghc + sudo rm -rf /usr/local/.ghcup + sudo rm -rf /usr/local/share/boost + sudo rm -rf /usr/local/lib/android + sudo rm -rf "$AGENT_TOOLSDIRECTORY" df -h - name: Env Setter diff --git a/README.md b/README.md index 09989c7f..157cedc5 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Run [ComfyUI](https://github.com/comfyanonymous/ComfyUI) in a docker container l These container images are tested extensively at [Vast.ai](https://link.ai-dock.org/template-vast-comfyui-jupyter) & [Runpod.io](https://link.ai-dock.org/template-runpod-comfyui-jupyter) but compatibility with other GPU cloud services is expected. ->[!NOTE] +>[!NOTE] >These images do not bundle models or third-party configurations. You should use a [provisioning script](#provisioning-script) to automatically configure your container. You can find examples in `config/provisioning`.
## Quick Start @@ -64,23 +64,23 @@ Tags follow these patterns: ##### _CUDA_ - `:pytorch-[pytorch-version]-py[python-version]-cuda-[x.x.x]-base-[ubuntu-version]` -- `:latest-cuda` → `:pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04` +- `:latest-cuda` → `:pytorch-2.2.0-py3.10-cuda-11.8.0-runtime-22.04` -- `:latest-cuda-jupyter` → `:jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04` +- `:latest-cuda-jupyter` → `:jupyter-pytorch-2.2.0-py3.10-cuda-11.8.0-runtime-22.04` ##### _ROCm_ - `:pytorch-[pytorch-version]-py[python-version]-rocm-[x.x.x]-runtime-[ubuntu-version]` -- `:latest-rocm` → `:pytorch-2.1.1-py3.10-rocm-5.6-runtime-22.04` +- `:latest-rocm` → `:pytorch-2.2.0-py3.10-rocm-5.7-runtime-22.04` -- `:latest-rocm-jupyter` → `:jupyter-pytorch-2.1.1-py3.10-rocm-5.6-runtime-22.04` +- `:latest-rocm-jupyter` → `:jupyter-pytorch-2.2.0-py3.10-rocm-5.7-runtime-22.04` ##### _CPU_ - `:pytorch-[pytorch-version]-py[python-version]-ubuntu-[ubuntu-version]` -- `:latest-cpu` → `:pytorch-2.1.1-py3.10-cpu-22.04` +- `:latest-cpu` → `:pytorch-2.2.0-py3.10-cpu-22.04` -- `:latest-cpu-jupyter` → `:jupyter-pytorch-2.1.1-py3.10-cpu-22.04` +- `:latest-cpu-jupyter` → `:jupyter-pytorch-2.2.0-py3.10-cpu-22.04` Browse [here](https://github.com/ai-dock/comfyui/pkgs/container/comfyui) for an image suitable for your target environment. @@ -88,7 +88,7 @@ You can also [build from source](#building-images) by editing `.env` and running Supported Python versions: `3.11`, `3.10` -Supported Pytorch versions: `2.1.1`, `2.0.1` +Supported Pytorch versions: `2.2.0`, `2.1.2`, `2.1.1` Supported Platforms: `NVIDIA CUDA`, `AMD ROCm`, `CPU` @@ -177,7 +177,7 @@ You can use the included `cloudflared` service to make secure connections withou | Variable | Description | | ------------------------ | ----------- | | `CF_TUNNEL_TOKEN` | Cloudflare zero trust tunnel token - See [documentation](https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/).
| -| `CF_QUICK_TUNNELS` | Create ephemeral Cloudflare tunnels for web services (default `false`) | +| `CF_QUICK_TUNNELS` | Create ephemeral Cloudflare tunnels for web services (default `true`) | | `COMFYUI_BRANCH` | ComfyUI branch/commit hash. Defaults to `master` | | `COMFYUI_FLAGS` | Startup flags. eg. `--gpu-only --highvram` | | `COMFYUI_PORT` | ComfyUI interface port (default `8188`) | @@ -188,12 +188,13 @@ You can use the included `cloudflared` service to make secure connections withou | `RCLONE_*` | Rclone configuration - See [rclone documentation](https://rclone.org/docs/#config-file) | | `SKIP_ACL` | Set `true` to skip modifying workspace ACL | | `SSH_PORT_LOCAL` | Set a non-standard port for SSH (default `22`) | -| `SSH_PUBKEY` | Your public key for SSH | +| `USER_NAME` | System account username (default `user`)| +| `USER_PASSWORD` | System account password (default `password`)| | `WEB_ENABLE_AUTH` | Enable password protection for web services (default `true`) | | `WEB_USER` | Username for web services (default `user`) | -| `WEB_PASSWORD` | Password for web services (default `password`) | +| `WEB_PASSWORD` | Password for web services (default `auto generated`) | | `WORKSPACE` | A volume path. Defaults to `/workspace/` | -| `WORKSPACE_SYNC` | Move mamba environments and services to workspace if mounted (default `true`) | +| `WORKSPACE_SYNC` | Move mamba environments and services to workspace if mounted (default `false`) | Environment variables can be specified by using any of the standard methods (`docker-compose.yaml`, `docker run -e...`). Additionally, environment variables can also be passed as parameters of `init.sh`. @@ -203,19 +204,32 @@ Example usage: `docker run -e STANDARD_VAR1="this value" -e STANDARD_VAR2="that ## Security -By default, all exposed web services other than the port redirect page are protected by HTTP basic authentication. +All ai-dock containers are interactive and will not drop root privileges.
You should ensure that your docker daemon runs as an unprivileged user. -The default username is `user` and the password is `password`. +### System -You can set your credentials by passing environment variables as shown above. +A system user will be created at startup. The UID will be either 1000 or will match the UID of the `$WORKSPACE` bind mount. + +The user will share the root user's ssh public key. + +Some processes may start in the user context for convenience only. + +### Web Services + +By default, all exposed web services are protected by a single login form at `:1111/login`. -The password is stored as a bcrypt hash. If you prefer not to pass a plain text password to the container you can pre-hash and use the variable `WEB_PASSWORD_HASH`. +The default username is `user` and the password is auto generated unless you have passed a value in the environment variable `WEB_PASSWORD`. To find the auto-generated password and related tokens you should type `env | grep WEB_` from inside the container. + +You can set your credentials by passing environment variables as shown above. If you are running the image locally on a trusted network, you may disable authentication by setting the environment variable `WEB_ENABLE_AUTH=false`. ->[!NOTE] ->You can use `set-web-credentials.sh ` change the username and password in a running container. +If you need to connect programmatically to the web services you can authenticate using either `Bearer $WEB_TOKEN` or `Basic $WEB_PASSWORD_B64`. +The security measures included aim to be as secure as basic authentication, i.e. not secure without HTTPS. Please use the provided cloudflare connections wherever possible. + +>[!NOTE] +>You can use `set-web-credentials.sh ` to change the username and password in a running container. 
## Provisioning script @@ -228,8 +242,7 @@ The URL must point to a plain text file - GitHub Gists/Pastebin (raw) are suitab If you are running locally you may instead opt to mount a script at `/opt/ai-dock/bin/provisioning.sh`. >[!NOTE] ->If configured, `sshd`, `caddy`, `cloudflared`, `rclone`, `serviceportal`, `storagemonitor` & `logtail` will be launched before provisioning; Any other processes will launch after. - +>If configured, `sshd`, `caddy`, `cloudflared`, `serviceportal`, `storagemonitor` & `logtail` will be launched before provisioning; Any other processes will launch after. >[!WARNING] >Only use scripts that you trust and which cannot be changed without your consent. @@ -283,8 +296,6 @@ As docker containers generally run as the root user, new files created in /works To ensure that the files remain accessible to the local user that owns the directory, the docker entrypoint will set a default ACL on the directory by executing the commamd `setfacl -d -m u:${WORKSPACE_UID}:rwx /workspace`. -If you do not want this, you can set the environment variable `SKIP_ACL=true`. - ## Running Services This image will spawn multiple processes upon starting a container because some of our remote environments do not support more than one container per instance. @@ -309,7 +320,7 @@ To manage this service you can use `supervisorctl [start|stop|restart] comfyui`. This service is available on port `8188` and is used to test the [RunPod serverless](https://link.ai-dock.org/runpod-serverless) API. -You can access the api directly at `/rp-api/runsync` or you can use the Swager/openAPI playground at `/rp-api/docs`. +You can access the api directly at `/rp-api/runsync` or you can use the Swagger/openAPI playground at `/rp-api`. There are several [example payloads](https://github.com/ai-dock/comfyui/tree/main/build/COPY_ROOT/opt/serverless/docs/example_payloads) included in this repository.
@@ -411,33 +422,6 @@ See [this guide](https://link.ai-dock.org/guide-sshd-do) by DigitalOcean for an >[!NOTE] >_SSHD is included because the end-user should be able to know the version prior to deloyment. Using a providers add-on, if available, does not guarantee this._ -### Rclone mount - -Rclone allows you to access your cloud storage from within the container by configuring one or more remotes. If you are unfamiliar with the project you can find out more at the [Rclone website](https://rclone.org/). - -Any Rclone remotes that you have specified, either through mounting the config directory or via setting environment variables will be mounted at `/workspace/remote/[remote name]`. For this service to start, the following conditions must be met: - -- Fuse3 installed in the host operating system -- Kernel module `fuse` loaded in the host -- Host `/etc/passwd` mounted in the container -- Host `/etc/group` mounted in the container -- Host device `/dev/fuse` made available to the container -- Container must run with `cap-add SYS_ADMIN` -- Container must run with `securiry-opt apparmor:unconfined` -- At least one remote must be configured - -The provided docker-compose.yaml includes a working configuration (add your own remotes). - -In the event that the conditions listed cannot be met, `rclone` will still be available to use via the CLI - only mounts will be unavailable. - -If you intend to use the `rclone create` command to interactively generate remote configurations you should ensure port `53682` is accessible. See https://rclone.org/remote_setup/ for further details. - ->[!NOTE] ->_Rclone is included to give the end-user an opportunity to easily transfer files between the instance and their cloud storage provider._ - ->[!WARNING] ->You should only provide auth tokens in secure cloud environments. - ### Logtail This script follows and prints the log files for each of the above services to stdout. 
This allows you to follow the progress of all running services through docker's own logging system. diff --git a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/comfyui.conf b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/comfyui.conf index 010adb8e..a12f73a3 100644 --- a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/comfyui.conf +++ b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/comfyui.conf @@ -1,4 +1,6 @@ [program:comfyui] +user=$USER_NAME +environment=PROC_NAME="%(program_name)s",USER=$USER_NAME,HOME=/home/$USER_NAME command=/opt/ai-dock/bin/supervisor-comfyui.sh process_name=%(program_name)s numprocs=1 @@ -18,4 +20,4 @@ stdout_logfile_backups=1 stderr_logfile=/dev/null stderr_logfile_maxbytes=0 stderr_logfile_backups=0 -environment=PROC_NAME="%(program_name)s" \ No newline at end of file +redirect_stderr=true \ No newline at end of file diff --git a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/comfyui_rp_api.conf b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/comfyui_rp_api.conf index e5ee01bc..171fddc9 100644 --- a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/comfyui_rp_api.conf +++ b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/comfyui_rp_api.conf @@ -1,8 +1,10 @@ [program:comfyui_rp_api] +user=$USER_NAME +environment=PROC_NAME="%(program_name)s",USER=$USER_NAME,HOME=/home/$USER_NAME command=/opt/ai-dock/bin/supervisor-comfyui-rp-api.sh process_name=%(program_name)s numprocs=1 -directory=/opt/serverless/providers/runpod +directory=/home/$USER_NAME priority=100 autostart=true startsecs=5 @@ -18,4 +20,4 @@ stdout_logfile_backups=1 stderr_logfile=/dev/null stderr_logfile_maxbytes=0 stderr_logfile_backups=0 -environment=PROC_NAME="%(program_name)s" \ No newline at end of file +redirect_stderr=true \ No newline at end of file diff --git a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/serverless.conf b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/serverless.conf index 65e7da0f..5f36bfdc 100644 --- 
a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/serverless.conf +++ b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/serverless.conf @@ -1,4 +1,6 @@ [program:serverless] +user=$USER_NAME +environment=PROC_NAME="%(program_name)s",USER=$USER_NAME,HOME=/home/$USER_NAME command=/opt/ai-dock/bin/supervisor-serverless.sh process_name=%(program_name)s numprocs=1 diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh index 00fce203..4a350564 100755 --- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh +++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh @@ -3,3 +3,8 @@ # Tidy up and keep image small apt-get clean -y micromamba clean -ay + +fix-permissions.sh -o container + +rm /etc/ld.so.cache +ldconfig \ No newline at end of file diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh index 291faa5a..a874f187 100755 --- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh +++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh @@ -11,19 +11,25 @@ build_common_main() { build_common_create_env() { apt-get update - $APT_INSTALL libgl1 \ - libgoogle-perftools4 + $APT_INSTALL \ + libgl1-mesa-glx \ + libtcmalloc-minimal4 + #libgoogle-perftools4 - ln -sf $(ldconfig -p | grep -Po "libtcmalloc.so.\d" | head -n 1) \ + ln -sf $(ldconfig -p | grep -Po "libtcmalloc_minimal.so.\d" | head -n 1) \ /lib/x86_64-linux-gnu/libtcmalloc.so - + + #$MAMBA_INSTALL -n ${MAMBA_DEFAULT_ENV} pocl + # A new pytorch env costs ~ 300Mb exported_env=/tmp/${MAMBA_DEFAULT_ENV}.yaml micromamba env export -n ${MAMBA_DEFAULT_ENV} > "${exported_env}" $MAMBA_CREATE -n comfyui --file "${exported_env}" + printf "/opt/micromamba/envs/comfyui/lib\n" >> /etc/ld.so.conf.d/x86_64-linux-gnu.micromamba.10-comfyui.conf # RunPod serverless support $MAMBA_CREATE -n serverless python=3.10 + printf "/opt/micromamba/envs/serverless/lib\n" >> 
/etc/ld.so.conf.d/x86_64-linux-gnu.micromamba.20-serverless.conf $MAMBA_INSTALL -n serverless \ python-magic micromamba run -n serverless $PIP_INSTALL \ diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh index bedaae44..b63e20b8 100755 --- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh +++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh @@ -2,6 +2,7 @@ # Must exit and fail to build if any command fails set -eo pipefail +umask 002 source /opt/ai-dock/bin/build/layer0/common.sh diff --git a/build/COPY_ROOT/opt/ai-dock/bin/preflight.sh b/build/COPY_ROOT/opt/ai-dock/bin/preflight.sh index b16bd685..bfdc8c71 100755 --- a/build/COPY_ROOT/opt/ai-dock/bin/preflight.sh +++ b/build/COPY_ROOT/opt/ai-dock/bin/preflight.sh @@ -11,7 +11,6 @@ function preflight_main() { function preflight_serverless() { printf "Skipping ComfyUI updates in serverless mode\n" printf "%s" "${COMFYUI_FLAGS}" > /etc/comfyui_flags.conf - } function preflight_copy_notebook() { @@ -30,6 +29,9 @@ function preflight_update_comfyui() { fi } +# move this to base-image +sudo chown user.ai-dock /var/log/timing_data + if [[ ${SERVERLESS,,} != "true" ]]; then preflight_main "$@" else diff --git a/build/COPY_ROOT/opt/ai-dock/bin/supervisor-comfyui-rp-api.sh b/build/COPY_ROOT/opt/ai-dock/bin/supervisor-comfyui-rp-api.sh index 31dd07a6..1a6fe476 100755 --- a/build/COPY_ROOT/opt/ai-dock/bin/supervisor-comfyui-rp-api.sh +++ b/build/COPY_ROOT/opt/ai-dock/bin/supervisor-comfyui-rp-api.sh @@ -9,7 +9,6 @@ function cleanup() { kill $(jobs -p) > /dev/null 2>&1 } - function start() { if [[ ${SERVERLESS,,} = "true" ]]; then printf "Refusing to start hosted API service in serverless mode\n" @@ -18,7 +17,7 @@ function start() { printf "Starting %s...\n" ${SERVICE_NAME} - kill $(lsof -t -i:$LISTEN_PORT) > /dev/null 2>&1 & + fuser -k -SIGTERM ${LISTEN_PORT}/tcp > /dev/null 2>&1 & wait -n cd /opt/serverless/providers/runpod && \ diff --git 
a/build/COPY_ROOT/opt/ai-dock/bin/supervisor-comfyui.sh b/build/COPY_ROOT/opt/ai-dock/bin/supervisor-comfyui.sh index 1a4d8888..570c2dc9 100755 --- a/build/COPY_ROOT/opt/ai-dock/bin/supervisor-comfyui.sh +++ b/build/COPY_ROOT/opt/ai-dock/bin/supervisor-comfyui.sh @@ -5,10 +5,13 @@ trap cleanup EXIT LISTEN_PORT=${COMFYUI_PORT_LOCAL:-18188} METRICS_PORT=${COMFYUI_METRICS_PORT:-28188} PROXY_SECURE=true +QUICKTUNNELS=true function cleanup() { kill $(jobs -p) > /dev/null 2>&1 rm /run/http_ports/$PROXY_PORT > /dev/null 2>&1 + fuser -k -SIGTERM ${LISTEN_PORT}/tcp > /dev/null 2>&1 & + wait -n } function start() { @@ -67,14 +70,17 @@ function start() { printf "%s started: %s\n" "${SERVICE_NAME}" "$(date +"%x %T.%3N")" >> /var/log/timing_data printf "Starting %s...\n" "${SERVICE_NAME}" - kill $(lsof -t -i:$LISTEN_PORT) > /dev/null 2>&1 & - wait -n + fuser -k -SIGKILL ${LISTEN_PORT}/tcp > /dev/null 2>&1 & FLAGS_COMBINED="${PLATFORM_FLAGS} ${BASE_FLAGS} $(cat /etc/comfyui_flags.conf)" printf "Starting %s...\n" "${SERVICE_NAME}" cd /opt/ComfyUI && \ - micromamba run -n comfyui -e LD_PRELOAD=libtcmalloc.so python main.py \ + # elevate comfyui libs + micromamba run -n comfyui \ + -e LD_PRELOAD=libtcmalloc.so \ + -e LD_LIBRARY_PATH=/opt/micromamba/envs/comfyui/lib:${LD_LIBRARY_PATH} \ + python main.py \ ${FLAGS_COMBINED} --port ${LISTEN_PORT} } diff --git a/build/COPY_ROOT/opt/caddy/share/service_config_18188 b/build/COPY_ROOT/opt/caddy/share/service_config_18188 index 8ed5eeb3..79295ae2 100644 --- a/build/COPY_ROOT/opt/caddy/share/service_config_18188 +++ b/build/COPY_ROOT/opt/caddy/share/service_config_18188 @@ -1,12 +1,37 @@ :!PROXY_PORT { - handle_path /openapi.json { - root * /opt/serverless/docs/swagger/openapi.yaml - file_server + import universal-config + + @openapi { + path /openapi.json } - handle_path /rp-api* { - reverse_proxy localhost:38188 + @rp-api { + path /rp-api* } - - reverse_proxy localhost:!LISTEN_PORT -} + + header @authenticating_bearer Set-Cookie 
"ai_dock_token={$WEB_TOKEN}; Path=/ ;Max-Age=604800; HttpOnly; SameSite=lax" + header @authenticating_basic Set-Cookie "ai_dock_token={$WEB_PASSWORD_B64}; Path=/ ;Max-Age=604800; HttpOnly; SameSite=lax" + + route @openapi { + handle_path /openapi.json { + root * /opt/serverless/docs/swagger/openapi.yaml + file_server @authorized + } + redir {$SERVICEPORTAL_LOGIN} + } + + route @rp-api { + uri strip_prefix /rp-api + reverse_proxy @authorized localhost:38188 + redir {$SERVICEPORTAL_LOGIN} + } + + route @preauth { + redir @authorized / + } + + route @default { + reverse_proxy @authorized localhost:!LISTEN_PORT + redir {$SERVICEPORTAL_LOGIN} + } +} \ No newline at end of file diff --git a/build/COPY_ROOT/opt/caddy/share/service_config_18188_auth b/build/COPY_ROOT/opt/caddy/share/service_config_18188_auth deleted file mode 100644 index b83a6443..00000000 --- a/build/COPY_ROOT/opt/caddy/share/service_config_18188_auth +++ /dev/null @@ -1,17 +0,0 @@ -:!PROXY_PORT { - basicauth * { - import /opt/caddy/etc/basicauth - } - - handle_path /openapi.json { - root * /opt/serverless/docs/swagger/openapi.yaml - file_server - } - - handle_path /rp-api* { - reverse_proxy localhost:38188 - } - - reverse_proxy localhost:!LISTEN_PORT -} - diff --git a/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh b/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh index e679b6a6..25651d35 100755 --- a/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh +++ b/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh @@ -125,4 +125,6 @@ function build_extra_download() { wget -qnc --content-disposition --show-progress -e dotbytes="${3:-4M}" -P "$2" "$1" } -build_extra_start \ No newline at end of file +umask 002 +build_extra_start +fix-permissions.sh -o container \ No newline at end of file diff --git a/config/provisioning/animated.sh b/config/provisioning/animated.sh index d76d4589..7bdb77bc 100644 --- a/config/provisioning/animated.sh +++ b/config/provisioning/animated.sh @@ 
-1,4 +1,4 @@ -#!/bin/false +#!/bin/bash # This file will be sourced in init.sh diff --git a/config/provisioning/get-models-sd-official.sh b/config/provisioning/get-models-sd-official.sh index 425fe74f..f0251f23 100644 --- a/config/provisioning/get-models-sd-official.sh +++ b/config/provisioning/get-models-sd-official.sh @@ -1,4 +1,4 @@ -#!/bin/false +#!/bin/bash # This file will be sourced in init.sh diff --git a/config/provisioning/seargedp-seargesdxl.sh b/config/provisioning/seargedp-seargesdxl.sh index d738fed5..8e606e47 100644 --- a/config/provisioning/seargedp-seargesdxl.sh +++ b/config/provisioning/seargedp-seargesdxl.sh @@ -1,4 +1,4 @@ -#!/bin/false +#!/bin/bash # This file will be sourced in init.sh diff --git a/docker-compose.yaml b/docker-compose.yaml index bfc99dba..db4f19be 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -5,11 +5,11 @@ services: build: context: ./build args: - IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/jupyter-pytorch:2.1.1-py3.10-cuda-11.8.0-base-22.04} + IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/jupyter-pytorch:2.2.0-py3.10-cuda-11.8.0-runtime-22.04} tags: - - "ghcr.io/ai-dock/comfyui:${IMAGE_TAG:-jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04}" + - "ghcr.io/ai-dock/comfyui:${IMAGE_TAG:-jupyter-pytorch-2.2.0-py3.10-cuda-11.8.0-runtime-22.04}" - image: ghcr.io/ai-dock/comfyui:${IMAGE_TAG:-jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04} + image: ghcr.io/ai-dock/comfyui:${IMAGE_TAG:-jupyter-pytorch-2.2.0-py3.10-cuda-11.8.0-runtime-22.04} ## For Nvidia GPU's - You probably want to uncomment this #deploy: @@ -20,33 +20,13 @@ services: # count: all # capabilities: [gpu] - security_opt: - ## For Rclone mount - - apparmor:unconfined - ## For AMD GPU - #- seccomp:unconfined - - cap_add: - ## For Rclone mount - - SYS_ADMIN - devices: - ## For Rclone mount - - "/dev/fuse:/dev/fuse" + - "/dev/dri:/dev/dri" ## For AMD GPU #- "/dev/kfd:/dev/kfd" - #- "/dev/dri:/dev/dri" - ## For AMD GPU - #group_add: - # - video - # - render 
volumes: - ## For Rclone mount - - /etc/passwd:/etc/passwd:ro - - /etc/group:/etc/group:ro - - ./config/rclone:/etc/rclone ## Workspace - ./workspace:${WORKSPACE:-/workspace/}:rshared # You can share /workspace/storage with other non-ComfyUI containers. See README @@ -58,7 +38,7 @@ services: - ./config/provisioning/default.sh:/opt/ai-dock/bin/provisioning.sh ports: # SSH available on host machine port 2222 to avoid conflict. Change to suit - - ${SSH_PORT_HOST:-2222}:${SSH_PORT_LOCAL:-22} + - ${SSH_PORT_HOST:-2222}:22 # Caddy port for service portal - ${SERVICEPORTAL_PORT_HOST:-1111}:${SERVICEPORTAL_PORT_HOST:-1111} # ComfyUI web interface @@ -80,15 +60,11 @@ services: - WEB_USER=${WEB_USER:-user} - WEB_PASSWORD=${WEB_PASSWORD:-password} - SSH_PORT_HOST=${SSH_PORT_HOST:-2222} - - SSH_PORT_LOCAL=${SSH_PORT_LOCAL:-22} - SERVICEPORTAL_PORT_HOST=${SERVICEPORTAL_PORT_HOST:-1111} - - SERVICEPORTAL_PORT_LOCAL=${SERVICEPORTAL_PORT_LOCAL:-11111} - SERVICEPORTAL_METRICS_PORT=${SERVICEPORTAL_METRICS_PORT:-21111} - COMFYUI_FLAGS=${COMFYUI_FLAGS:-} - COMFYUI_PORT_HOST=${COMFYUI_PORT_HOST:-8188} - - COMFYUI_PORT_LOCAL=${COMFYUI_PORT_LOCAL:-18188} - COMFYUI_METRICS_PORT=${COMFYUI_METRICS_PORT:-28188} - JUPYTER_PORT_HOST=${JUPYTER_PORT_HOST:-8888} - - JUPYTER_PORT_LOCAL=${JUPYTER_PORT_LOCAL:-18888} - JUPYTER_METRICS_PORT=${JUPYTER_METRICS_PORT:-28888} - SERVERLESS=${SERVERLESS:-false}