name: immich_remote_ml

services:
  immich-machine-learning-gpu:
    container_name: immich_machine_learning_gpu
    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-openvino
    extends:
      file: hwaccel.ml.yml
      service: openvino # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache-gpu:/cache
      # - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    restart: always
    ports:
      - 3004:3003
    environment:
      - TZ=${TZ}

  immich-machine-learning-cpu:
    container_name: immich_machine_learning_cpu
    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    extends:
      file: hwaccel.ml.yml
      service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache-cpu:/cache
      # - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    restart: always
    ports:
      - 3003:3003
    environment:
      - TZ=${TZ}

volumes:
  model-cache-gpu:
  model-cache-cpu:
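
# Note: the `extends` blocks above reference a separate hwaccel.ml.yml placed next to
# this file. The commented-out excerpt below is a minimal sketch of the two services
# used here, based on the upstream Immich hwaccel.ml.yml; verify it against the copy
# shipped with your Immich release before relying on it.
#
#   services:
#     cpu: {}
#
#     openvino:
#       device_cgroup_rules:
#         - 'c 189:* rmw'
#       devices:
#         - /dev/dri:/dev/dri
#       volumes:
#         - /dev/bus/usb:/dev/bus/usb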