Skip to content

Commit

Permalink
single compose file
Browse files Browse the repository at this point in the history
  • Loading branch information
mertalev committed Dec 3, 2024
1 parent 52247c3 commit 1134133
Show file tree
Hide file tree
Showing 5 changed files with 324 additions and 161 deletions.
128 changes: 107 additions & 21 deletions docker/docker-compose.dev.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,11 @@ name: immich-dev
services:
immich-server:
container_name: immich_server
command: ['/usr/src/app/bin/immich-dev']
command: [ '/usr/src/app/bin/immich-dev' ]
image: immich-server-dev:latest
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
extends:
file: docker-compose.dev.yml
service: transcoding-${IMMICH_TRANSCODING_BACKEND:-cpu}
build:
context: ../
dockerfile: server/Dockerfile
Expand Down Expand Up @@ -53,6 +53,7 @@ services:
- database
healthcheck:
disable: false
profiles: !reset []

immich-web:
container_name: immich_web
Expand All @@ -61,7 +62,7 @@ services:
# user: 0:0
build:
context: ../web
command: ['/usr/src/app/bin/immich-web']
command: [ '/usr/src/app/bin/immich-web' ]
env_file:
- .env
ports:
Expand All @@ -83,14 +84,14 @@ services:
immich-machine-learning:
container_name: immich_machine_learning
image: immich-machine-learning-dev:latest
# extends:
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
extends:
file: docker-compose.dev.yml
service: ml-${IMMICH_MACHINE_LEARNING_BACKEND:-cpu}
build:
context: ../machine-learning
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
- DEVICE=${IMMICH_MACHINE_LEARNING_BACKEND:-cpu}
ports:
- 3003:3003
volumes:
Expand All @@ -103,6 +104,7 @@ services:
restart: unless-stopped
healthcheck:
disable: false
profiles: !reset []

redis:
container_name: immich_redis
Expand All @@ -126,22 +128,12 @@ services:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# immich-prometheus:
Expand All @@ -164,6 +156,100 @@ services:
# volumes:
# - grafana-data:/var/lib/grafana

# NOTE: the following services exist to be extended above; you can ignore them
ml-armnn:
devices:
- /dev/mali0:/dev/mali0
volumes:
- /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
- /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)
profiles: [ ignore ]

ml-cpu:
profiles: [ ignore ]

ml-cuda:
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities:
- gpu
profiles: [ ignore ]

ml-openvino:
device_cgroup_rules:
- 'c 189:* rmw'
devices:
- /dev/dri:/dev/dri
volumes:
- /dev/bus/usb:/dev/bus/usb
profiles: [ ignore ]

ml-openvino-wsl:
devices:
- /dev/dri:/dev/dri
- /dev/dxg:/dev/dxg
volumes:
- /dev/bus/usb:/dev/bus/usb
- /usr/lib/wsl:/usr/lib/wsl
profiles: [ ignore ]

transcoding-cpu:
profiles: [ ignore ]

transcoding-nvenc:
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities:
- gpu
- compute
- video
    profiles: [ ignore ]

transcoding-quicksync:
devices:
- /dev/dri:/dev/dri
profiles: [ ignore ]

transcoding-rkmpp:
security_opt:
# enables full access to /sys and /proc, still far better than privileged: true
- systempaths=unconfined
- apparmor=unconfined
group_add:
- video
devices:
- /dev/rga:/dev/rga
- /dev/dma_heap:/dev/dma_heap
- /dev/mpp_service:/dev/mpp_service
- /dev/mali0:/dev/mali0 # only required to enable OpenCL-accelerated HDR -> SDR tonemapping, can be removed
volumes:
- /etc/OpenCL:/etc/OpenCL:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping, can be removed
- /usr/lib/aarch64-linux-gnu/libmali.so.1:/usr/lib/aarch64-linux-gnu/libmali.so.1:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping, can be removed
profiles: [ ignore ]

transcoding-vaapi:
devices:
- /dev/dri:/dev/dri
profiles: [ ignore ]

transcoding-vaapi-wsl:
# use this for VAAPI if you're running Immich in WSL2
devices:
- /dev/dri:/dev/dri
volumes:
- /usr/lib/wsl:/usr/lib/wsl
environment:
- LIBVA_DRIVER_NAME=d3d12
profiles: [ ignore ]

volumes:
model-cache:
prometheus-data:
Expand Down
128 changes: 107 additions & 21 deletions docker/docker-compose.prod.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@ services:
immich-server:
container_name: immich_server
image: immich-server:latest
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
extends:
file: docker-compose.prod.yml
service: transcoding-${IMMICH_TRANSCODING_BACKEND:-cpu}
build:
context: ../
dockerfile: server/Dockerfile
Expand All @@ -23,18 +23,19 @@ services:
restart: always
healthcheck:
disable: false
profiles: !reset []

immich-machine-learning:
container_name: immich_machine_learning
image: immich-machine-learning:latest
# extends:
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
extends:
file: docker-compose.prod.yml
service: ml-${IMMICH_MACHINE_LEARNING_BACKEND:-cpu}
build:
context: ../machine-learning
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
- DEVICE=${IMMICH_MACHINE_LEARNING_BACKEND:-cpu}
ports:
- 3003:3003
volumes:
Expand All @@ -44,6 +45,7 @@ services:
restart: always
healthcheck:
disable: false
profiles: !reset []

redis:
container_name: immich_redis
Expand All @@ -54,7 +56,7 @@ services:

database:
container_name: immich_postgres
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.3.0
env_file:
- .env
environment:
Expand All @@ -68,22 +70,12 @@ services:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
restart: always

# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
Expand All @@ -100,13 +92,107 @@ services:
# add data source for http://immich-prometheus:9090 to get started
immich-grafana:
container_name: immich_grafana
command: ['./run.sh', '-disable-reporting']
command: [ './run.sh', '-disable-reporting' ]
ports:
- 3000:3000
image: grafana/grafana:11.3.0-ubuntu@sha256:51587e148ac0214d7938e7f3fe8512182e4eb6141892a3ffb88bba1901b49285
volumes:
- grafana-data:/var/lib/grafana

# NOTE: the following services exist to be extended above; you can ignore them
ml-armnn:
devices:
- /dev/mali0:/dev/mali0
volumes:
- /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
- /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)
profiles: [ ignore ]

ml-cpu:
profiles: [ ignore ]

ml-cuda:
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities:
- gpu
profiles: [ ignore ]

ml-openvino:
device_cgroup_rules:
- 'c 189:* rmw'
devices:
- /dev/dri:/dev/dri
volumes:
- /dev/bus/usb:/dev/bus/usb
profiles: [ ignore ]

ml-openvino-wsl:
devices:
- /dev/dri:/dev/dri
- /dev/dxg:/dev/dxg
volumes:
- /dev/bus/usb:/dev/bus/usb
- /usr/lib/wsl:/usr/lib/wsl
profiles: [ ignore ]

transcoding-cpu:
profiles: [ ignore ]

transcoding-nvenc:
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities:
- gpu
- compute
- video
profiles: [ ignore ]

transcoding-quicksync:
devices:
- /dev/dri:/dev/dri
profiles: [ ignore ]

transcoding-rkmpp:
security_opt:
# enables full access to /sys and /proc, still far better than privileged: true
- systempaths=unconfined
- apparmor=unconfined
group_add:
- video
devices:
- /dev/rga:/dev/rga
- /dev/dma_heap:/dev/dma_heap
- /dev/mpp_service:/dev/mpp_service
- /dev/mali0:/dev/mali0 # only required to enable OpenCL-accelerated HDR -> SDR tonemapping, can be removed
volumes:
- /etc/OpenCL:/etc/OpenCL:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping, can be removed
- /usr/lib/aarch64-linux-gnu/libmali.so.1:/usr/lib/aarch64-linux-gnu/libmali.so.1:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping, can be removed
profiles: [ ignore ]

transcoding-vaapi:
devices:
- /dev/dri:/dev/dri
profiles: [ ignore ]

transcoding-vaapi-wsl:
# use this for VAAPI if you're running Immich in WSL2
devices:
- /dev/dri:/dev/dri
volumes:
- /usr/lib/wsl:/usr/lib/wsl
environment:
- LIBVA_DRIVER_NAME=d3d12
profiles: [ ignore ]

volumes:
model-cache:
prometheus-data:
Expand Down
Loading

0 comments on commit 1134133

Please sign in to comment.