immich/docker/hwaccel.ml.yml


# Configurations for hardware-accelerated machine learning
# If using Unraid or another platform that doesn't allow multiple Compose files,
# you can inline the config for a backend by copying its contents
# into the immich-machine-learning service in the docker-compose.yml file.
# See https://immich.app/docs/features/ml-hardware-acceleration for info on usage.
services:
  armnn:
    devices:
      - /dev/mali0:/dev/mali0
    volumes:
      - /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
      - /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)

  cpu: {}

  cuda:
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu

  openvino:
    device_cgroup_rules:
      - 'c 189:* rmw'
    devices:
      - /dev/dri:/dev/dri
    volumes:
      - /dev/bus/usb:/dev/bus/usb

  openvino-wsl:
    devices:
      - /dev/dri:/dev/dri
      - /dev/dxg:/dev/dxg
    volumes:
      - /dev/bus/usb:/dev/bus/usb
      - /usr/lib/wsl:/usr/lib/wsl
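
# --- Usage sketch (not part of hwaccel.ml.yml) ---
# A minimal example of how the immich-machine-learning service in docker-compose.yml
# can pull in one of the backends above via Compose's `extends` mechanism. The `cuda`
# backend is used here only as an example; substitute armnn, openvino, or openvino-wsl
# as appropriate. The image tag suffix (-cuda) and the model-cache volume follow the
# conventions of Immich's own docker-compose.yml and may differ for your install.
#
# services:
#   immich-machine-learning:
#     container_name: immich_machine_learning
#     # image tag suffix should match the chosen backend (e.g. -cuda, -armnn, -openvino)
#     image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
#     extends:
#       file: hwaccel.ml.yml
#       service: cuda # one of: armnn, cuda, openvino, openvino-wsl (omit extends for cpu)
#     volumes:
#       - model-cache:/cache
#     env_file:
#       - .env
#     restart: always
#
# volumes:
#   model-cache:
#
# The cpu backend is empty ({}) on purpose: no extra devices or volumes are needed,
# so the default image runs without any extends entry.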