# TODO: https://docs.astral.sh/uv/guides/integration/docker/#configuring-watch-with-docker-compose
services:
  # GPU (CUDA) variant of the server.
  faster-whisper-server-cuda:
    image: fedirz/faster-whisper-server:latest-cuda
    build:
      dockerfile: Dockerfile.cuda
      context: .
      platforms:
        - linux/amd64
        - linux/arm64
    restart: unless-stopped
    ports:
      # Quoted: unquoted `host:container` port mappings are a classic YAML
      # implicit-typing trap (sexagesimal ints in YAML 1.1); Compose docs
      # recommend always quoting them.
      # NOTE(review): both services publish host port 8000 — `docker compose up`
      # with both services will fail with a port conflict; run one at a time
      # (or move one to a profile / different host port).
      - "8000:8000"
    volumes:
      # Persist downloaded models across container restarts.
      - hugging_face_cache:/root/.cache/huggingface
    develop:
      watch:
        # Rebuild the image when the application source changes.
        - path: faster_whisper_server
          action: rebuild
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: ["gpu"]
            # If you have the CDI feature enabled use the following instead
            # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/cdi-support.html
            # https://docs.docker.com/reference/cli/dockerd/#enable-cdi-devices
            # - driver: cdi
            #   device_ids:
            #     - nvidia.com/gpu=all
  # CPU-only variant of the server.
  faster-whisper-server-cpu:
    image: fedirz/faster-whisper-server:latest-cpu
    build:
      dockerfile: Dockerfile.cpu
      context: .
      platforms:
        - linux/amd64
        - linux/arm64
    restart: unless-stopped
    ports:
      # See the port-conflict note on the CUDA service above.
      - "8000:8000"
    volumes:
      - hugging_face_cache:/root/.cache/huggingface
    develop:
      watch:
        - path: faster_whisper_server
          action: rebuild
volumes:
  # Named volume shared by both services for the Hugging Face model cache.
  hugging_face_cache: