---
# docker-compose: Ollama (LLM runtime) + Open WebUI (browser front-end)
services:
  # Ollama model server. Data (pulled models) persists in ./docker-volumes/ollama.
  ollama:
    image: ollama/ollama:latest
    ports:
      # Quoted: Compose convention — plain HOST:CONTAINER mappings can hit
      # YAML implicit-typing traps (sexagesimal ints), so always quote them.
      - "11434:11434"
    volumes:
      - ./docker-volumes/ollama:/root/.ollama
    container_name: ollama
    tty: true
    restart: unless-stopped

    # GPU SUPPORT NOTES:
    # 1. The "deploy" section is ignored by classic 'docker-compose'; it's honored in Swarm.
    # 2. For local 'docker compose up' with NVIDIA GPUs you need the host configured with
    #    nvidia-container-toolkit. Then either:
    #    a) Leave the reservation block (Compose V2 now honors it) OR
    #    b) Start with: docker compose up --build (Compose will request GPUs) OR
    #    c) Explicitly override: docker compose run --gpus all ollama
    # 3. If your Docker/Compose version does NOT honor the reservation below, uncomment the
    #    'devices' section further down as a fallback (less portable).

    # --- NVIDIA GPU SUPPORT BLOCK (active; comment it out on hosts without an NVIDIA GPU) ---
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    environment:
      # Visible devices / capabilities for the NVIDIA container runtime
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
    # --- END GPU SUPPORT BLOCK ---

    # Fallback (UNCOMMENT ONLY if the reservation above is ignored and you still get errors):
    # devices:
    #   - /dev/nvidiactl:/dev/nvidiactl
    #   - /dev/nvidia-uvm:/dev/nvidia-uvm
    #   - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools
    #   - /dev/nvidia0:/dev/nvidia0

  # Open WebUI front-end; talks to the ollama service over the Compose network.
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    volumes:
      - ./docker-volumes/open-webui:/app/backend/data
    depends_on:
      - ollama
    ports:
      # Host 3000 -> container 8080 (Open WebUI's internal port). Quoted per
      # Compose convention for port mappings.
      - "3000:8080"
    environment:
      # Service-name DNS: 'ollama' resolves to the ollama container above.
      - 'OLLAMA_BASE_URL=http://ollama:11434'
      - 'ENABLE_OLLAMA_API=true'
      # Empty on purpose — presumably a random secret is generated at startup;
      # set a fixed value to keep sessions valid across recreations (verify
      # against Open WebUI docs).
      - 'WEBUI_SECRET_KEY='
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped