Ollama

Large Language Model API

Get up and running with Llama 3.2, Mistral, Gemma 2, and other large language models.

https://github.com/ollama/ollama

name: ollama
services:
  ollama:
    # Uncomment for GPU support (requires the NVIDIA Container Toolkit on the host;
    # see the ollama documentation for CUDA driver setup)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities:
    #             - gpu
    image: ollama/ollama:0.9.0
    container_name: ollama
    restart: unless-stopped
    # https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler
    # https://batey.info/cgroup-cpu-shares-for-docker.html
    cpu_shares: 512
    volumes:
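      # Pulled models and manifests live under /root/.ollama and can grow to tens of GB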
      - ${DOCKER_VOLUMES}/ollama:/root/.ollama
    networks:
      - proxy
    labels:
      traefik.enable: true
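      # localaccess@file is assumed to be a file-provider middleware that restricts the router to LAN clients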
      traefik.http.routers.ollama.middlewares: localaccess@file
      traefik.http.services.ollama.loadbalancer.server.port: 11434
      homepage.group: AI
      homepage.name: "Ollama"
      homepage.icon: ollama.png
      homepage.href: https://ollama.${MYDOMAIN}/
      homepage.description: "Large Language Model API"

networks:
  proxy:
    external: true
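
Once the stack is up, the API listens on port 11434 behind Traefik; no port is published on the host, so requests go through the proxy. Below is a minimal client sketch using only the Python standard library. It assumes a model has already been pulled into the container (e.g. docker exec ollama ollama pull llama3.2) and that https://ollama.example.com stands in for your ollama.${MYDOMAIN} router; the OLLAMA_URL constant and generate() helper are illustrative names, not part of Ollama itself.

import json
import urllib.request

# Stand-in for the Traefik-routed hostname (ollama.${MYDOMAIN})
OLLAMA_URL = "https://ollama.example.com"

def generate(prompt: str, model: str = "llama3.2") -> str:
    """Send one non-streaming completion request to Ollama's /api/generate endpoint."""
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,  # return a single JSON object instead of a token stream
    }).encode("utf-8")
    req = urllib.request.Request(
        f"{OLLAMA_URL}/api/generate",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        # Non-streaming responses carry the full completion in the "response" field
        return json.loads(resp.read())["response"]

if __name__ == "__main__":
    print(generate("Why is the sky blue?"))

Ollama also exposes a chat-style endpoint at /api/chat that takes a messages list instead of a single prompt; the request and response shapes otherwise follow the same pattern.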