# llama-cli-cuda.Dockerfile — multi-stage build of llama-cli with CUDA support
  1. ARG UBUNTU_VERSION=22.04
  2. # This needs to generally match the container host's environment.
  3. ARG CUDA_VERSION=11.7.1
  4. # Target the CUDA build image
  5. ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
  6. # Target the CUDA runtime image
  7. ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
  8. FROM ${BASE_CUDA_DEV_CONTAINER} AS build
  9. # Unless otherwise specified, we make a fat build.
  10. ARG CUDA_DOCKER_ARCH=all
  11. RUN apt-get update && \
  12. apt-get install -y build-essential git
  13. WORKDIR /app
  14. COPY . .
  15. # Set nvcc architecture
  16. ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
  17. # Enable CUDA
  18. ENV GGML_CUDA=1
  19. RUN make -j$(nproc) llama-cli
  20. FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
  21. RUN apt-get update && \
  22. apt-get install -y libgomp1
  23. COPY --from=build /app/llama-cli /llama-cli
  24. ENTRYPOINT [ "/llama-cli" ]