# llama-server-cuda.Dockerfile

  1. ARG UBUNTU_VERSION=22.04
  2. # This needs to generally match the container host's environment.
  3. ARG CUDA_VERSION=11.7.1
  4. # Target the CUDA build image
  5. ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
  6. # Target the CUDA runtime image
  7. ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
  8. FROM ${BASE_CUDA_DEV_CONTAINER} as build
  9. # Unless otherwise specified, we make a fat build.
  10. ARG CUDA_DOCKER_ARCH=all
  11. RUN apt-get update && \
  12. apt-get install -y build-essential git libcurl4-openssl-dev
  13. WORKDIR /app
  14. COPY . .
  15. # Set nvcc architecture
  16. ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
  17. # Enable CUDA
  18. ENV GGML_CUDA=1
  19. # Enable cURL
  20. ENV LLAMA_CURL=1
  21. RUN make -j$(nproc) llama-server
  22. FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
  23. RUN apt-get update && \
  24. apt-get install -y libcurl4-openssl-dev libgomp1 curl
  25. COPY --from=build /app/llama-server /llama-server
  26. HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
  27. ENTRYPOINT [ "/llama-server" ]