# llama-server-cuda.Dockerfile
  1. ARG UBUNTU_VERSION=22.04
  2. # This needs to generally match the container host's environment.
  3. ARG CUDA_VERSION=11.7.1
  4. # Target the CUDA build image
  5. ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
  6. # Target the CUDA runtime image
  7. ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
  8. FROM ${BASE_CUDA_DEV_CONTAINER} AS build
  9. # Unless otherwise specified, we make a fat build.
  10. ARG CUDA_DOCKER_ARCH=all
  11. RUN apt-get update && \
  12. apt-get install -y build-essential git libcurl4-openssl-dev
  13. WORKDIR /app
  14. COPY . .
  15. # Set nvcc architecture
  16. ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
  17. # Enable CUDA
  18. ENV GGML_CUDA=1
  19. # Enable cURL
  20. ENV LLAMA_CURL=1
  21. # Must be set to 0.0.0.0 so it can listen to requests from host machine
  22. ENV LLAMA_ARG_HOST=0.0.0.0
  23. RUN make -j$(nproc) llama-server
  24. FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
  25. RUN apt-get update && \
  26. apt-get install -y libcurl4-openssl-dev libgomp1 curl
  27. COPY --from=build /app/llama-server /llama-server
  28. HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
  29. ENTRYPOINT [ "/llama-server" ]