# syntax=docker/dockerfile:1
# llama-server.Dockerfile
  1. ARG UBUNTU_VERSION=22.04
  2. FROM ubuntu:$UBUNTU_VERSION AS build
  3. RUN apt-get update && \
  4. apt-get install -y build-essential git cmake libcurl4-openssl-dev
  5. WORKDIR /app
  6. COPY . .
  7. RUN \
  8. # Build multiple versions of the CPU backend
  9. scripts/build-cpu.sh avx -DGGML_AVX=ON -DGGML_AVX2=OFF && \
  10. scripts/build-cpu.sh avx2 -DGGML_AVX=ON -DGGML_AVX2=ON && \
  11. scripts/build-cpu.sh avx512 -DGGML_AVX=ON -DGGML_AVX2=ON -DGGML_AVX512=ON && \
  12. scripts/build-cpu.sh amx -DGGML_AVX=ON -DGGML_AVX2=ON -DGGML_AVX512=ON -DGGML_AVX_VNNI=ON -DGGML_AVX512_VNNI=ON -DGGML_AMX_TILE=ON -DGGML_AMX_INT8=ON && \
  13. # Build llama-server
  14. cmake -S . -B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \
  15. cmake --build build --target llama-server -j $(nproc) && \
  16. # Copy the built libraries to /app/lib
  17. mkdir -p /app/lib && \
  18. mv libggml-cpu* /app/lib/ && \
  19. find build -name "*.so" -exec cp {} /app/lib/ \;
  20. FROM ubuntu:$UBUNTU_VERSION AS runtime
  21. RUN apt-get update && \
  22. apt-get install -y libcurl4-openssl-dev libgomp1 curl
  23. COPY --from=build /app/build/bin/llama-server /llama-server
  24. COPY --from=build /app/lib/ /
  25. ENV LC_ALL=C.utf8
  26. # Must be set to 0.0.0.0 so it can listen to requests from host machine
  27. ENV LLAMA_ARG_HOST=0.0.0.0
  28. HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
  29. ENTRYPOINT [ "/llama-server" ]