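# s390x.Dockerfile: multi-stage build of llama.cpp for IBM Z / LinuxONE
# (s390x) with the OpenBLAS backend. Stages: build -> collector -> base,
# then three final targets: full (binaries plus Python conversion tooling),
# light (llama-cli only) and server (llama-server only).
#
# Example build (a sketch; the image tag is illustrative, not defined here):
#   docker buildx build --platform linux/s390x -f s390x.Dockerfile \
#       --target server -t llama.cpp:server-s390x .
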
ARG GCC_VERSION=15.2.0
ARG UBUNTU_VERSION=24.04

### Build llama.cpp stage
FROM --platform=linux/s390x gcc:${GCC_VERSION} AS build

RUN --mount=type=cache,target=/var/cache/apt \
    --mount=type=cache,target=/var/lib/apt/lists \
    apt update -y && \
    apt upgrade -y && \
    apt install -y --no-install-recommends \
        git cmake ccache ninja-build \
        # WARNING: Do not use libopenblas-openmp-dev. libopenblas-dev is faster.
        libopenblas-dev libcurl4-openssl-dev && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY . .
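
# ccache and the build directory live in cache mounts, so rebuilds after a
# source change are incremental instead of starting from scratch.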
RUN --mount=type=cache,target=/root/.ccache \
    --mount=type=cache,target=/app/build \
    cmake -S . -B build -G Ninja \
        -DCMAKE_BUILD_TYPE=Release \
        -DCMAKE_C_COMPILER_LAUNCHER=ccache \
        -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
        -DLLAMA_BUILD_TESTS=OFF \
        -DGGML_BACKEND_DL=OFF \
        -DGGML_NATIVE=OFF \
        -DGGML_BLAS=ON \
        -DGGML_BLAS_VENDOR=OpenBLAS && \
    cmake --build build --config Release -j $(nproc) && \
    cmake --install build --prefix /opt/llama.cpp
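
# Stage the Python scripts from the repository root, the tools.sh entrypoint
# and the gguf-py package next to the binaries for the "full" image below.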
COPY *.py             /opt/llama.cpp/bin
COPY .devops/tools.sh /opt/llama.cpp/bin

COPY gguf-py          /opt/llama.cpp/gguf-py
COPY requirements.txt /opt/llama.cpp/gguf-py
COPY requirements     /opt/llama.cpp/gguf-py/requirements

### Collect all llama.cpp binaries, libraries and distro libraries
FROM --platform=linux/s390x scratch AS collector
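# "scratch" is used purely as a staging filesystem here: this stage is never
# run, it only gathers the build artifacts in one place for later COPY --from.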

# Copy llama.cpp binaries and libraries
COPY --from=build /opt/llama.cpp/bin     /llama.cpp/bin
COPY --from=build /opt/llama.cpp/lib     /llama.cpp/lib
COPY --from=build /opt/llama.cpp/gguf-py /llama.cpp/gguf-py

### Base image
FROM --platform=linux/s390x ubuntu:${UBUNTU_VERSION} AS base

RUN --mount=type=cache,target=/var/cache/apt \
    --mount=type=cache,target=/var/lib/apt/lists \
    apt update -y && \
    apt install -y --no-install-recommends \
        # WARNING: Do not use libopenblas-openmp-dev. libopenblas-dev is faster.
        curl libgomp1 libopenblas-dev && \
    apt autoremove -y && \
    apt clean -y && \
    rm -rf /tmp/* /var/tmp/* && \
    find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
    find /var/cache -type f -delete
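
# The llama.cpp shared libraries go into the default s390x multiarch path,
# so the dynamic loader finds them without any LD_LIBRARY_PATH setup.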
# Copy llama.cpp libraries
COPY --from=collector /llama.cpp/lib /usr/lib/s390x-linux-gnu

### Full
FROM --platform=linux/s390x base AS full

ENV PATH="/root/.cargo/bin:${PATH}"
WORKDIR /app

RUN --mount=type=cache,target=/var/cache/apt \
    --mount=type=cache,target=/var/lib/apt/lists \
    apt update -y && \
    apt install -y \
        git cmake libjpeg-dev \
        python3 python3-pip python3-dev && \
    apt autoremove -y && \
    apt clean -y && \
    rm -rf /tmp/* /var/tmp/* && \
    find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
    find /var/cache -type f -delete
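
# Rust toolchain for pip: some gguf-py Python dependencies likely ship no
# prebuilt s390x wheels and get compiled from source at install time (an
# assumption based on typical s390x wheel coverage; hence .cargo/bin on PATH).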
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y

COPY --from=collector /llama.cpp/bin     /app
COPY --from=collector /llama.cpp/gguf-py /app/gguf-py

RUN pip install --no-cache-dir --break-system-packages \
    -r /app/gguf-py/requirements.txt

ENTRYPOINT [ "/app/tools.sh" ]
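
# Example usage of the "full" target (a sketch; the tag, mount path and
# subcommand are illustrative; check the flags tools.sh actually accepts):
#   docker run -v $(pwd)/models:/models llama.cpp:full-s390x \
#       --run -m /models/model.gguf -p "Hello"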

### CLI Only
FROM --platform=linux/s390x base AS light

WORKDIR /llama.cpp/bin

# Copy the llama-cli binary (shared libraries come from the base stage)
COPY --from=collector /llama.cpp/bin/llama-cli /llama.cpp/bin

ENTRYPOINT [ "/llama.cpp/bin/llama-cli" ]
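
# Example usage of the "light" target (a sketch; tag and model path are
# illustrative):
#   docker run -v $(pwd)/models:/models llama.cpp:light-s390x \
#       -m /models/model.gguf -p "Hello"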

### Server
FROM --platform=linux/s390x base AS server

ENV LLAMA_ARG_HOST=0.0.0.0

WORKDIR /llama.cpp/bin

# Copy the llama-server binary (shared libraries come from the base stage)
COPY --from=collector /llama.cpp/bin/llama-server /llama.cpp/bin

EXPOSE 8080

ENTRYPOINT [ "/llama.cpp/bin/llama-server" ]
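
# Example usage of the "server" target (a sketch; tag and model path are
# illustrative). LLAMA_ARG_HOST above already binds the server to 0.0.0.0:
#   docker run -p 8080:8080 -v $(pwd)/models:/models llama.cpp:server-s390x \
#       -m /models/model.gguf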