Browse Source

build : Fix docker build warnings (#8535) (#8537)

Al Mochkin 1 year ago
Parent
Commit
b3283448ce
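
Context: newer Docker/BuildKit releases run a FromAsCasing build check and warn when the casing of the "as" keyword in a multi-stage stage declaration does not match the casing of "FROM". This commit silences that warning by uppercasing "as" in every .devops Dockerfile. A minimal sketch of the pattern (hypothetical image tag, not from this repo):

    # Mixed casing; BuildKit warns:
    # FromAsCasing: 'as' and 'FROM' keywords' casing do not match
    FROM ubuntu:22.04 as build

    # Matching casing; no warning
    FROM ubuntu:22.04 AS build

Depending on the Docker/BuildKit version, such warnings can also be surfaced without a full build via "docker build --check .".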

+ 1 - 1
.devops/full-cuda.Dockerfile

@@ -6,7 +6,7 @@ ARG CUDA_VERSION=11.7.1
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 ARG CUDA_DOCKER_ARCH=all

+ 1 - 1
.devops/full-rocm.Dockerfile

@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete

-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878

+ 1 - 1
.devops/full.Dockerfile

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 RUN apt-get update && \
     apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1

+ 2 - 2
.devops/llama-cli-cuda.Dockerfile

@@ -6,7 +6,7 @@ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VER
 # Target the CUDA runtime image
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 ARG CUDA_DOCKER_ARCH=all
@@ -25,7 +25,7 @@ ENV GGML_CUDA=1

 RUN make -j$(nproc) llama-cli

-FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
+FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

 RUN apt-get update && \
     apt-get install -y libgomp1

+ 2 - 2
.devops/llama-cli-intel.Dockerfile

@@ -1,6 +1,6 @@
 ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04

-FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build

 ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
@@ -17,7 +17,7 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
     cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-cli

-FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime

 COPY --from=build /app/build/bin/llama-cli /llama-cli


+ 1 - 1
.devops/llama-cli-rocm.Dockerfile

@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete

-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878

+ 1 - 1
.devops/llama-cli-vulkan.Dockerfile

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=jammy

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 # Install build tools
 RUN apt update && apt install -y git build-essential cmake wget libgomp1

+ 2 - 2
.devops/llama-cli.Dockerfile

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 RUN apt-get update && \
     apt-get install -y build-essential git
@@ -11,7 +11,7 @@ COPY . .

 RUN make -j$(nproc) llama-cli

-FROM ubuntu:$UBUNTU_VERSION as runtime
+FROM ubuntu:$UBUNTU_VERSION AS runtime

 RUN apt-get update && \
     apt-get install -y libgomp1

+ 2 - 2
.devops/llama-server-cuda.Dockerfile

@@ -6,7 +6,7 @@ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VER
 # Target the CUDA runtime image
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 ARG CUDA_DOCKER_ARCH=all
@@ -27,7 +27,7 @@ ENV LLAMA_CURL=1

 RUN make -j$(nproc) llama-server

-FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
+FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

 RUN apt-get update && \
     apt-get install -y libcurl4-openssl-dev libgomp1 curl

+ 2 - 2
.devops/llama-server-intel.Dockerfile

@@ -1,6 +1,6 @@
 ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04

-FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build

 ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
@@ -17,7 +17,7 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
     cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-server

-FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime

 RUN apt-get update && \
     apt-get install -y libcurl4-openssl-dev curl

+ 1 - 1
.devops/llama-server-rocm.Dockerfile

@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete

-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878

+ 1 - 1
.devops/llama-server-vulkan.Dockerfile

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=jammy

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 # Install build tools
 RUN apt update && apt install -y git build-essential cmake wget

+ 2 - 2
.devops/llama-server.Dockerfile

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 RUN apt-get update && \
     apt-get install -y build-essential git libcurl4-openssl-dev curl
@@ -13,7 +13,7 @@ ENV LLAMA_CURL=1

 RUN make -j$(nproc) llama-server

-FROM ubuntu:$UBUNTU_VERSION as runtime
+FROM ubuntu:$UBUNTU_VERSION AS runtime

 RUN apt-get update && \
     apt-get install -y libcurl4-openssl-dev libgomp1