# release.yml — llama.cpp build-and-release workflow
  1. name: Release
  2. on:
  3. workflow_dispatch: # allows manual triggering
  4. inputs:
  5. create_release:
  6. description: 'Create new release'
  7. required: true
  8. type: boolean
  9. push:
  10. branches:
  11. - master
  12. paths: ['.github/workflows/release.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp']
  13. concurrency:
  14. group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  15. cancel-in-progress: true
  16. env:
  17. BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
  18. CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON"
  19. jobs:
  # Apple Silicon build (Metal enabled), shipped as both .zip and .tar.gz.
  macOS-arm64:
    runs-on: macos-14
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-arm64
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        continue-on-error: true
        run: |
          brew update
          brew install curl

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='@loader_path' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            -DGGML_METAL_USE_BF16=ON \
            -DGGML_METAL_EMBED_LIBRARY=ON \
            -DGGML_RPC=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          zip -y -r llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip ./build/bin/*
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz -s ",./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts (zip)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip
          name: llama-bin-macos-arm64.zip

      - name: Upload artifacts (tar)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz
          name: llama-bin-macos-arm64.tar.gz
  # Intel macOS build (Metal off — see comment in the Build step).
  macOS-x64:
    runs-on: macos-15-intel
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-x64
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        continue-on-error: true
        run: |
          brew update
          brew install curl

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          # Metal is disabled due to intermittent failures with Github runners not having a GPU:
          # https://github.com/ggml-org/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
          # FIX: pass the shared CMAKE_ARGS like every other release job — without it this
          # build silently used different LLAMA_BUILD_* defaults than the arm64 release.
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='@loader_path' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            -DGGML_METAL=OFF \
            -DGGML_RPC=ON \
            -DCMAKE_OSX_DEPLOYMENT_TARGET=13.3 \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          zip -y -r llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip ./build/bin/*
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz -s ",./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts (zip)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip
          name: llama-bin-macos-x64.zip

      - name: Upload artifacts (tar)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz
          name: llama-bin-macos-x64.tar.gz
  # Linux CPU-only builds (x64 + s390x) with all CPU variants as loadable backends.
  ubuntu-22-cpu:
    strategy:
      matrix:
        include:
          - build: 'x64'
            os: ubuntu-22.04
          - build: 's390x'
            os: ubuntu-24.04-s390x
          # GGML_BACKEND_DL and GGML_CPU_ALL_VARIANTS are not currently supported on arm
          # - build: 'arm64'
          #   os: ubuntu-22.04-arm
    runs-on: ${{ matrix.os }}

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: ubuntu-cpu-cmake-${{ matrix.build }}
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential libcurl4-openssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='$ORIGIN' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DGGML_BACKEND_DL=ON \
            -DGGML_NATIVE=OFF \
            -DGGML_CPU_ALL_VARIANTS=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          zip -y -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.zip ./build/bin/*
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts (zip)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.zip
          name: llama-bin-ubuntu-${{ matrix.build }}.zip

      - name: Upload artifacts (tar)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.tar.gz
          name: llama-bin-ubuntu-${{ matrix.build }}.tar.gz
  # Linux Vulkan build using the LunarG SDK packages.
  ubuntu-22-vulkan:
    runs-on: ubuntu-22.04
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: ubuntu-22-cmake-vulkan
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        # NOTE(review): apt-key is deprecated on Ubuntu 22.04; still functional here —
        # consider migrating to a keyring file under /etc/apt/keyrings eventually.
        run: |
          wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
          sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
          sudo apt-get update -y
          sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libcurl4-openssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='$ORIGIN' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DGGML_BACKEND_DL=ON \
            -DGGML_NATIVE=OFF \
            -DGGML_CPU_ALL_VARIANTS=ON \
            -DGGML_VULKAN=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          zip -y -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip ./build/bin/*
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts (zip)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip
          name: llama-bin-ubuntu-vulkan-x64.zip

      - name: Upload artifacts (tar)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz
          name: llama-bin-ubuntu-vulkan-x64.tar.gz
  # Windows CPU builds (x64 and arm64 cross-compile) with LLVM toolchains.
  windows-cpu:
    runs-on: windows-2025

    strategy:
      matrix:
        include:
          - arch: 'x64'
          - arch: 'arm64'

    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-cpu-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Ninja
        run: |
          choco install ninja

      - name: libCURL
        id: get_libcurl
        uses: ./.github/actions/windows-setup-curl
        with:
          architecture: ${{ matrix.arch == 'x64' && 'win64' || 'win64a' }}

      - name: Build
        shell: cmd
        env:
          CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ${{ matrix.arch == 'x64' && 'x64' || 'amd64_arm64' }}
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -D CMAKE_TOOLCHAIN_FILE=cmake/${{ matrix.arch }}-windows-llvm.cmake ^
            -DGGML_NATIVE=OFF ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_CPU_ALL_VARIANTS=${{ matrix.arch == 'x64' && 'ON' || 'OFF' }} ^
            -DGGML_OPENMP=ON ^
            -DCURL_LIBRARY="%CURL_PATH%/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="%CURL_PATH%/include" ^
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release

      - name: Pack artifacts
        id: pack_artifacts
        env:
          CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
        run: |
          Copy-Item $env:CURL_PATH\bin\libcurl-${{ matrix.arch }}.dll .\build\bin\Release\
          Copy-Item "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Redist\MSVC\14.44.35112\debug_nonredist\${{ matrix.arch }}\Microsoft.VC143.OpenMP.LLVM\libomp140.${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }}.dll" .\build\bin\Release\
          7z a -snl llama-bin-win-cpu-${{ matrix.arch }}.zip .\build\bin\Release\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cpu-${{ matrix.arch }}.zip
          name: llama-bin-win-cpu-${{ matrix.arch }}.zip
  # Windows GPU backend DLLs (Vulkan x64, OpenCL/Adreno arm64) built standalone;
  # the CPU backend from windows-cpu is merged into these zips at release time.
  windows:
    runs-on: windows-2025

    env:
      OPENBLAS_VERSION: 0.3.23
      VULKAN_VERSION: 1.4.313.2

    strategy:
      matrix:
        include:
          - backend: 'vulkan'
            arch: 'x64'
            defines: '-DGGML_VULKAN=ON'
            target: 'ggml-vulkan'
          - backend: 'opencl-adreno'
            arch: 'arm64'
            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
            target: 'ggml-opencl'

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-${{ matrix.backend }}-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Vulkan SDK
        id: get_vulkan
        if: ${{ matrix.backend == 'vulkan' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/vulkansdk-windows-X64-${env:VULKAN_VERSION}.exe"
          & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install
          Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}"
          Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin"

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Install OpenCL Headers and Libs
        id: install_opencl
        if: ${{ matrix.backend == 'opencl-adreno' && matrix.arch == 'arm64' }}
        run: |
          git clone https://github.com/KhronosGroup/OpenCL-Headers
          cd OpenCL-Headers
          cmake -B build `
            -DBUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build --target install
          git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader
          cd OpenCL-ICD-Loader
          cmake -B build-arm64-release `
            -A arm64 `
            -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build-arm64-release --target install --config release

      - name: Build
        id: cmake_build
        run: |
          cmake -S . -B build ${{ matrix.defines }} -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_BACKEND_DL=ON -DLLAMA_CURL=OFF
          cmake --build build --config Release --target ${{ matrix.target }}

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip .\build\bin\Release\${{ matrix.target }}.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
          name: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
  # Windows CUDA backend DLLs (one build per CUDA toolkit version) plus a
  # separate zip with the redistributable CUDA runtime DLLs.
  windows-cuda:
    runs-on: windows-2022

    strategy:
      matrix:
        cuda: ['12.4', '13.1']

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Install ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-cuda-${{ matrix.cuda }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Cuda Toolkit
        uses: ./.github/actions/windows-setup-cuda
        with:
          cuda_version: ${{ matrix.cuda }}

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_NATIVE=OFF ^
            -DGGML_CPU=OFF ^
            -DGGML_CUDA=ON ^
            -DLLAMA_CURL=OFF
          set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1
          cmake --build build --config Release -j %NINJA_JOBS% --target ggml-cuda

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip .\build\bin\Release\ggml-cuda.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip

      - name: Copy and pack Cuda runtime
        # The runtime DLL layout differs between CUDA 12 and 13, hence the
        # multiple robocopy locations; missing sources are tolerated.
        run: |
          echo "Cuda install location: ${{ env.CUDA_PATH }}"
          $dst='.\build\bin\cudart\'
          robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\bin\x64" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          7z a cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip $dst\*

      - name: Upload Cuda runtime
        uses: actions/upload-artifact@v4
        with:
          path: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
  # Windows SYCL (Intel oneAPI) backend build; bundles the oneAPI runtime DLLs.
  windows-sycl:
    runs-on: windows-2022

    defaults:
      run:
        shell: bash

    env:
      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe
      WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
      ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-sycl
          variant: ccache
          evict-old-files: 1d

      - name: Install
        run: |
          scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
          cmake -G "Ninja" -B build ^
            -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx ^
            -DCMAKE_BUILD_TYPE=Release ^
            -DGGML_BACKEND_DL=ON -DBUILD_SHARED_LIBS=ON ^
            -DGGML_CPU=OFF -DGGML_SYCL=ON ^
            -DLLAMA_CURL=OFF
          cmake --build build --target ggml-sycl -j

      - name: Build the release package
        id: pack_artifacts
        # Ship the oneAPI runtime DLLs next to the backend so users don't need
        # a local oneAPI install.
        run: |
          echo "cp oneAPI running time dll files in ${{ env.ONEAPI_ROOT }} to ./build/bin"

          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.5.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin

          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero_v2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl8.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl-ls.exe" ./build/bin

          cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/tcm.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/libhwloc-15.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/umf/latest/bin/umf.dll" ./build/bin

          echo "cp oneAPI running time dll files to ./build/bin done"

          7z a -snl llama-bin-win-sycl-x64.zip ./build/bin/*

      - name: Upload the release package
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-sycl-x64.zip
          name: llama-bin-win-sycl-x64.zip
  # Windows ROCm/HIP backend build; the HIP SDK install is cached because the
  # installer is slow and occasionally hangs (hence the explicit timeout).
  windows-hip:
    runs-on: windows-2022

    env:
      HIPSDK_INSTALLER_VERSION: "25.Q3"

    strategy:
      matrix:
        include:
          - name: "radeon"
            gpu_targets: "gfx1151;gfx1200;gfx1201;gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Grab rocWMMA package
        id: grab_rocwmma
        # The Windows HIP SDK does not ship rocWMMA headers; extract them from
        # the Linux .deb package instead.
        run: |
          curl -o rocwmma.deb "https://repo.radeon.com/rocm/apt/7.0.1/pool/main/r/rocwmma-dev/rocwmma-dev_2.0.0.70001-42~24.04_amd64.deb"
          7z x rocwmma.deb
          7z x data.tar

      - name: Cache ROCm Installation
        id: cache-rocm
        uses: actions/cache@v4
        with:
          path: C:\Program Files\AMD\ROCm
          key: rocm-${{ env.HIPSDK_INSTALLER_VERSION }}-${{ runner.os }}

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-hip-${{ env.HIPSDK_INSTALLER_VERSION }}-${{ matrix.name }}-x64
          evict-old-files: 1d

      - name: Install ROCm
        if: steps.cache-rocm.outputs.cache-hit != 'true'
        id: depends
        run: |
          $ErrorActionPreference = "Stop"
          write-host "Downloading AMD HIP SDK Installer"
          Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-${{ env.HIPSDK_INSTALLER_VERSION }}-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
          write-host "Installing AMD HIP SDK"
          $proc = Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -PassThru
          $completed = $proc.WaitForExit(600000)
          if (-not $completed) {
            Write-Error "ROCm installation timed out after 10 minutes. Killing the process"
            $proc.Kill()
            exit 1
          }
          if ($proc.ExitCode -ne 0) {
            Write-Error "ROCm installation failed with exit code $($proc.ExitCode)"
            exit 1
          }
          write-host "Completed AMD HIP SDK installation"

      - name: Verify ROCm
        id: verify
        run: |
          # Find and test ROCm installation
          $clangPath = Get-ChildItem 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | Select-Object -First 1
          if (-not $clangPath) {
            Write-Error "ROCm installation not found"
            exit 1
          }
          & $clangPath.FullName --version

      - name: Build
        id: cmake_build
        run: |
          $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
          $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
          cmake -G "Unix Makefiles" -B build -S . `
            -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" `
            -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" `
            -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/opt/rocm-7.0.1/include/ -Wno-ignored-attributes -Wno-nested-anon-types" `
            -DCMAKE_BUILD_TYPE=Release `
            -DGGML_BACKEND_DL=ON `
            -DGGML_NATIVE=OFF `
            -DGGML_CPU=OFF `
            -DAMDGPU_TARGETS="${{ matrix.gpu_targets }}" `
            -DGGML_HIP_ROCWMMA_FATTN=ON `
            -DGGML_HIP=ON `
            -DLLAMA_CURL=OFF
          cmake --build build --target ggml-hip -j ${env:NUMBER_OF_PROCESSORS}
          md "build\bin\rocblas\library\"
          md "build\bin\hipblaslt\library"
          cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\hipblaslt.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\"
          cp "${env:HIP_PATH}\bin\hipblaslt\library\*" "build\bin\hipblaslt\library\"

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-hip-${{ matrix.name }}-x64.zip .\build\bin\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-hip-${{ matrix.name }}-x64.zip
          name: llama-bin-win-hip-${{ matrix.name }}-x64.zip
  # iOS build + XCFramework packaging (zip and tar.gz).
  ios-xcode-build:
    runs-on: macos-15

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Xcode
        run: |
          sudo xcode-select -s /Applications/Xcode_16.4.app

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build -G Xcode \
            -DGGML_METAL_USE_BF16=ON \
            -DGGML_METAL_EMBED_LIBRARY=ON \
            -DLLAMA_CURL=OFF \
            -DLLAMA_BUILD_EXAMPLES=OFF \
            -DLLAMA_BUILD_TOOLS=OFF \
            -DLLAMA_BUILD_TESTS=OFF \
            -DLLAMA_BUILD_SERVER=OFF \
            -DCMAKE_SYSTEM_NAME=iOS \
            -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
            -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO

      - name: xcodebuild for swift package
        id: xcodebuild
        run: |
          ./build-xcframework.sh

      - name: Build Xcode project
        run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          zip -y -r llama-${{ steps.tag.outputs.name }}-xcframework.zip build-apple/llama.xcframework
          tar -czvf llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz -C build-apple llama.xcframework

      - name: Upload artifacts (zip)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-xcframework.zip
          name: llama-${{ steps.tag.outputs.name }}-xcframework.zip

      - name: Upload artifacts (tar)
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz
          name: llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz
  # Collects all build artifacts, merges the Windows CPU backend into the GPU
  # backend zips, renames everything with the release tag, creates the GitHub
  # release, and uploads the assets.
  release:
    if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}

    # Fine-grant permission
    # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write # for creating release

    runs-on: ubuntu-latest

    needs:
      - windows
      - windows-cpu
      - windows-cuda
      - windows-sycl
      - windows-hip
      - ubuntu-22-cpu
      - ubuntu-22-vulkan
      - macOS-arm64
      - macOS-x64
      - ios-xcode-build

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Download artifacts
        id: download-artifact
        uses: actions/download-artifact@v4
        with:
          path: ./artifact
          merge-multiple: true

      - name: Move artifacts
        id: move_artifacts
        run: |
          mkdir -p release

          # The Windows GPU-backend zips only contain the backend DLL; inject the
          # matching CPU backend files so each zip is self-contained.
          echo "Adding CPU backend files to existing zips..."
          for arch in x64 arm64; do
            cpu_zip="artifact/llama-bin-win-cpu-${arch}.zip"
            temp_dir=$(mktemp -d)
            echo "Extracting CPU backend for $arch..."
            unzip "$cpu_zip" -d "$temp_dir"

            echo "Adding CPU files to $arch zips..."
            for target_zip in artifact/llama-bin-win-*-${arch}.zip; do
              if [[ "$target_zip" == "$cpu_zip" ]]; then
                continue
              fi
              echo "Adding CPU backend to $(basename "$target_zip")"
              realpath_target_zip=$(realpath "$target_zip")
              (cd "$temp_dir" && zip -r "$realpath_target_zip" .)
            done

            rm -rf "$temp_dir"
          done

          # Windows zips are uploaded without the tag in their name; add it here.
          echo "Renaming and moving zips to release..."
          for zip_file in artifact/llama-bin-win-*.zip; do
            base_name=$(basename "$zip_file" .zip)
            zip_name="llama-${{ steps.tag.outputs.name }}-${base_name#llama-}.zip"
            echo "Moving $zip_file to release/$zip_name"
            mv "$zip_file" "release/$zip_name"
          done

          echo "Moving other artifacts..."
          mv -v artifact/*.zip release
          mv -v artifact/*.tar.gz release

      - name: Create release
        id: create_release
        uses: ggml-org/action-create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ steps.tag.outputs.name }}
          body: |
            > [!WARNING]
            > **Release Format Update**: Linux releases will soon use .tar.gz archives instead of .zip. Please make the necessary changes to your deployment scripts.
            <details open>
            ${{ github.event.head_commit.message }}
            </details>
            **macOS/iOS:**
            - [macOS Apple Silicon (arm64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz)
            - [macOS Intel (x64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz)
            - [iOS XCFramework](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz)
            **Linux:**
            - [Ubuntu x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.tar.gz)
            - [Ubuntu x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz)
            - [Ubuntu s390x (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-s390x.tar.gz)
            **Windows:**
            - [Windows x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-x64.zip)
            - [Windows arm64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-arm64.zip)
            - [Windows x64 (CUDA 12)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-12.4-x64.zip)
            - [Windows x64 (CUDA 13)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-13.1-x64.zip)
            - [Windows x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-vulkan-x64.zip)
            - [Windows x64 (SYCL)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip)
            - [Windows x64 (HIP)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-hip-radeon-x64.zip)

      - name: Upload release
        id: upload_release
        # FIX: upgraded actions/github-script from @v3 (Node 12, removed from
        # GitHub-hosted runners) to @v7 (Node 20). In v5+ the Octokit API moved
        # under the `rest` namespace, so `github.repos.uploadReleaseAsset`
        # becomes `github.rest.repos.uploadReleaseAsset`. Also dropped the
        # spurious `await` on the synchronous fs.*Sync calls.
        uses: actions/github-script@v7
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const path = require('path');
            const fs = require('fs');
            const release_id = '${{ steps.create_release.outputs.id }}';
            for (let file of fs.readdirSync('./release')) {
              if (path.extname(file) === '.zip' || file.endsWith('.tar.gz')) {
                console.log('uploadReleaseAsset', file);
                await github.rest.repos.uploadReleaseAsset({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  release_id: release_id,
                  name: file,
                  data: fs.readFileSync(`./release/${file}`)
                });
              }
            }