# .github/workflows/release.yml
  1. name: Release
  2. on:
  3. workflow_dispatch: # allows manual triggering
  4. inputs:
  5. create_release:
  6. description: 'Create new release'
  7. required: true
  8. type: boolean
  9. push:
  10. branches:
  11. - master
  12. paths: ['.github/workflows/release.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp']
  13. concurrency:
  14. group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  15. cancel-in-progress: true
  16. env:
  17. BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
  18. CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON"
  19. jobs:
  macOS-arm64:
    runs-on: macos-14
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-arm64
          evict-old-files: 1d

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='@loader_path' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            -DLLAMA_BUILD_BORINGSSL=ON \
            -DGGML_METAL_USE_BF16=ON \
            -DGGML_METAL_EMBED_LIBRARY=ON \
            -DGGML_RPC=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          # BSD tar: -s rewrites the member prefix so the tarball unpacks into llama-<tag>/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz -s ",./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz
          name: llama-bin-macos-arm64.tar.gz
  macOS-x64:
    runs-on: macos-15-intel
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-x64
          evict-old-files: 1d

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          # Metal is disabled due to intermittent failures with Github runners not having a GPU:
          # https://github.com/ggml-org/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
          # Fix: append the shared CMAKE_ARGS like every other build job; it was
          # missing here, leaving this artifact configured differently from the rest.
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='@loader_path' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            -DLLAMA_BUILD_BORINGSSL=ON \
            -DGGML_METAL=OFF \
            -DGGML_RPC=ON \
            -DCMAKE_OSX_DEPLOYMENT_TARGET=13.3 \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          # BSD tar: -s rewrites the member prefix so the tarball unpacks into llama-<tag>/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz -s ",./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz
          name: llama-bin-macos-x64.tar.gz
  ubuntu-22-cpu:
    strategy:
      matrix:
        include:
          - build: 'x64'
            os: ubuntu-22.04
          - build: 's390x'
            os: ubuntu-24.04-s390x
          # GGML_BACKEND_DL and GGML_CPU_ALL_VARIANTS are not currently supported on arm
          # - build: 'arm64'
          #   os: ubuntu-22.04-arm
    runs-on: ${{ matrix.os }}
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: ubuntu-cpu-cmake-${{ matrix.build }}
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          # Fix: -y keeps apt-get non-interactive; without it the confirmation
          # prompt can abort the step on a runner with no tty.
          sudo apt-get install -y build-essential libssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='$ORIGIN' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DGGML_BACKEND_DL=ON \
            -DGGML_NATIVE=OFF \
            -DGGML_CPU_ALL_VARIANTS=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.tar.gz
          name: llama-bin-ubuntu-${{ matrix.build }}.tar.gz
  ubuntu-22-vulkan:
    runs-on: ubuntu-22.04
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: ubuntu-22-cmake-vulkan
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          # NOTE(review): apt-key is deprecated on newer Ubuntu; works on 22.04 — revisit when bumping the runner image.
          wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
          sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
          sudo apt-get update -y
          sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='$ORIGIN' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DGGML_BACKEND_DL=ON \
            -DGGML_NATIVE=OFF \
            -DGGML_CPU_ALL_VARIANTS=ON \
            -DGGML_VULKAN=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz
          name: llama-bin-ubuntu-vulkan-x64.tar.gz
  windows-cpu:
    runs-on: windows-2025
    strategy:
      matrix:
        include:
          - arch: 'x64'
          - arch: 'arm64'
    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-cpu-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Ninja
        run: |
          choco install ninja

      - name: Build
        shell: cmd
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ${{ matrix.arch == 'x64' && 'x64' || 'amd64_arm64' }}
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -D CMAKE_TOOLCHAIN_FILE=cmake/${{ matrix.arch }}-windows-llvm.cmake ^
            -DLLAMA_BUILD_BORINGSSL=ON ^
            -DGGML_NATIVE=OFF ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_CPU_ALL_VARIANTS=${{ matrix.arch == 'x64' && 'ON' || 'OFF' }} ^
            -DGGML_OPENMP=ON ^
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          # NOTE(review): redist version 14.44.35112 is hard-pinned; breaks when the runner image updates MSVC — confirm on image bumps.
          Copy-Item "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Redist\MSVC\14.44.35112\debug_nonredist\${{ matrix.arch }}\Microsoft.VC143.OpenMP.LLVM\libomp140.${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }}.dll" .\build\bin\Release\
          7z a -snl llama-bin-win-cpu-${{ matrix.arch }}.zip .\build\bin\Release\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cpu-${{ matrix.arch }}.zip
          name: llama-bin-win-cpu-${{ matrix.arch }}.zip
  windows:
    runs-on: windows-2025
    env:
      OPENBLAS_VERSION: 0.3.23
      VULKAN_VERSION: 1.4.313.2
    strategy:
      matrix:
        include:
          - backend: 'vulkan'
            arch: 'x64'
            defines: '-DGGML_VULKAN=ON'
            target: 'ggml-vulkan'
          - backend: 'opencl-adreno'
            arch: 'arm64'
            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
            target: 'ggml-opencl'
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-${{ matrix.backend }}-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Vulkan SDK
        id: get_vulkan
        if: ${{ matrix.backend == 'vulkan' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/vulkansdk-windows-X64-${env:VULKAN_VERSION}.exe"
          & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install
          Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}"
          Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin"

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Install OpenCL Headers and Libs
        id: install_opencl
        if: ${{ matrix.backend == 'opencl-adreno' && matrix.arch == 'arm64' }}
        run: |
          git clone https://github.com/KhronosGroup/OpenCL-Headers
          cd OpenCL-Headers
          cmake -B build `
            -DBUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build --target install
          git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader
          cd OpenCL-ICD-Loader
          cmake -B build-arm64-release `
            -A arm64 `
            -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build-arm64-release --target install --config release

      - name: Build
        id: cmake_build
        run: |
          cmake -S . -B build ${{ matrix.defines }} -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_BACKEND_DL=ON -DLLAMA_BUILD_BORINGSSL=ON
          cmake --build build --config Release --target ${{ matrix.target }}

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip .\build\bin\Release\${{ matrix.target }}.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
          name: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
  windows-cuda:
    runs-on: windows-2022
    strategy:
      matrix:
        cuda: ['12.4', '13.1']
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Install ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-cuda-${{ matrix.cuda }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Cuda Toolkit
        uses: ./.github/actions/windows-setup-cuda
        with:
          cuda_version: ${{ matrix.cuda }}

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Build
        id: cmake_build
        shell: cmd
        # TODO: Remove GGML_CUDA_CUB_3DOT2 flag once CCCL 3.2 is bundled within CTK and that CTK version is used in this project
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_NATIVE=OFF ^
            -DGGML_CPU=OFF ^
            -DGGML_CUDA=ON ^
            -DLLAMA_BUILD_BORINGSSL=ON ^
            -DGGML_CUDA_CUB_3DOT2=ON
          set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1
          cmake --build build --config Release -j %NINJA_JOBS% --target ggml-cuda

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip .\build\bin\Release\ggml-cuda.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip

      - name: Copy and pack Cuda runtime
        run: |
          echo "Cuda install location: ${{ env.CUDA_PATH }}"
          $dst='.\build\bin\cudart\'
          robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\bin\x64" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          7z a cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip $dst\*

      - name: Upload Cuda runtime
        uses: actions/upload-artifact@v4
        with:
          path: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
  windows-sycl:
    runs-on: windows-2022
    defaults:
      run:
        shell: bash
    env:
      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe
      WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
      ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-sycl
          variant: ccache
          evict-old-files: 1d

      - name: Install
        run: |
          scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
          cmake -G "Ninja" -B build ^
            -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx ^
            -DCMAKE_BUILD_TYPE=Release ^
            -DGGML_BACKEND_DL=ON -DBUILD_SHARED_LIBS=ON ^
            -DGGML_CPU=OFF -DGGML_SYCL=ON ^
            -DLLAMA_BUILD_BORINGSSL=ON
          cmake --build build --target ggml-sycl -j

      - name: Build the release package
        id: pack_artifacts
        run: |
          echo "cp oneAPI running time dll files in ${{ env.ONEAPI_ROOT }} to ./build/bin"
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.5.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero_v2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl8.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl-ls.exe" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libsycl-fallback-bfloat16.spv" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libsycl-native-bfloat16.spv" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/tcm.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/libhwloc-15.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/umf/latest/bin/umf.dll" ./build/bin
          echo "cp oneAPI running time dll files to ./build/bin done"
          7z a -snl llama-bin-win-sycl-x64.zip ./build/bin/*

      - name: Upload the release package
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-sycl-x64.zip
          name: llama-bin-win-sycl-x64.zip
  windows-hip:
    runs-on: windows-2022
    env:
      HIPSDK_INSTALLER_VERSION: "25.Q3"
    strategy:
      matrix:
        include:
          - name: "radeon"
            gpu_targets: "gfx1151;gfx1200;gfx1201;gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Grab rocWMMA package
        id: grab_rocwmma
        run: |
          curl -o rocwmma.deb "https://repo.radeon.com/rocm/apt/7.0.1/pool/main/r/rocwmma-dev/rocwmma-dev_2.0.0.70001-42~24.04_amd64.deb"
          7z x rocwmma.deb
          7z x data.tar

      - name: Cache ROCm Installation
        id: cache-rocm
        uses: actions/cache@v4
        with:
          path: C:\Program Files\AMD\ROCm
          key: rocm-${{ env.HIPSDK_INSTALLER_VERSION }}-${{ runner.os }}

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-hip-${{ env.HIPSDK_INSTALLER_VERSION }}-${{ matrix.name }}-x64
          evict-old-files: 1d

      - name: Install ROCm
        if: steps.cache-rocm.outputs.cache-hit != 'true'
        id: depends
        run: |
          $ErrorActionPreference = "Stop"
          write-host "Downloading AMD HIP SDK Installer"
          Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-${{ env.HIPSDK_INSTALLER_VERSION }}-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
          write-host "Installing AMD HIP SDK"
          $proc = Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -PassThru
          $completed = $proc.WaitForExit(600000)
          if (-not $completed) {
            Write-Error "ROCm installation timed out after 10 minutes. Killing the process"
            $proc.Kill()
            exit 1
          }
          if ($proc.ExitCode -ne 0) {
            Write-Error "ROCm installation failed with exit code $($proc.ExitCode)"
            exit 1
          }
          write-host "Completed AMD HIP SDK installation"

      - name: Verify ROCm
        id: verify
        run: |
          # Find and test ROCm installation
          $clangPath = Get-ChildItem 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | Select-Object -First 1
          if (-not $clangPath) {
            Write-Error "ROCm installation not found"
            exit 1
          }
          & $clangPath.FullName --version

      - name: Build
        id: cmake_build
        run: |
          $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
          $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
          cmake -G "Unix Makefiles" -B build -S . `
            -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" `
            -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" `
            -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/opt/rocm-7.0.1/include/ -Wno-ignored-attributes -Wno-nested-anon-types" `
            -DCMAKE_BUILD_TYPE=Release `
            -DGGML_BACKEND_DL=ON `
            -DGGML_NATIVE=OFF `
            -DGGML_CPU=OFF `
            -DAMDGPU_TARGETS="${{ matrix.gpu_targets }}" `
            -DGGML_HIP_ROCWMMA_FATTN=ON `
            -DGGML_HIP=ON `
            -DLLAMA_BUILD_BORINGSSL=ON
          cmake --build build --target ggml-hip -j ${env:NUMBER_OF_PROCESSORS}
          md "build\bin\rocblas\library\"
          md "build\bin\hipblaslt\library"
          cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\hipblaslt.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\"
          cp "${env:HIP_PATH}\bin\hipblaslt\library\*" "build\bin\hipblaslt\library\"

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-hip-${{ matrix.name }}-x64.zip .\build\bin\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-hip-${{ matrix.name }}-x64.zip
          name: llama-bin-win-hip-${{ matrix.name }}-x64.zip
  ios-xcode-build:
    runs-on: macos-15
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Xcode
        run: |
          sudo xcode-select -s /Applications/Xcode_16.4.app

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build -G Xcode \
            -DGGML_METAL_USE_BF16=ON \
            -DGGML_METAL_EMBED_LIBRARY=ON \
            -DLLAMA_OPENSSL=OFF \
            -DLLAMA_BUILD_EXAMPLES=OFF \
            -DLLAMA_BUILD_TOOLS=OFF \
            -DLLAMA_BUILD_TESTS=OFF \
            -DLLAMA_BUILD_SERVER=OFF \
            -DCMAKE_SYSTEM_NAME=iOS \
            -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
            -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO

      - name: xcodebuild for swift package
        id: xcodebuild
        run: |
          ./build-xcframework.sh

      - name: Build Xcode project
        run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          # Zip file is required for Swift Package Manager, which does not support tar.gz for binary targets.
          # For more details, see https://developer.apple.com/documentation/xcode/distributing-binary-frameworks-as-swift-packages
          zip -r -y llama-${{ steps.tag.outputs.name }}-xcframework.zip build-apple/llama.xcframework

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-xcframework.zip
          name: llama-${{ steps.tag.outputs.name }}-xcframework.zip
  openEuler-cann:
    strategy:
      matrix:
        arch: [x86, aarch64]
        chip_type: ['910b', '310p']
        build: ['Release']
    runs-on: ${{ matrix.arch == 'aarch64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Free up disk space
        uses: ggml-org/free-disk-space@v1.3.1
        with:
          tool-cache: true

      - name: Set container image
        id: cann-image
        run: |
          image="ascendai/cann:${{ matrix.chip_type == '910b' && '8.3.rc2-910b-openeuler24.03-py3.11' || '8.3.rc2-310p-openeuler24.03-py3.11' }}"
          echo "image=${image}" >> "${GITHUB_OUTPUT}"

      - name: Pull container image
        run: docker pull "${{ steps.cann-image.outputs.image }}"

      - name: Build
        env:
          BUILD_TYPE: ${{ matrix.build }}
          SOC_TYPE: ascend${{ matrix.chip_type }}
        run: |
          HOST_UID=$(id -u)
          HOST_GID=$(id -g)
          docker run --rm \
            -v "${PWD}:/workspace" \
            -w /workspace \
            -e SOC_TYPE=${SOC_TYPE} \
            -e BUILD_TYPE=${BUILD_TYPE} \
            "${{ steps.cann-image.outputs.image }}" \
            bash -lc '
              set -e
              yum install -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs git gcc gcc-c++ make cmake openssl-devel
              yum clean all && rm -rf /var/cache/yum
              git config --global --add safe.directory "/workspace"
              export LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/$(uname -m)-linux/devlib/:${LD_LIBRARY_PATH}
              cmake -S . -B build \
                -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
                -DGGML_CANN=on \
                -DSOC_TYPE=${SOC_TYPE}
              cmake --build build -j $(nproc)
              chown -R '"${HOST_UID}"':'"${HOST_GID}"' /workspace/build
            '

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz
          name: llama-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz
  release:
    if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}

    # Fine-grant permission
    # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write # for creating release

    runs-on: ubuntu-latest

    needs:
      - windows
      - windows-cpu
      - windows-cuda
      - windows-sycl
      - windows-hip
      - ubuntu-22-cpu
      - ubuntu-22-vulkan
      - macOS-arm64
      - macOS-x64
      - ios-xcode-build
      - openEuler-cann

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Download artifacts
        id: download-artifact
        uses: actions/download-artifact@v4
        with:
          path: ./artifact
          merge-multiple: true

      - name: Move artifacts
        id: move_artifacts
        run: |
          mkdir -p release

          echo "Adding CPU backend files to existing zips..."
          for arch in x64 arm64; do
            cpu_zip="artifact/llama-bin-win-cpu-${arch}.zip"
            temp_dir=$(mktemp -d)
            echo "Extracting CPU backend for $arch..."
            unzip "$cpu_zip" -d "$temp_dir"

            echo "Adding CPU files to $arch zips..."
            for target_zip in artifact/llama-bin-win-*-${arch}.zip; do
              if [[ "$target_zip" == "$cpu_zip" ]]; then
                continue
              fi
              echo "Adding CPU backend to $(basename "$target_zip")"
              realpath_target_zip=$(realpath "$target_zip")
              (cd "$temp_dir" && zip -r "$realpath_target_zip" .)
            done

            rm -rf "$temp_dir"
          done

          echo "Renaming and moving zips to release..."
          for zip_file in artifact/llama-bin-win-*.zip; do
            base_name=$(basename "$zip_file" .zip)
            zip_name="llama-${{ steps.tag.outputs.name }}-${base_name#llama-}.zip"
            echo "Moving $zip_file to release/$zip_name"
            mv "$zip_file" "release/$zip_name"
          done

          echo "Moving other artifacts..."
          mv -v artifact/*.zip release
          mv -v artifact/*.tar.gz release

      - name: Create release
        id: create_release
        uses: ggml-org/action-create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ steps.tag.outputs.name }}
          body: |
            <details open>
            ${{ github.event.head_commit.message }}
            </details>
            **macOS/iOS:**
            - [macOS Apple Silicon (arm64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz)
            - [macOS Intel (x64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz)
            - [iOS XCFramework](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-xcframework.zip)
            **Linux:**
            - [Ubuntu x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.tar.gz)
            - [Ubuntu x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz)
            - [Ubuntu s390x (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-s390x.tar.gz)
            **Windows:**
            - [Windows x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-x64.zip)
            - [Windows arm64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-arm64.zip)
            - [Windows x64 (CUDA 12)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-12.4-x64.zip) - [CUDA 12.4 DLLs](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/cudart-llama-bin-win-cuda-12.4-x64.zip)
            - [Windows x64 (CUDA 13)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-13.1-x64.zip) - [CUDA 13.1 DLLs](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/cudart-llama-bin-win-cuda-13.1-x64.zip)
            - [Windows x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-vulkan-x64.zip)
            - [Windows x64 (SYCL)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip)
            - [Windows x64 (HIP)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-hip-radeon-x64.zip)
            **openEuler:**
            - [openEuler x86 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-x86.tar.gz)
            - [openEuler x86 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-x86.tar.gz)
            - [openEuler aarch64 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-aarch64.tar.gz)
            - [openEuler aarch64 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-aarch64.tar.gz)

      # NOTE(review): actions/github-script@v3 is deprecated; v7 moves this API to
      # github.rest.repos.uploadReleaseAsset — migrate script and version together.
      - name: Upload release
        id: upload_release
        uses: actions/github-script@v3
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const path = require('path');
            const fs = require('fs');
            const release_id = '${{ steps.create_release.outputs.id }}';
            for (let file of await fs.readdirSync('./release')) {
              if (path.extname(file) === '.zip' || file.endsWith('.tar.gz')) {
                console.log('uploadReleaseAsset', file);
                await github.repos.uploadReleaseAsset({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  release_id: release_id,
                  name: file,
                  data: await fs.readFileSync(`./release/${file}`)
                });
              }
            }