# .github/workflows/release.yml
  1. name: Release
  2. on:
  3. workflow_dispatch: # allows manual triggering
  4. inputs:
  5. create_release:
  6. description: 'Create new release'
  7. required: true
  8. type: boolean
  9. push:
  10. branches:
  11. - master
  12. paths: ['.github/workflows/release.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp']
  13. concurrency:
  14. group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  15. cancel-in-progress: true
  16. env:
  17. BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
  18. CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON"
  19. jobs:
  macOS-arm64:
    runs-on: macos-14

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-arm64
          evict-old-files: 1d

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build \
              -DCMAKE_INSTALL_RPATH='@loader_path' \
              -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
              -DLLAMA_FATAL_WARNINGS=ON \
              -DLLAMA_CURL=OFF \
              -DLLAMA_BUILD_BORINGSSL=ON \
              -DGGML_METAL_USE_BF16=ON \
              -DGGML_METAL_EMBED_LIBRARY=ON \
              -DGGML_RPC=ON \
              ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz -s ",./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz
          name: llama-bin-macos-arm64.tar.gz
  macOS-x64:
    runs-on: macos-15-intel

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-x64
          evict-old-files: 1d

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          # Metal is disabled due to intermittent failures with Github runners not having a GPU:
          # https://github.com/ggml-org/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
          cmake -B build \
              -DCMAKE_INSTALL_RPATH='@loader_path' \
              -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
              -DLLAMA_FATAL_WARNINGS=ON \
              -DLLAMA_CURL=OFF \
              -DLLAMA_BUILD_BORINGSSL=ON \
              -DGGML_METAL=OFF \
              -DGGML_RPC=ON \
              -DCMAKE_OSX_DEPLOYMENT_TARGET=13.3
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz -s ",./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz
          name: llama-bin-macos-x64.tar.gz
  ubuntu-22-cpu:
    strategy:
      matrix:
        include:
          - build: 'x64'
            os: ubuntu-22.04
          - build: 's390x'
            os: ubuntu-24.04-s390x
          # GGML_BACKEND_DL and GGML_CPU_ALL_VARIANTS are not currently supported on arm
          # - build: 'arm64'
          #   os: ubuntu-22.04-arm

    runs-on: ${{ matrix.os }}

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: ubuntu-cpu-cmake-${{ matrix.build }}
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential libssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
              -DCMAKE_INSTALL_RPATH='$ORIGIN' \
              -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
              -DGGML_BACKEND_DL=ON \
              -DGGML_NATIVE=OFF \
              -DGGML_CPU_ALL_VARIANTS=ON \
              -DLLAMA_FATAL_WARNINGS=ON \
              -DLLAMA_CURL=OFF \
              -DLLAMA_OPENSSL=ON \
              ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.tar.gz
          name: llama-bin-ubuntu-${{ matrix.build }}.tar.gz
  ubuntu-22-vulkan:
    runs-on: ubuntu-22.04

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: ubuntu-22-cmake-vulkan
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
          sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
          sudo apt-get update -y
          sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
              -DCMAKE_INSTALL_RPATH='$ORIGIN' \
              -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
              -DLLAMA_CURL=OFF \
              -DLLAMA_OPENSSL=ON \
              -DGGML_BACKEND_DL=ON \
              -DGGML_NATIVE=OFF \
              -DGGML_CPU_ALL_VARIANTS=ON \
              -DGGML_VULKAN=ON \
              ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz
          name: llama-bin-ubuntu-vulkan-x64.tar.gz
  windows-cpu:
    runs-on: windows-2025

    strategy:
      matrix:
        include:
          - arch: 'x64'
          - arch: 'arm64'

    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-cpu-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Ninja
        run: |
          choco install ninja

      - name: Build
        shell: cmd
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ${{ matrix.arch == 'x64' && 'x64' || 'amd64_arm64' }}
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -D CMAKE_TOOLCHAIN_FILE=cmake/${{ matrix.arch }}-windows-llvm.cmake ^
            -DLLAMA_CURL=OFF ^
            -DLLAMA_BUILD_BORINGSSL=ON ^
            -DGGML_NATIVE=OFF ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_CPU_ALL_VARIANTS=${{ matrix.arch == 'x64' && 'ON' || 'OFF' }} ^
            -DGGML_OPENMP=ON ^
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          Copy-Item "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Redist\MSVC\14.44.35112\debug_nonredist\${{ matrix.arch }}\Microsoft.VC143.OpenMP.LLVM\libomp140.${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }}.dll" .\build\bin\Release\
          7z a -snl llama-bin-win-cpu-${{ matrix.arch }}.zip .\build\bin\Release\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cpu-${{ matrix.arch }}.zip
          name: llama-bin-win-cpu-${{ matrix.arch }}.zip
  windows:
    runs-on: windows-2025

    env:
      OPENBLAS_VERSION: 0.3.23
      VULKAN_VERSION: 1.4.313.2

    strategy:
      matrix:
        include:
          - backend: 'vulkan'
            arch: 'x64'
            defines: '-DGGML_VULKAN=ON'
            target: 'ggml-vulkan'
          - backend: 'opencl-adreno'
            arch: 'arm64'
            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
            target: 'ggml-opencl'

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-${{ matrix.backend }}-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Vulkan SDK
        id: get_vulkan
        if: ${{ matrix.backend == 'vulkan' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/vulkansdk-windows-X64-${env:VULKAN_VERSION}.exe"
          & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install
          Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}"
          Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin"

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Install OpenCL Headers and Libs
        id: install_opencl
        if: ${{ matrix.backend == 'opencl-adreno' && matrix.arch == 'arm64' }}
        run: |
          git clone https://github.com/KhronosGroup/OpenCL-Headers
          cd OpenCL-Headers
          cmake -B build `
            -DBUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build --target install
          git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader
          cd OpenCL-ICD-Loader
          cmake -B build-arm64-release `
            -A arm64 `
            -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build-arm64-release --target install --config release

      - name: Build
        id: cmake_build
        run: |
          cmake -S . -B build ${{ matrix.defines }} -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_BACKEND_DL=ON -DLLAMA_CURL=OFF
          cmake --build build --config Release --target ${{ matrix.target }}

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip .\build\bin\Release\${{ matrix.target }}.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
          name: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
  windows-cuda:
    runs-on: windows-2022

    strategy:
      matrix:
        cuda: ['12.4', '13.1']

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Install ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-cuda-${{ matrix.cuda }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Cuda Toolkit
        uses: ./.github/actions/windows-setup-cuda
        with:
          cuda_version: ${{ matrix.cuda }}

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Build
        id: cmake_build
        shell: cmd
        # TODO: Remove GGML_CUDA_CUB_3DOT2 flag once CCCL 3.2 is bundled within CTK and that CTK version is used in this project
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_NATIVE=OFF ^
            -DGGML_CPU=OFF ^
            -DGGML_CUDA=ON ^
            -DLLAMA_CURL=OFF ^
            -DGGML_CUDA_CUB_3DOT2=ON
          set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1
          cmake --build build --config Release -j %NINJA_JOBS% --target ggml-cuda

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip .\build\bin\Release\ggml-cuda.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip

      - name: Copy and pack Cuda runtime
        run: |
          echo "Cuda install location: ${{ env.CUDA_PATH }}"
          $dst='.\build\bin\cudart\'
          robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\bin\x64" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          7z a cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip $dst\*

      - name: Upload Cuda runtime
        uses: actions/upload-artifact@v4
        with:
          path: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
  windows-sycl:
    runs-on: windows-2022

    defaults:
      run:
        shell: bash

    env:
      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe
      WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
      ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-sycl
          variant: ccache
          evict-old-files: 1d

      - name: Install
        run: |
          scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
          cmake -G "Ninja" -B build ^
            -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx ^
            -DCMAKE_BUILD_TYPE=Release ^
            -DGGML_BACKEND_DL=ON -DBUILD_SHARED_LIBS=ON ^
            -DGGML_CPU=OFF -DGGML_SYCL=ON ^
            -DLLAMA_CURL=OFF
          cmake --build build --target ggml-sycl -j

      - name: Build the release package
        id: pack_artifacts
        run: |
          echo "cp oneAPI running time dll files in ${{ env.ONEAPI_ROOT }} to ./build/bin"
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.5.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero_v2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl8.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl-ls.exe" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libsycl-fallback-bfloat16.spv" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libsycl-native-bfloat16.spv" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/tcm.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/libhwloc-15.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/umf/latest/bin/umf.dll" ./build/bin
          echo "cp oneAPI running time dll files to ./build/bin done"
          7z a -snl llama-bin-win-sycl-x64.zip ./build/bin/*

      - name: Upload the release package
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-sycl-x64.zip
          name: llama-bin-win-sycl-x64.zip
  windows-hip:
    runs-on: windows-2022

    env:
      HIPSDK_INSTALLER_VERSION: "25.Q3"

    strategy:
      matrix:
        include:
          - name: "radeon"
            gpu_targets: "gfx1151;gfx1200;gfx1201;gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Grab rocWMMA package
        id: grab_rocwmma
        run: |
          curl -o rocwmma.deb "https://repo.radeon.com/rocm/apt/7.0.1/pool/main/r/rocwmma-dev/rocwmma-dev_2.0.0.70001-42~24.04_amd64.deb"
          7z x rocwmma.deb
          7z x data.tar

      - name: Cache ROCm Installation
        id: cache-rocm
        uses: actions/cache@v4
        with:
          path: C:\Program Files\AMD\ROCm
          key: rocm-${{ env.HIPSDK_INSTALLER_VERSION }}-${{ runner.os }}

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-hip-${{ env.HIPSDK_INSTALLER_VERSION }}-${{ matrix.name }}-x64
          evict-old-files: 1d

      - name: Install ROCm
        if: steps.cache-rocm.outputs.cache-hit != 'true'
        id: depends
        run: |
          $ErrorActionPreference = "Stop"
          write-host "Downloading AMD HIP SDK Installer"
          Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-${{ env.HIPSDK_INSTALLER_VERSION }}-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
          write-host "Installing AMD HIP SDK"
          $proc = Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -PassThru
          $completed = $proc.WaitForExit(600000)
          if (-not $completed) {
            Write-Error "ROCm installation timed out after 10 minutes. Killing the process"
            $proc.Kill()
            exit 1
          }
          if ($proc.ExitCode -ne 0) {
            Write-Error "ROCm installation failed with exit code $($proc.ExitCode)"
            exit 1
          }
          write-host "Completed AMD HIP SDK installation"

      - name: Verify ROCm
        id: verify
        run: |
          # Find and test ROCm installation
          $clangPath = Get-ChildItem 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | Select-Object -First 1
          if (-not $clangPath) {
            Write-Error "ROCm installation not found"
            exit 1
          }
          & $clangPath.FullName --version

      - name: Build
        id: cmake_build
        run: |
          $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
          $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
          cmake -G "Unix Makefiles" -B build -S . `
            -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" `
            -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" `
            -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/opt/rocm-7.0.1/include/ -Wno-ignored-attributes -Wno-nested-anon-types" `
            -DCMAKE_BUILD_TYPE=Release `
            -DGGML_BACKEND_DL=ON `
            -DGGML_NATIVE=OFF `
            -DGGML_CPU=OFF `
            -DAMDGPU_TARGETS="${{ matrix.gpu_targets }}" `
            -DGGML_HIP_ROCWMMA_FATTN=ON `
            -DGGML_HIP=ON `
            -DLLAMA_CURL=OFF
          cmake --build build --target ggml-hip -j ${env:NUMBER_OF_PROCESSORS}
          md "build\bin\rocblas\library\"
          md "build\bin\hipblaslt\library"
          cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\hipblaslt.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\"
          cp "${env:HIP_PATH}\bin\hipblaslt\library\*" "build\bin\hipblaslt\library\"

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-hip-${{ matrix.name }}-x64.zip .\build\bin\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-hip-${{ matrix.name }}-x64.zip
          name: llama-bin-win-hip-${{ matrix.name }}-x64.zip
  ios-xcode-build:
    runs-on: macos-15

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Xcode
        run: |
          sudo xcode-select -s /Applications/Xcode_16.4.app

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build -G Xcode \
              -DGGML_METAL_USE_BF16=ON \
              -DGGML_METAL_EMBED_LIBRARY=ON \
              -DLLAMA_CURL=OFF \
              -DLLAMA_BUILD_EXAMPLES=OFF \
              -DLLAMA_BUILD_TOOLS=OFF \
              -DLLAMA_BUILD_TESTS=OFF \
              -DLLAMA_BUILD_SERVER=OFF \
              -DCMAKE_SYSTEM_NAME=iOS \
              -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
              -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO

      - name: xcodebuild for swift package
        id: xcodebuild
        run: |
          ./build-xcframework.sh

      - name: Build Xcode project
        run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          # Zip file is required for Swift Package Manager, which does not support tar.gz for binary targets.
          # For more details, see https://developer.apple.com/documentation/xcode/distributing-binary-frameworks-as-swift-packages
          zip -r -y llama-${{ steps.tag.outputs.name }}-xcframework.zip build-apple/llama.xcframework

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-xcframework.zip
          name: llama-${{ steps.tag.outputs.name }}-xcframework.zip
  openEuler-cann:
    strategy:
      matrix:
        arch: [x86, aarch64]
        chip_type: ['910b', '310p']
        build: ['Release']

    runs-on: ${{ matrix.arch == 'aarch64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }}

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Free up disk space
        uses: ggml-org/free-disk-space@v1.3.1
        with:
          tool-cache: true

      - name: Set container image
        id: cann-image
        run: |
          image="ascendai/cann:${{ matrix.chip_type == '910b' && '8.3.rc2-910b-openeuler24.03-py3.11' || '8.3.rc2-310p-openeuler24.03-py3.11' }}"
          echo "image=${image}" >> "${GITHUB_OUTPUT}"

      - name: Pull container image
        run: docker pull "${{ steps.cann-image.outputs.image }}"

      - name: Build
        env:
          BUILD_TYPE: ${{ matrix.build }}
          SOC_TYPE: ascend${{ matrix.chip_type }}
        run: |
          HOST_UID=$(id -u)
          HOST_GID=$(id -g)
          docker run --rm \
            -v "${PWD}:/workspace" \
            -w /workspace \
            -e SOC_TYPE=${SOC_TYPE} \
            -e BUILD_TYPE=${BUILD_TYPE} \
            "${{ steps.cann-image.outputs.image }}" \
            bash -lc '
              set -e
              yum install -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs git gcc gcc-c++ make cmake openssl-devel
              yum clean all && rm -rf /var/cache/yum
              git config --global --add safe.directory "/workspace"
              export LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/$(uname -m)-linux/devlib/:${LD_LIBRARY_PATH}
              cmake -S . -B build \
                -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
                -DLLAMA_CURL=OFF \
                -DLLAMA_OPENSSL=ON \
                -DGGML_CANN=on \
                -DSOC_TYPE=${SOC_TYPE}
              cmake --build build -j $(nproc)
              chown -R '"${HOST_UID}"':'"${HOST_GID}"' /workspace/build
            '

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz
          name: llama-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz
  release:
    if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}

    # Fine-grant permission
    # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write # for creating release

    runs-on: ubuntu-latest

    needs:
      - windows
      - windows-cpu
      - windows-cuda
      - windows-sycl
      - windows-hip
      - ubuntu-22-cpu
      - ubuntu-22-vulkan
      - macOS-arm64
      - macOS-x64
      - ios-xcode-build
      - openEuler-cann

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Download artifacts
        id: download-artifact
        uses: actions/download-artifact@v4
        with:
          path: ./artifact
          merge-multiple: true

      - name: Move artifacts
        id: move_artifacts
        run: |
          mkdir -p release

          echo "Adding CPU backend files to existing zips..."
          for arch in x64 arm64; do
            cpu_zip="artifact/llama-bin-win-cpu-${arch}.zip"
            temp_dir=$(mktemp -d)
            echo "Extracting CPU backend for $arch..."
            unzip "$cpu_zip" -d "$temp_dir"

            echo "Adding CPU files to $arch zips..."
            for target_zip in artifact/llama-bin-win-*-${arch}.zip; do
              if [[ "$target_zip" == "$cpu_zip" ]]; then
                continue
              fi
              echo "Adding CPU backend to $(basename "$target_zip")"
              realpath_target_zip=$(realpath "$target_zip")
              (cd "$temp_dir" && zip -r "$realpath_target_zip" .)
            done

            rm -rf "$temp_dir"
          done

          echo "Renaming and moving zips to release..."
          for zip_file in artifact/llama-bin-win-*.zip; do
            base_name=$(basename "$zip_file" .zip)
            zip_name="llama-${{ steps.tag.outputs.name }}-${base_name#llama-}.zip"
            echo "Moving $zip_file to release/$zip_name"
            mv "$zip_file" "release/$zip_name"
          done

          echo "Moving other artifacts..."
          mv -v artifact/*.zip release
          mv -v artifact/*.tar.gz release

      - name: Create release
        id: create_release
        uses: ggml-org/action-create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ steps.tag.outputs.name }}
          body: |
            <details open>

            ${{ github.event.head_commit.message }}

            </details>

            **macOS/iOS:**
            - [macOS Apple Silicon (arm64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz)
            - [macOS Intel (x64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz)
            - [iOS XCFramework](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-xcframework.zip)

            **Linux:**
            - [Ubuntu x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.tar.gz)
            - [Ubuntu x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz)
            - [Ubuntu s390x (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-s390x.tar.gz)

            **Windows:**
            - [Windows x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-x64.zip)
            - [Windows arm64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-arm64.zip)
            - [Windows x64 (CUDA 12)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-12.4-x64.zip) - [CUDA 12.4 DLLs](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/cudart-llama-bin-win-cuda-12.4-x64.zip)
            - [Windows x64 (CUDA 13)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-13.1-x64.zip) - [CUDA 13.1 DLLs](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/cudart-llama-bin-win-cuda-13.1-x64.zip)
            - [Windows x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-vulkan-x64.zip)
            - [Windows x64 (SYCL)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip)
            - [Windows x64 (HIP)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-hip-radeon-x64.zip)

            **openEuler:**
            - [openEuler x86 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-x86.tar.gz)
            - [openEuler x86 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-x86.tar.gz)
            - [openEuler aarch64 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-aarch64.tar.gz)
            - [openEuler aarch64 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-aarch64.tar.gz)

      - name: Upload release
        id: upload_release
        # NOTE(review): github-script@v3 runs on an outdated Node runtime and uses the legacy
        # `github.repos.*` API surface (v5+ requires `github.rest.repos.*`) — consider upgrading
        uses: actions/github-script@v3
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const path = require('path');
            const fs = require('fs');
            const release_id = '${{ steps.create_release.outputs.id }}';
            for (let file of await fs.readdirSync('./release')) {
              if (path.extname(file) === '.zip' || file.endsWith('.tar.gz')) {
                console.log('uploadReleaseAsset', file);
                await github.repos.uploadReleaseAsset({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  release_id: release_id,
                  name: file,
                  data: await fs.readFileSync(`./release/${file}`)
                });
              }
            }