# .github/workflows/release.yml

name: Release

on:
  workflow_dispatch: # allows manual triggering
    inputs:
      create_release:
        description: 'Create new release'
        required: true
        type: boolean
  push:
    branches:
      - master
    paths: ['.github/workflows/release.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp']

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  cancel-in-progress: true

env:
  BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
  CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON"
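
# Each job below builds the release artifacts for one platform/backend and
# uploads them as workflow artifacts; the final `release` job collects them,
# merges the Windows CPU backend into the per-backend zips, and publishes
# the GitHub release.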
jobs:
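  # macOS Apple Silicon build: Metal (BF16 + embedded shader library) and RPC
  # are enabled, and the binaries ship as a tar.gz together with the LICENSE.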
  macOS-arm64:
    runs-on: macos-14

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-arm64
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        continue-on-error: true
        run: |
          brew update
          brew install curl

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='@loader_path' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            -DGGML_METAL_USE_BF16=ON \
            -DGGML_METAL_EMBED_LIBRARY=ON \
            -DGGML_RPC=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz -s ",./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz
          name: llama-bin-macos-arm64.tar.gz
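
  # macOS Intel build. Metal is off here (the hosted x64 runners have no
  # usable GPU, per the comment in the build step), and the deployment
  # target keeps compatibility back to macOS 13.3.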
  macOS-x64:
    runs-on: macos-15-intel

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-x64
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        continue-on-error: true
        run: |
          brew update
          brew install curl

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          # Metal is disabled due to intermittent failures with GitHub runners not having a GPU:
          # https://github.com/ggml-org/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='@loader_path' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            -DGGML_METAL=OFF \
            -DGGML_RPC=ON \
            -DCMAKE_OSX_DEPLOYMENT_TARGET=13.3
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz -s ",./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz
          name: llama-bin-macos-x64.tar.gz
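
  # Linux CPU-only builds (x64 and s390x). GGML_BACKEND_DL plus
  # GGML_CPU_ALL_VARIANTS compile every CPU variant as a loadable backend, so
  # one archive covers any instruction-set level of the target arch. The tar
  # --transform in "Pack artifacts" prefixes entries with the tag directory,
  # e.g. ./llama-cli becomes llama-<tag>/llama-cli (path for illustration).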
  ubuntu-22-cpu:
    strategy:
      matrix:
        include:
          - build: 'x64'
            os: ubuntu-22.04
          - build: 's390x'
            os: ubuntu-24.04-s390x
          # GGML_BACKEND_DL and GGML_CPU_ALL_VARIANTS are not currently supported on arm
          # - build: 'arm64'
          #   os: ubuntu-22.04-arm

    runs-on: ${{ matrix.os }}

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: ubuntu-cpu-cmake-${{ matrix.build }}
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential libcurl4-openssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='$ORIGIN' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DGGML_BACKEND_DL=ON \
            -DGGML_NATIVE=OFF \
            -DGGML_CPU_ALL_VARIANTS=ON \
            -DLLAMA_FATAL_WARNINGS=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.tar.gz
          name: llama-bin-ubuntu-${{ matrix.build }}.tar.gz
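
  # Linux Vulkan build on top of the same dynamically loaded CPU variants;
  # the Vulkan SDK comes from the LunarG apt repository for jammy.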
  ubuntu-22-vulkan:
    runs-on: ubuntu-22.04

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: ubuntu-22-cmake-vulkan
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
          sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
          sudo apt-get update -y
          sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libcurl4-openssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
            -DCMAKE_INSTALL_RPATH='$ORIGIN' \
            -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
            -DGGML_BACKEND_DL=ON \
            -DGGML_NATIVE=OFF \
            -DGGML_CPU_ALL_VARIANTS=ON \
            -DGGML_VULKAN=ON \
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz
          name: llama-bin-ubuntu-vulkan-x64.tar.gz
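
  # Windows CPU builds for x64 and arm64 using the LLVM toolchain files in
  # cmake/. Note the arch-dependent expressions: vcvarsall gets 'x64' or the
  # 'amd64_arm64' cross toolset, and GGML_CPU_ALL_VARIANTS is only ON for x64.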
  windows-cpu:
    runs-on: windows-2025

    strategy:
      matrix:
        include:
          - arch: 'x64'
          - arch: 'arm64'

    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-cpu-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Ninja
        run: |
          choco install ninja

      - name: libCURL
        id: get_libcurl
        uses: ./.github/actions/windows-setup-curl
        with:
          architecture: ${{ matrix.arch == 'x64' && 'win64' || 'win64a' }}

      - name: Build
        shell: cmd
        env:
          CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ${{ matrix.arch == 'x64' && 'x64' || 'amd64_arm64' }}
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -D CMAKE_TOOLCHAIN_FILE=cmake/${{ matrix.arch }}-windows-llvm.cmake ^
            -DGGML_NATIVE=OFF ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_CPU_ALL_VARIANTS=${{ matrix.arch == 'x64' && 'ON' || 'OFF' }} ^
            -DGGML_OPENMP=ON ^
            -DCURL_LIBRARY="%CURL_PATH%/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="%CURL_PATH%/include" ^
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release

      - name: Pack artifacts
        id: pack_artifacts
        env:
          CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
        run: |
          Copy-Item $env:CURL_PATH\bin\libcurl-${{ matrix.arch }}.dll .\build\bin\Release\
          Copy-Item "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Redist\MSVC\14.44.35112\debug_nonredist\${{ matrix.arch }}\Microsoft.VC143.OpenMP.LLVM\libomp140.${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }}.dll" .\build\bin\Release\
          7z a -snl llama-bin-win-cpu-${{ matrix.arch }}.zip .\build\bin\Release\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cpu-${{ matrix.arch }}.zip
          name: llama-bin-win-cpu-${{ matrix.arch }}.zip
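
  # Windows GPU backends built as standalone DLLs (GGML_CPU=OFF,
  # GGML_BACKEND_DL=ON): Vulkan on x64 and OpenCL with Adreno kernels on
  # arm64. Only the backend DLL is zipped here; the release job later merges
  # in the matching CPU zip so each archive is self-contained.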
  windows:
    runs-on: windows-2025

    env:
      OPENBLAS_VERSION: 0.3.23
      VULKAN_VERSION: 1.4.313.2

    strategy:
      matrix:
        include:
          - backend: 'vulkan'
            arch: 'x64'
            defines: '-DGGML_VULKAN=ON'
            target: 'ggml-vulkan'
          - backend: 'opencl-adreno'
            arch: 'arm64'
            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
            target: 'ggml-opencl'

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-${{ matrix.backend }}-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Vulkan SDK
        id: get_vulkan
        if: ${{ matrix.backend == 'vulkan' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/vulkansdk-windows-X64-${env:VULKAN_VERSION}.exe"
          & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install
          Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}"
          Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin"

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Install OpenCL Headers and Libs
        id: install_opencl
        if: ${{ matrix.backend == 'opencl-adreno' && matrix.arch == 'arm64' }}
        run: |
          git clone https://github.com/KhronosGroup/OpenCL-Headers
          cd OpenCL-Headers
          cmake -B build `
            -DBUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build --target install
          git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader
          cd OpenCL-ICD-Loader
          cmake -B build-arm64-release `
            -A arm64 `
            -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build-arm64-release --target install --config release

      - name: Build
        id: cmake_build
        run: |
          cmake -S . -B build ${{ matrix.defines }} -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_BACKEND_DL=ON -DLLAMA_CURL=OFF
          cmake --build build --config Release --target ${{ matrix.target }}

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip .\build\bin\Release\${{ matrix.target }}.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
          name: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
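
  # CUDA backend DLLs, built once per toolkit version. The matching
  # cudart/cublas runtime DLLs are packed and uploaded as a separate
  # cudart-*.zip, presumably so users with a local CUDA install can skip
  # the larger download.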
  windows-cuda:
    runs-on: windows-2022

    strategy:
      matrix:
        cuda: ['12.4', '13.1']

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Install ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-cuda-${{ matrix.cuda }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Cuda Toolkit
        uses: ./.github/actions/windows-setup-cuda
        with:
          cuda_version: ${{ matrix.cuda }}

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_NATIVE=OFF ^
            -DGGML_CPU=OFF ^
            -DGGML_CUDA=ON ^
            -DLLAMA_CURL=OFF
          set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1
          cmake --build build --config Release -j %NINJA_JOBS% --target ggml-cuda

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip .\build\bin\Release\ggml-cuda.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip

      - name: Copy and pack Cuda runtime
        run: |
          echo "Cuda install location: ${{ env.CUDA_PATH }}"
          $dst='.\build\bin\cudart\'
          robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\bin\x64" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          7z a cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip $dst\*

      - name: Upload Cuda runtime
        uses: actions/upload-artifact@v4
        with:
          path: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
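
  # SYCL (Intel oneAPI) build. The deep-learning-essentials installer
  # provides the DPC++ compiler and MKL; the pack step bundles the oneAPI
  # runtime DLLs next to the binaries so the zip can run without a local
  # oneAPI installation.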
  windows-sycl:
    runs-on: windows-2022

    defaults:
      run:
        shell: bash

    env:
      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe
      WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
      ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-sycl
          variant: ccache
          evict-old-files: 1d

      - name: Install
        run: |
          scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
          cmake -G "Ninja" -B build ^
            -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx ^
            -DCMAKE_BUILD_TYPE=Release ^
            -DGGML_BACKEND_DL=ON -DBUILD_SHARED_LIBS=ON ^
            -DGGML_CPU=OFF -DGGML_SYCL=ON ^
            -DLLAMA_CURL=OFF
          cmake --build build --target ggml-sycl -j

      - name: Build the release package
        id: pack_artifacts
        run: |
          echo "Copying oneAPI runtime DLLs from ${{ env.ONEAPI_ROOT }} to ./build/bin"
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.5.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero_v2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl8.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl-ls.exe" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libsycl-fallback-bfloat16.spv" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libsycl-native-bfloat16.spv" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/tcm.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/libhwloc-15.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/umf/latest/bin/umf.dll" ./build/bin
          echo "Done copying oneAPI runtime DLLs to ./build/bin"

          7z a -snl llama-bin-win-sycl-x64.zip ./build/bin/*

      - name: Upload the release package
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-sycl-x64.zip
          name: llama-bin-win-sycl-x64.zip
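
  # HIP/ROCm build for Radeon GPUs. The HIP SDK install is slow, so the
  # ROCm directory is cached between runs; rocWMMA headers are extracted
  # from the Debian package fetched below, and the -I flag in the build
  # step points into that unpacked tree.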
  windows-hip:
    runs-on: windows-2022

    env:
      HIPSDK_INSTALLER_VERSION: "25.Q3"

    strategy:
      matrix:
        include:
          - name: "radeon"
            gpu_targets: "gfx1151;gfx1200;gfx1201;gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Grab rocWMMA package
        id: grab_rocwmma
        run: |
          curl -o rocwmma.deb "https://repo.radeon.com/rocm/apt/7.0.1/pool/main/r/rocwmma-dev/rocwmma-dev_2.0.0.70001-42~24.04_amd64.deb"
          7z x rocwmma.deb
          7z x data.tar

      - name: Cache ROCm Installation
        id: cache-rocm
        uses: actions/cache@v4
        with:
          path: C:\Program Files\AMD\ROCm
          key: rocm-${{ env.HIPSDK_INSTALLER_VERSION }}-${{ runner.os }}

      - name: ccache
        uses: ggml-org/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-hip-${{ env.HIPSDK_INSTALLER_VERSION }}-${{ matrix.name }}-x64
          evict-old-files: 1d

      - name: Install ROCm
        if: steps.cache-rocm.outputs.cache-hit != 'true'
        id: depends
        run: |
          $ErrorActionPreference = "Stop"
          write-host "Downloading AMD HIP SDK Installer"
          Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-${{ env.HIPSDK_INSTALLER_VERSION }}-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
          write-host "Installing AMD HIP SDK"
          $proc = Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -PassThru
          $completed = $proc.WaitForExit(600000)
          if (-not $completed) {
            Write-Error "ROCm installation timed out after 10 minutes. Killing the process"
            $proc.Kill()
            exit 1
          }
          if ($proc.ExitCode -ne 0) {
            Write-Error "ROCm installation failed with exit code $($proc.ExitCode)"
            exit 1
          }
          write-host "Completed AMD HIP SDK installation"

      - name: Verify ROCm
        id: verify
        run: |
          # Find and test ROCm installation
          $clangPath = Get-ChildItem 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | Select-Object -First 1
          if (-not $clangPath) {
            Write-Error "ROCm installation not found"
            exit 1
          }
          & $clangPath.FullName --version

      - name: Build
        id: cmake_build
        run: |
          $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
          $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
          cmake -G "Unix Makefiles" -B build -S . `
            -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" `
            -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" `
            -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/opt/rocm-7.0.1/include/ -Wno-ignored-attributes -Wno-nested-anon-types" `
            -DCMAKE_BUILD_TYPE=Release `
            -DGGML_BACKEND_DL=ON `
            -DGGML_NATIVE=OFF `
            -DGGML_CPU=OFF `
            -DAMDGPU_TARGETS="${{ matrix.gpu_targets }}" `
            -DGGML_HIP_ROCWMMA_FATTN=ON `
            -DGGML_HIP=ON `
            -DLLAMA_CURL=OFF
          cmake --build build --target ggml-hip -j ${env:NUMBER_OF_PROCESSORS}
          md "build\bin\rocblas\library\"
          md "build\bin\hipblaslt\library"
          cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\hipblaslt.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\"
          cp "${env:HIP_PATH}\bin\hipblaslt\library\*" "build\bin\hipblaslt\library\"

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a -snl llama-bin-win-hip-${{ matrix.name }}-x64.zip .\build\bin\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-hip-${{ matrix.name }}-x64.zip
          name: llama-bin-win-hip-${{ matrix.name }}-x64.zip
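
  # iOS build: compiles the library for iOS, produces the Swift xcframework
  # via build-xcframework.sh, and builds the llama.swiftui example project
  # against it as a smoke test before uploading the xcframework tarball.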
  ios-xcode-build:
    runs-on: macos-15

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Xcode
        run: |
          sudo xcode-select -s /Applications/Xcode_16.4.app

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build -G Xcode \
            -DGGML_METAL_USE_BF16=ON \
            -DGGML_METAL_EMBED_LIBRARY=ON \
            -DLLAMA_CURL=OFF \
            -DLLAMA_BUILD_EXAMPLES=OFF \
            -DLLAMA_BUILD_TOOLS=OFF \
            -DLLAMA_BUILD_TESTS=OFF \
            -DLLAMA_BUILD_SERVER=OFF \
            -DCMAKE_SYSTEM_NAME=iOS \
            -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
            -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO

      - name: xcodebuild for swift package
        id: xcodebuild
        run: |
          ./build-xcframework.sh

      - name: Build Xcode project
        run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          tar -czvf llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz -C build-apple llama.xcframework

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz
          name: llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz
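
  # Ascend CANN builds for openEuler, run inside the ascendai/cann container.
  # Note the quoting in the chown at the end of the in-container script: the
  # '"${HOST_UID}"' sequence drops out of the single-quoted script, so
  # HOST_UID and HOST_GID expand on the runner, not inside the container,
  # restoring host ownership of build/.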
  openEuler-cann:
    strategy:
      matrix:
        arch: [x86, aarch64]
        chip_type: ['910b', '310p']
        build: ['Release']

    runs-on: ${{ matrix.arch == 'aarch64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }}

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Free up disk space
        uses: ggml-org/free-disk-space@v1.3.1
        with:
          tool-cache: true

      - name: Set container image
        id: cann-image
        run: |
          image="ascendai/cann:${{ matrix.chip_type == '910b' && '8.3.rc2-910b-openeuler24.03-py3.11' || '8.3.rc2-310p-openeuler24.03-py3.11' }}"
          echo "image=${image}" >> "${GITHUB_OUTPUT}"

      - name: Pull container image
        run: docker pull "${{ steps.cann-image.outputs.image }}"

      - name: Build
        env:
          BUILD_TYPE: ${{ matrix.build }}
          SOC_TYPE: ascend${{ matrix.chip_type }}
        run: |
          HOST_UID=$(id -u)
          HOST_GID=$(id -g)

          docker run --rm \
            -v "${PWD}:/workspace" \
            -w /workspace \
            -e SOC_TYPE=${SOC_TYPE} \
            -e BUILD_TYPE=${BUILD_TYPE} \
            "${{ steps.cann-image.outputs.image }}" \
            bash -lc '
              set -e
              yum install -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs git gcc gcc-c++ make cmake libcurl-devel
              yum clean all && rm -rf /var/cache/yum
              git config --global --add safe.directory "/workspace"
              export LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/$(uname -m)-linux/devlib/:${LD_LIBRARY_PATH}
              cmake -S . -B build \
                -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
                -DGGML_CANN=on \
                -DSOC_TYPE=${SOC_TYPE}
              cmake --build build -j $(nproc)
              chown -R '"${HOST_UID}"':'"${HOST_GID}"' /workspace/build
            '

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        run: |
          cp LICENSE ./build/bin/
          tar -czvf llama-${{ steps.tag.outputs.name }}-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz
          name: llama-bin-${{ matrix.chip_type }}-openEuler-${{ matrix.arch }}.tar.gz
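
  # Publishes the release: for each Windows arch, the CPU zip's contents are
  # appended to every other zip of that arch (so e.g. the Vulkan package also
  # contains the CPU backend), then all artifacts are renamed to include the
  # tag and attached to a GitHub release.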
  release:
    if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}

    # Fine-grained permissions
    # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write # for creating release

    runs-on: ubuntu-latest

    needs:
      - windows
      - windows-cpu
      - windows-cuda
      - windows-sycl
      - windows-hip
      - ubuntu-22-cpu
      - ubuntu-22-vulkan
      - macOS-arm64
      - macOS-x64
      - ios-xcode-build
      - openEuler-cann

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Download artifacts
        id: download-artifact
        uses: actions/download-artifact@v4
        with:
          path: ./artifact
          merge-multiple: true

      - name: Move artifacts
        id: move_artifacts
        run: |
          mkdir -p release

          echo "Adding CPU backend files to existing zips..."
          for arch in x64 arm64; do
            cpu_zip="artifact/llama-bin-win-cpu-${arch}.zip"
            temp_dir=$(mktemp -d)
            echo "Extracting CPU backend for $arch..."
            unzip "$cpu_zip" -d "$temp_dir"

            echo "Adding CPU files to $arch zips..."
            for target_zip in artifact/llama-bin-win-*-${arch}.zip; do
              if [[ "$target_zip" == "$cpu_zip" ]]; then
                continue
              fi
              echo "Adding CPU backend to $(basename "$target_zip")"
              realpath_target_zip=$(realpath "$target_zip")
              (cd "$temp_dir" && zip -r "$realpath_target_zip" .)
            done

            rm -rf "$temp_dir"
          done

          echo "Renaming and moving zips to release..."
          for zip_file in artifact/llama-bin-win-*.zip; do
            base_name=$(basename "$zip_file" .zip)
            zip_name="llama-${{ steps.tag.outputs.name }}-${base_name#llama-}.zip"
            echo "Moving $zip_file to release/$zip_name"
            mv "$zip_file" "release/$zip_name"
          done

          echo "Moving other artifacts..."
          mv -v artifact/*.zip release
          mv -v artifact/*.tar.gz release

      - name: Create release
        id: create_release
        uses: ggml-org/action-create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ steps.tag.outputs.name }}
          body: |
            <details open>
            ${{ github.event.head_commit.message }}
            </details>

            **macOS/iOS:**
            - [macOS Apple Silicon (arm64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz)
            - [macOS Intel (x64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz)
            - [iOS XCFramework](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz)

            **Linux:**
            - [Ubuntu x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.tar.gz)
            - [Ubuntu x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz)
            - [Ubuntu s390x (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-s390x.tar.gz)

            **Windows:**
            - [Windows x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-x64.zip)
            - [Windows arm64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-arm64.zip)
            - [Windows x64 (CUDA 12)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-12.4-x64.zip) - [CUDA 12.4 DLLs](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/cudart-llama-bin-win-cuda-12.4-x64.zip)
            - [Windows x64 (CUDA 13)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-13.1-x64.zip) - [CUDA 13.1 DLLs](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/cudart-llama-bin-win-cuda-13.1-x64.zip)
            - [Windows x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-vulkan-x64.zip)
            - [Windows x64 (SYCL)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip)
            - [Windows x64 (HIP)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-hip-radeon-x64.zip)

            **openEuler:**
            - [openEuler x86 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-x86.tar.gz)
            - [openEuler x86 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-x86.tar.gz)
            - [openEuler aarch64 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-aarch64.tar.gz)
            - [openEuler aarch64 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-aarch64.tar.gz)

      - name: Upload release
        id: upload_release
        uses: actions/github-script@v3
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const path = require('path');
            const fs = require('fs');
            const release_id = '${{ steps.create_release.outputs.id }}';
            for (let file of await fs.readdirSync('./release')) {
              if (path.extname(file) === '.zip' || file.endsWith('.tar.gz')) {
                console.log('uploadReleaseAsset', file);
                await github.repos.uploadReleaseAsset({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  release_id: release_id,
                  name: file,
                  data: await fs.readFileSync(`./release/${file}`)
                });
              }
            }