# .github/workflows/release.yml

name: Release

on:
  workflow_dispatch: # allows manual triggering
    inputs:
      create_release:
        description: 'Create new release'
        required: true
        type: boolean
  push:
    branches:
      - master
    paths: ['.github/workflows/release.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp']

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  cancel-in-progress: true

env:
  BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
  CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON"

jobs:
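  # Apple Silicon (arm64) build on macos-14 with Metal; packages build/bin into a macos-arm64 zip.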
  macOS-arm64:
    runs-on: macos-14

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-arm64
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        continue-on-error: true
        run: |
          brew update
          brew install curl

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build \
              -DCMAKE_BUILD_RPATH="@loader_path" \
              -DLLAMA_FATAL_WARNINGS=ON \
              -DGGML_METAL_USE_BF16=ON \
              -DGGML_METAL_EMBED_LIBRARY=ON \
              -DGGML_RPC=ON \
              ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip ./build/bin/*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip
          name: llama-bin-macos-arm64.zip
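
  # Intel (x64) macOS build on macos-13; Metal is disabled (see the build step) and build/bin is packaged as a macos-x64 zip.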
  macOS-x64:
    runs-on: macos-13

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: macOS-latest-cmake-x64
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        continue-on-error: true
        run: |
          brew update
          brew install curl

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          # Metal is disabled due to intermittent failures with GitHub runners not having a GPU:
          # https://github.com/ggml-org/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
          cmake -B build \
              -DCMAKE_BUILD_RPATH="@loader_path" \
              -DLLAMA_FATAL_WARNINGS=ON \
              -DGGML_METAL=OFF \
              -DGGML_RPC=ON
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip ./build/bin/*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip
          name: llama-bin-macos-x64.zip
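
  # CPU-only Ubuntu 22.04 builds for x64 and arm64 runners; packages build/bin per architecture.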
  ubuntu-22-cpu:
    strategy:
      matrix:
        include:
          - build: 'x64'
            os: ubuntu-22.04
          - build: 'arm64'
            os: ubuntu-22.04-arm

    runs-on: ${{ matrix.os }}

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: ubuntu-cpu-cmake
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential libcurl4-openssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
              -DLLAMA_FATAL_WARNINGS=ON \
              ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.zip ./build/bin/*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.zip
          name: llama-bin-ubuntu-${{ matrix.build }}.zip
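
  # Ubuntu 22.04 build with the Vulkan backend, using the LunarG SDK packages.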
  ubuntu-22-vulkan:
    runs-on: ubuntu-22.04

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: ubuntu-22-cmake-vulkan
          evict-old-files: 1d

      - name: Dependencies
        id: depends
        run: |
          wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
          sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
          sudo apt-get update -y
          sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libcurl4-openssl-dev

      - name: Build
        id: cmake_build
        run: |
          cmake -B build \
              -DGGML_VULKAN=ON \
              ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release -j $(nproc)

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          cp LICENSE ./build/bin/
          zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip ./build/bin/*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip
          name: llama-bin-ubuntu-vulkan-x64.zip
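
  # Windows CPU builds (x64 and arm64) with the LLVM toolchain files and all CPU variants as loadable backends; libcurl is bundled into the zip.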
  windows-cpu:
    runs-on: windows-latest

    strategy:
      matrix:
        include:
          - arch: 'x64'
          - arch: 'arm64'

    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-cpu-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Ninja
        run: |
          choco install ninja

      - name: libCURL
        id: get_libcurl
        uses: ./.github/actions/windows-setup-curl
        with:
          architecture: ${{ matrix.arch == 'x64' && 'win64' || 'win64a' }}

      - name: Build
        env:
          CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
        run: |
          cmake -S . -B build -G "Ninja Multi-Config" `
              -D CMAKE_TOOLCHAIN_FILE=cmake/${{ matrix.arch }}-windows-llvm.cmake `
              -DGGML_NATIVE=OFF `
              -DGGML_BACKEND_DL=ON `
              -DGGML_CPU_ALL_VARIANTS=ON `
              -DGGML_OPENMP=OFF `
              -DCURL_LIBRARY="$env:CURL_PATH/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:CURL_PATH/include" `
              ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release

      - name: Pack artifacts
        id: pack_artifacts
        env:
          CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
        run: |
          Copy-Item $env:CURL_PATH\bin\libcurl-${{ matrix.arch }}.dll .\build\bin\Release\
          7z a llama-bin-win-cpu-${{ matrix.arch }}.zip .\build\bin\Release\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cpu-${{ matrix.arch }}.zip
          name: llama-bin-win-cpu-${{ matrix.arch }}.zip
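
  # Windows GPU backend DLLs built per matrix entry (Vulkan on x64, OpenCL/Adreno on arm64); only the backend DLL is packaged.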
  windows:
    runs-on: windows-latest

    env:
      OPENBLAS_VERSION: 0.3.23
      VULKAN_VERSION: 1.4.309.0

    strategy:
      matrix:
        include:
          - backend: 'vulkan'
            arch: 'x64'
            defines: '-DGGML_VULKAN=ON'
            target: 'ggml-vulkan'
          - backend: 'opencl-adreno'
            arch: 'arm64'
            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
            target: 'ggml-opencl'

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-${{ matrix.backend }}-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Vulkan SDK
        id: get_vulkan
        if: ${{ matrix.backend == 'vulkan' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe"
          & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install
          Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}"
          Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin"

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Install OpenCL Headers and Libs
        id: install_opencl
        if: ${{ matrix.backend == 'opencl-adreno' && matrix.arch == 'arm64' }}
        run: |
          git clone https://github.com/KhronosGroup/OpenCL-Headers
          cd OpenCL-Headers
          cmake -B build `
              -DBUILD_TESTING=OFF `
              -DOPENCL_HEADERS_BUILD_TESTING=OFF `
              -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF `
              -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build --target install
          git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader
          cd OpenCL-ICD-Loader
          cmake -B build-arm64-release `
              -A arm64 `
              -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" `
              -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build-arm64-release --target install --config release

      - name: Build
        id: cmake_build
        run: |
          cmake -S . -B build ${{ matrix.defines }} -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_BACKEND_DL=ON -DLLAMA_CURL=OFF
          cmake --build build --config Release --target ${{ matrix.target }}

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip .\build\bin\Release\${{ matrix.target }}.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
          name: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
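
  # Windows CUDA backend DLL for each CUDA version in the matrix, plus a separate cudart zip with the matching runtime DLLs.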
  windows-cuda:
    runs-on: windows-2019

    strategy:
      matrix:
        cuda: ['12.4', '11.7']

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Install ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-cuda-${{ matrix.cuda }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Cuda Toolkit
        uses: ./.github/actions/windows-setup-cuda
        with:
          cuda_version: ${{ matrix.cuda }}

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
          cmake -S . -B build -G "Ninja Multi-Config" ^
              -DGGML_BACKEND_DL=ON ^
              -DGGML_NATIVE=OFF ^
              -DGGML_CPU=OFF ^
              -DGGML_CUDA=ON ^
              -DLLAMA_CURL=OFF
          set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1
          cmake --build build --config Release -j %NINJA_JOBS% --target ggml-cuda

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip .\build\bin\Release\ggml-cuda.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip

      - name: Copy and pack Cuda runtime
        run: |
          echo "Cuda install location: ${{ env.CUDA_PATH }}"
          $dst='.\build\bin\cudart\'
          robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          7z a cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip $dst\*

      - name: Upload Cuda runtime
        uses: actions/upload-artifact@v4
        with:
          path: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
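
  # Windows SYCL build with the Intel oneAPI Base Toolkit; the required oneAPI runtime DLLs are copied into the package.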
  windows-sycl:
    runs-on: windows-latest

    defaults:
      run:
        shell: bash

    env:
      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/7cd9bba0-7aab-4e30-b3ae-2221006a4a05/intel-oneapi-base-toolkit-2025.1.1.34_offline.exe
      WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
      ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-sycl
          variant: ccache
          evict-old-files: 1d

      - name: Install
        run: |
          scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
          cmake -G "Ninja" -B build ^
              -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx ^
              -DCMAKE_BUILD_TYPE=Release ^
              -DGGML_BACKEND_DL=ON -DBUILD_SHARED_LIBS=ON ^
              -DGGML_CPU=OFF -DGGML_SYCL=ON ^
              -DLLAMA_CURL=OFF
          cmake --build build --target ggml-sycl -j

      - name: Build the release package
        id: pack_artifacts
        run: |
          echo "cp oneAPI runtime dll files in ${{ env.ONEAPI_ROOT }} to ./build/bin"
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.5.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl8.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin
          echo "cp oneAPI runtime dll files to ./build/bin done"
          7z a llama-bin-win-sycl-x64.zip ./build/bin/*

      - name: Upload the release package
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-sycl-x64.zip
          name: llama-bin-win-sycl-x64.zip
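
  # Windows HIP (ROCm) backend build for the listed AMD GPU targets; hipBLAS/rocBLAS runtime files are bundled.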
  windows-hip:
    runs-on: windows-latest

    strategy:
      matrix:
        include:
          - name: "radeon"
            gpu_targets: "gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Clone rocWMMA repository
        id: clone_rocwmma
        run: |
          git clone https://github.com/rocm/rocwmma --branch rocm-6.2.4 --depth 1

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-hip-${{ matrix.name }}-x64
          evict-old-files: 1d

      - name: Install
        id: depends
        run: |
          $ErrorActionPreference = "Stop"
          write-host "Downloading AMD HIP SDK Installer"
          Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
          write-host "Installing AMD HIP SDK"
          Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
          write-host "Completed AMD HIP SDK installation"

      - name: Verify ROCm
        id: verify
        run: |
          & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version

      - name: Build
        id: cmake_build
        run: |
          $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
          $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
          cmake -G "Unix Makefiles" -B build -S . `
              -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" `
              -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" `
              -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/rocwmma/library/include/ -Wno-ignored-attributes -Wno-nested-anon-types" `
              -DCMAKE_BUILD_TYPE=Release `
              -DGGML_BACKEND_DL=ON `
              -DGGML_NATIVE=OFF `
              -DGGML_CPU=OFF `
              -DAMDGPU_TARGETS="${{ matrix.gpu_targets }}" `
              -DGGML_HIP_ROCWMMA_FATTN=ON `
              -DGGML_HIP=ON `
              -DLLAMA_CURL=OFF
          cmake --build build --target ggml-hip -j ${env:NUMBER_OF_PROCESSORS}
          md "build\bin\rocblas\library\"
          cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\"

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a llama-bin-win-hip-${{ matrix.name }}-x64.zip .\build\bin\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-hip-${{ matrix.name }}-x64.zip
          name: llama-bin-win-hip-${{ matrix.name }}-x64.zip
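
  # iOS build: Xcode CMake build, xcframework packaging via build-xcframework.sh, and a build of the llama.swiftui example.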
  ios-xcode-build:
    runs-on: macos-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build -G Xcode \
              -DGGML_METAL_USE_BF16=ON \
              -DGGML_METAL_EMBED_LIBRARY=ON \
              -DLLAMA_CURL=OFF \
              -DLLAMA_BUILD_EXAMPLES=OFF \
              -DLLAMA_BUILD_TOOLS=OFF \
              -DLLAMA_BUILD_TESTS=OFF \
              -DLLAMA_BUILD_SERVER=OFF \
              -DCMAKE_SYSTEM_NAME=iOS \
              -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
              -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO

      - name: xcodebuild for swift package
        id: xcodebuild
        run: |
          ./build-xcframework.sh

      - name: Build Xcode project
        run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          zip --symlinks -r llama-${{ steps.tag.outputs.name }}-xcframework.zip build-apple/llama.xcframework

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-xcframework.zip
          name: llama-${{ steps.tag.outputs.name }}-xcframework
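
  # Publishes the GitHub release: downloads all artifacts, merges the Windows CPU backend zip into each Windows backend zip, renames everything with the tag, and uploads the assets.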
  release:
    if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}

    # Fine-grained permission
    # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write # for creating release

    runs-on: ubuntu-latest

    needs:
      - windows
      - windows-cpu
      - windows-cuda
      - windows-sycl
      - windows-hip
      - ubuntu-22-cpu
      - ubuntu-22-vulkan
      - macOS-arm64
      - macOS-x64
      - ios-xcode-build

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Download artifacts
        id: download-artifact
        uses: actions/download-artifact@v4
        with:
          path: ./artifact
          merge-multiple: true

      - name: Move artifacts
        id: move_artifacts
        run: |
          mkdir -p release
          echo "Adding CPU backend files to existing zips..."
          for arch in x64 arm64; do
            cpu_zip="artifact/llama-bin-win-cpu-${arch}.zip"
            temp_dir=$(mktemp -d)
            echo "Extracting CPU backend for $arch..."
            unzip "$cpu_zip" -d "$temp_dir"
            echo "Adding CPU files to $arch zips..."
            for target_zip in artifact/llama-bin-win-*-${arch}.zip; do
              if [[ "$target_zip" == "$cpu_zip" ]]; then
                continue
              fi
              echo "Adding CPU backend to $(basename "$target_zip")"
              realpath_target_zip=$(realpath "$target_zip")
              (cd "$temp_dir" && zip -r "$realpath_target_zip" .)
            done
            rm -rf "$temp_dir"
          done
          echo "Renaming and moving zips to release..."
          for zip_file in artifact/llama-bin-win-*.zip; do
            base_name=$(basename "$zip_file" .zip)
            zip_name="llama-${{ steps.tag.outputs.name }}-${base_name#llama-}.zip"
            echo "Moving $zip_file to release/$zip_name"
            mv "$zip_file" "release/$zip_name"
          done
          echo "Moving other artifacts..."
          mv -v artifact/*.zip release

      - name: Create release
        id: create_release
        uses: ggml-org/action-create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ steps.tag.outputs.name }}

      - name: Upload release
        id: upload_release
        uses: actions/github-script@v3
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const path = require('path');
            const fs = require('fs');
            const release_id = '${{ steps.create_release.outputs.id }}';
            for (let file of await fs.readdirSync('./release')) {
              if (path.extname(file) === '.zip') {
                console.log('uploadReleaseAsset', file);
                await github.repos.uploadReleaseAsset({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  release_id: release_id,
                  name: file,
                  data: await fs.readFileSync(`./release/${file}`)
                });
              }
            }