build-amd.yml

name: CI (AMD)

on:
  workflow_dispatch: # allows manual triggering
  push:
    branches:
      - master
    paths: [
      '.github/workflows/build-amd.yml',
      '**/CMakeLists.txt',
      '**/*.cmake',
      '**/*.h',
      '**/*.hpp',
      '**/*.c',
      '**/*.cpp',
      '**/*.cu',
      '**/*.cuh',
      '**/*.comp'
    ]
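
# Only one run per ref at a time: for pull requests (head_ref is set) the
# group is the ref, so a newer push cancels the in-progress run; direct
# pushes fall back to run_id, which is unique, so they are never cancelled.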
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  cancel-in-progress: true
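
# Both jobs target the same self-hosted AMD Linux runner pool; every label
# in runs-on must be present on a runner for it to pick the job up.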
  22. jobs:
  23. ggml-ci-x64-amd-vulkan:
  24. runs-on: [self-hosted, Linux, X64, AMD]
  25. steps:
  26. - name: Clone
  27. id: checkout
  28. uses: actions/checkout@v4
  29. - name: Test
  30. id: ggml-ci
  31. run: |
  32. vulkaninfo --summary
  33. GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
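
  # Same CI entry point as the Vulkan job, but built against ROCm/HIP;
  # GG_BUILD_AMDGPU_TARGETS pins the offload target (gfx1101 is an RDNA3 GPU).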
  ggml-ci-x64-amd-rocm:
    runs-on: [self-hosted, Linux, X64, AMD]

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Test
        id: ggml-ci
        run: |
          amd-smi static
          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
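
The script the workflow calls can also be run from a local checkout when
debugging a failed job. A minimal sketch, assuming the two positional
arguments of ci/run.sh are the results directory and the work directory as
in the jobs above (~/results/llama.cpp and /mnt/llama.cpp are runner-local
paths, replaced here with hypothetical scratch directories):

  GG_BUILD_VULKAN=1 bash ./ci/run.sh /tmp/llama-ci-results /tmp/llama-ci-work

A run can also be started by hand through the workflow_dispatch trigger;
with the GitHub CLI installed:

  gh workflow run build-amd.yml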