llama-config.cmake.in

set(LLAMA_VERSION      @LLAMA_INSTALL_VERSION@)
set(LLAMA_BUILD_COMMIT @LLAMA_BUILD_COMMIT@)
set(LLAMA_BUILD_NUMBER @LLAMA_BUILD_NUMBER@)
set(LLAMA_SHARED_LIB   @BUILD_SHARED_LIBS@)

set(GGML_ACCELERATE           @GGML_ACCELERATE@)
set(GGML_VULKAN_CHECK_RESULTS @GGML_VULKAN_CHECK_RESULTS@)
set(GGML_VULKAN_DEBUG         @GGML_VULKAN_DEBUG@)
set(GGML_VULKAN_MEMORY_DEBUG  @GGML_VULKAN_MEMORY_DEBUG@)
set(GGML_VULKAN_VALIDATE      @GGML_VULKAN_VALIDATE@)
set(GGML_OPENMP               @GGML_OPENMP@)

@PACKAGE_INIT@

set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@")
set_and_check(LLAMA_LIB_DIR     "@PACKAGE_LLAMA_LIB_INSTALL_DIR@")
set_and_check(LLAMA_BIN_DIR     "@PACKAGE_LLAMA_BIN_INSTALL_DIR@")

find_package(Threads REQUIRED)

set(_llama_transient_defines "@GGML_TRANSIENT_DEFINES@")
set(_llama_link_deps "")

# the core ggml libraries are required
foreach(_ggml_lib ggml ggml-base)
    string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY")
    find_library(${_ggml_lib_var} ${_ggml_lib}
        REQUIRED
        HINTS ${LLAMA_LIB_DIR}
        NO_CMAKE_FIND_ROOT_PATH
    )
    list(APPEND _llama_link_deps "${${_ggml_lib_var}}")
    message(STATUS "Found ${${_ggml_lib_var}}")
endforeach()

# backend libraries are optional; GGML_<BACKEND> is set ON/OFF depending on what is found
foreach(backend amx blas cann cpu cuda hip kompute metal musa rpc sycl vulkan)
    string(TOUPPER "GGML_${backend}" backend_id)
    set(_ggml_lib "ggml-${backend}")
    string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY")

    find_library(${_ggml_lib_var} ${_ggml_lib}
        HINTS ${LLAMA_LIB_DIR}
        NO_CMAKE_FIND_ROOT_PATH
    )
    if(${_ggml_lib_var})
        list(APPEND _llama_link_deps "${${_ggml_lib_var}}")
        set(${backend_id} ON)
        message(STATUS "Found backend ${${_ggml_lib_var}}")
    else()
        set(${backend_id} OFF)
    endif()
endforeach()

if (APPLE AND GGML_ACCELERATE)
    find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED)
endif()

if (GGML_BLAS)
    find_package(BLAS REQUIRED)
endif()

if (GGML_CUDA)
    find_package(CUDAToolkit REQUIRED)
endif()

if (GGML_METAL)
    find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
    find_library(METAL_FRAMEWORK    Metal      REQUIRED)
    find_library(METALKIT_FRAMEWORK MetalKit   REQUIRED)
endif()

if (GGML_VULKAN)
    find_package(Vulkan REQUIRED)
endif()

if (GGML_HIP)
    find_package(hip     REQUIRED)
    find_package(hipblas REQUIRED)
    find_package(rocblas REQUIRED)
endif()

if (GGML_SYCL)
    find_package(IntelSYCL REQUIRED)
    find_package(MKL       REQUIRED)
endif()

if (GGML_OPENMP)
    find_package(OpenMP REQUIRED)
endif()

find_library(llama_LIBRARY llama
    REQUIRED
    HINTS ${LLAMA_LIB_DIR}
    NO_CMAKE_FIND_ROOT_PATH
)

# expose everything through a single imported target
add_library(llama UNKNOWN IMPORTED)
set_target_properties(llama
    PROPERTIES
        INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}"
        INTERFACE_LINK_LIBRARIES      "${_llama_link_deps}"
        INTERFACE_COMPILE_DEFINITIONS "${_llama_transient_defines}"
        IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
        IMPORTED_LOCATION "${llama_LIBRARY}"
        INTERFACE_COMPILE_FEATURES cxx_std_11
        POSITION_INDEPENDENT_CODE ON)

check_required_components(Llama)
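
A downstream project would typically consume the installed llama-config.cmake via find_package in config mode and link against the imported llama target. The sketch below is illustrative only; the project name myapp and source file main.cpp are assumptions, not part of the installed file:

    cmake_minimum_required(VERSION 3.14)
    project(myapp CXX)

    # locate llama-config.cmake; point CMAKE_PREFIX_PATH at the llama.cpp
    # install prefix if it is not in a default search location
    find_package(Llama REQUIRED)

    add_executable(myapp main.cpp)

    # the imported `llama` target propagates include directories, compile
    # definitions, and the resolved ggml/backend link dependencies
    target_link_libraries(myapp PRIVATE llama)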