
# build a test binary from a single source file (plus get-model.cpp) and link it against common
function(llama_build_executable source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source} get-model.cpp)
    install(TARGETS ${TEST_TARGET} RUNTIME)
    target_link_libraries(${TEST_TARGET} PRIVATE common)
endfunction()

# register an already-built test binary with ctest under `name`;
# any extra arguments (${ARGN}) are passed to the binary on its command line
function(llama_test_executable name source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_test(NAME ${name} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
    set_property(TEST ${name} PROPERTY LABELS "main")
endfunction()

# build and register a test with the default "main" label
function(llama_build_and_test_executable source)
    llama_build_and_test_executable_with_label(${source} "main")
endfunction()

# build and register a test with a custom ctest label
function(llama_build_and_test_executable_with_label source label)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source} get-model.cpp)
    install(TARGETS ${TEST_TARGET} RUNTIME)
    target_link_libraries(${TEST_TARGET} PRIVATE common)
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
    set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${label})
endfunction()
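
# Usage sketch (hypothetical target and vocab names, not tests that exist in this tree):
# one built binary can back several ctest entries, with the trailing argument
# (here a vocab file path) forwarded to the binary via ${ARGN}:
#
#   llama_build_executable(test-my-tokenizer.cpp)
#   llama_test_executable (test-my-tokenizer-foo test-my-tokenizer.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-foo.gguf)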

# llama_build_and_test_executable(test-double-float.cpp) # SLOW
llama_build_and_test_executable(test-quantize-fns.cpp)
llama_build_and_test_executable(test-quantize-perf.cpp)
llama_build_and_test_executable(test-sampling.cpp)
llama_build_and_test_executable(test-chat-template.cpp)

llama_build_executable(test-tokenizer-0-llama.cpp)
llama_test_executable (test-tokenizer-0-llama test-tokenizer-0-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)

llama_build_executable(test-tokenizer-0-falcon.cpp)
llama_test_executable (test-tokenizer-0-falcon test-tokenizer-0-falcon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)

llama_build_executable(test-tokenizer-1-llama.cpp)
llama_test_executable (test-tokenizer-1-llama test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
llama_test_executable (test-tokenizer-1-baichuan test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)

llama_build_executable(test-tokenizer-1-bpe.cpp)
llama_test_executable (test-tokenizer-1-falcon test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
llama_test_executable (test-tokenizer-1-aquila test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
llama_test_executable (test-tokenizer-1-mpt test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
llama_test_executable (test-tokenizer-1-stablelm-3b-4e1t test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-stablelm-3b-4e1t.gguf)
llama_test_executable (test-tokenizer-1-gpt-neox test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
llama_test_executable (test-tokenizer-1-refact test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
llama_test_executable (test-tokenizer-1-starcoder test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
llama_test_executable (test-tokenizer-1-gpt2 test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt2.gguf)
# llama_test_executable (test-tokenizer-1-bloom test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bloom.gguf) # BIG

llama_build_and_test_executable(test-grammar-parser.cpp)
llama_build_and_test_executable(test-llama-grammar.cpp)
llama_build_and_test_executable(test-grad0.cpp)
# llama_build_and_test_executable(test-opt.cpp) # SLOW
llama_build_and_test_executable(test-backend-ops.cpp)
llama_build_and_test_executable(test-rope.cpp)

llama_build_and_test_executable_with_label(test-model-load-cancel.cpp "model")
llama_build_and_test_executable_with_label(test-autorelease.cpp "model")
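
# The LABELS set above allow standard CTest label filtering; for example
# (illustrative invocations from the build directory):
#
#   ctest -L main    # the regular tests
#   ctest -L model   # only the tests that expect a model to be available (see get-model.cpp)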

# dummy executable - not installed
get_filename_component(TEST_TARGET test-c.c NAME_WE)
add_executable(${TEST_TARGET} test-c.c)
target_link_libraries(${TEST_TARGET} PRIVATE llama)