// test-autorelease.cpp
  1. // ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763
  2. #include <cstdio>
  3. #include <string>
  4. #include <thread>
  5. #include "llama.h"
  6. #include "get-model.h"
  7. // This creates a new context inside a pthread and then tries to exit cleanly.
  8. int main(int argc, char ** argv) {
  9. auto * model_path = get_model_or_exit(argc, argv);
  10. std::thread([&model_path]() {
  11. llama_backend_init();
  12. auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
  13. auto * ctx = llama_init_from_model(model, llama_context_default_params());
  14. llama_free(ctx);
  15. llama_model_free(model);
  16. llama_backend_free();
  17. }).join();
  18. return 0;
  19. }