test-autorelease.cpp

// ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763

#include <cstdio>
#include <string>
#include <thread>

#include "llama.h"

// This creates a new context inside a pthread and then tries to exit cleanly.
int main(int argc, char ** argv) {
    if (argc < 2) {
        printf("Usage: %s model.gguf\n", argv[0]);
        return 0; // intentionally return success
    }

    const std::string fname = argv[1];

    std::thread([&fname]() {
        llama_backend_init(false);
        auto * model = llama_load_model_from_file(fname.c_str(), llama_model_default_params());
        auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
    }).join();

    return 0;
}
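
// Build/run sketch (an assumption, not part of this file: in the usual llama.cpp
// CMake layout, each tests/test-*.cpp becomes a build target of the same name,
// with the binary placed under build/bin; exact paths may differ by setup):
//   cmake -B build && cmake --build build --target test-autorelease
//   ./build/bin/test-autorelease path/to/model.gguf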