Kaynağa Gözat

llama : print hint when loading a model when no backends are loaded (#13589)

Diego Devesa 8 ay önce
ebeveyn
işleme
5364ae4ba5
1 değiştirilmiş dosya ile 5 ekleme ve 0 silme
  1 changed file: 5 additions, 0 deletions
      src/llama.cpp

+ 5 - 0
src/llama.cpp

@@ -140,6 +140,11 @@ static struct llama_model * llama_model_load_from_file_impl(
         struct llama_model_params params) {
     ggml_time_init();
 
+    if (!params.vocab_only && ggml_backend_reg_count() == 0) {
+        LLAMA_LOG_ERROR("%s: no backends are loaded. hint: use ggml_backend_load() or ggml_backend_load_all() to load a backend before calling this function\n", __func__);
+        return nullptr;
+    }
+
     unsigned cur_percentage = 0;
     if (params.progress_callback == NULL) {
         params.progress_callback_user_data = &cur_percentage;