فهرست منبع

common: fix warning message when no GPU found (#10564)

Johannes Gäßler 1 سال پیش
والد
کامیت
890719311b
1 فایل تغییر یافته به همراه 6 افزوده شده و 4 حذف شده
  1. common/arg.cpp (+6 −4)

+ 6 - 4
common/arg.cpp

@@ -1370,8 +1370,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, int value) {
             params.n_gpu_layers = value;
             if (!llama_supports_gpu_offload()) {
-                fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers option will be ignored\n");
-                fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
+                fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n");
+                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
+                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
             }
         }
     ).set_env("LLAMA_ARG_N_GPU_LAYERS"));
@@ -2104,8 +2105,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, int value) {
             params.speculative.n_gpu_layers = value;
             if (!llama_supports_gpu_offload()) {
-                fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers-draft option will be ignored\n");
-                fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
+                fprintf(stderr, "warning: no usable GPU found, --gpu-layers-draft option will be ignored\n");
+                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
+                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
             }
         }
     ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));