@@ -19146,11 +19146,7 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
     s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
-#ifdef GGML_USE_LLAMAFILE
-    s += "LLAMAFILE = 1 | ";
-#else
-    s += "LLAMAFILE = 0 | ";
-#endif
+    s += "LLAMAFILE = " + std::to_string(ggml_cpu_has_llamafile()) + " | ";
 
     return s.c_str();
 }
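
The hunk moves the compile-time #ifdef out of llama.cpp and behind a runtime query, so the system-info line is built the same way for every feature. For context, ggml_cpu_has_llamafile() presumably mirrors the other ggml_cpu_has_* probes in ggml, simply reporting whether GGML_USE_LLAMAFILE was defined at build time; a minimal sketch, assuming that convention (not taken verbatim from this patch):

    // Sketch of the runtime probe backing the new LLAMAFILE entry.
    // Assumes it follows the same pattern as the existing ggml_cpu_has_* helpers.
    int ggml_cpu_has_llamafile(void) {
    #if defined(GGML_USE_LLAMAFILE)
        return 1;
    #else
        return 0;
    #endif
    }

With this shape, the preprocessor branch lives in one place inside ggml, and callers such as llama_print_system_info() only ever see a 0/1 value to stringify.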