
nitpick : correct MB to MiB (#15934)

MB was incorrectly used for 1024 x 1024 bytes instead of MiB
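For context on the fix: dividing a byte count by 1024 twice yields mebibytes (MiB, 2^20 = 1,048,576 bytes), whereas the SI megabyte (MB) is 10^6 bytes, so the computed values were correct but carried the wrong unit label. A minimal standalone C++ sketch of the difference (illustrative only, not part of llama.cpp):

#include <cstdio>

int main() {
    const double bytes = 536870912.0; // 512 * 1024 * 1024 bytes

    // The patched log lines divide by 1024 twice, i.e. they compute MiB,
    // so the unit string must say "MiB", not "MB" (10^6 bytes).
    printf("size = %8.3f MiB\n", bytes / 1024.0 / 1024.0); // prints  512.000 MiB
    printf("size = %8.3f MB\n",  bytes / 1000.0 / 1000.0); // prints  536.871 MB

    return 0;
}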
ddh0 4 months ago
Parent
Commit df082f5630
1 changed file with 3 additions and 3 deletions

src/llama-quant.cpp +3 -3

@@ -920,7 +920,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
             new_type = tensor->type;
             new_data = tensor->data;
             new_size = ggml_nbytes(tensor);
-            LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
+            LLAMA_LOG_INFO("size = %8.3f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0);
         } else {
             const int64_t nelements = ggml_nelements(tensor);

@@ -1037,8 +1037,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     }
     close_ofstream();

-    LLAMA_LOG_INFO("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-    LLAMA_LOG_INFO("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: model size  = %8.2f MiB\n", __func__, total_size_org/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: quant size  = %8.2f MiB\n", __func__, total_size_new/1024.0/1024.0);

     if (qs.n_fallback > 0) {
         LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",