quantize.cpp

#include "build-info.h"
#include "llama.h"

#include <cstdio>
#include <map>
#include <string>
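// map human-readable type names to llama_ftype values; note that the bare
// k-quant names ("q3_K", "q4_K", "q5_K") alias their medium (_M) variants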
static const std::map<std::string, llama_ftype> LLAMA_FTYPE_MAP = {
    {"q4_0",   LLAMA_FTYPE_MOSTLY_Q4_0},
    {"q4_1",   LLAMA_FTYPE_MOSTLY_Q4_1},
    {"q5_0",   LLAMA_FTYPE_MOSTLY_Q5_0},
    {"q5_1",   LLAMA_FTYPE_MOSTLY_Q5_1},
    {"q8_0",   LLAMA_FTYPE_MOSTLY_Q8_0},
    {"q2_K",   LLAMA_FTYPE_MOSTLY_Q2_K},
    {"q3_K",   LLAMA_FTYPE_MOSTLY_Q3_K_M},
    {"q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S},
    {"q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M},
    {"q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L},
    {"q4_K",   LLAMA_FTYPE_MOSTLY_Q4_K_M},
    {"q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S},
    {"q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M},
    {"q5_K",   LLAMA_FTYPE_MOSTLY_Q5_K_M},
    {"q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S},
    {"q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M},
    {"q6_K",   LLAMA_FTYPE_MOSTLY_Q6_K},
};
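// resolve an ftype argument given either as a name (e.g. "q4_0") or as the
// numeric value of the llama_ftype enum; on success, writes the enum value
// and its canonical name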
bool try_parse_ftype(const std::string & ftype_str, llama_ftype & ftype, std::string & ftype_str_out) {
    auto it = LLAMA_FTYPE_MAP.find(ftype_str);
    if (it != LLAMA_FTYPE_MAP.end()) {
        ftype = it->second;
        ftype_str_out = it->first;
        return true;
    }
    // try to parse as an integer
    try {
        int ftype_int = std::stoi(ftype_str);
        for (auto it = LLAMA_FTYPE_MAP.begin(); it != LLAMA_FTYPE_MAP.end(); it++) {
            if (it->second == ftype_int) {
                ftype = it->second;
                ftype_str_out = it->first;
                return true;
            }
        }
    }
    catch (...) {
        // stoi failed
    }
    return false;
}
// usage:
//  ./quantize models/llama/ggml-model.bin [models/llama/ggml-model-quant.bin] type [nthreads]
//
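// example (illustrative paths and thread count):
//  ./quantize models/llama/ggml-model-f16.bin models/llama/ggml-model-q4_0.bin q4_0 8
//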
int main(int argc, char ** argv) {
    if (argc < 3) {
        fprintf(stderr, "usage: %s model-f32.bin [model-quant.bin] type [nthreads]\n", argv[0]);
        for (auto it = LLAMA_FTYPE_MAP.begin(); it != LLAMA_FTYPE_MAP.end(); it++) {
            fprintf(stderr, "  type = \"%s\" or %d\n", it->first.c_str(), it->second);
        }
        return 1;
    }
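
    // initialize the llama + ggml backend; called once at program start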
    llama_init_backend();

    // parse command line arguments
    const std::string fname_inp = argv[1];
    std::string fname_out;
    int nthread;
    llama_ftype ftype;

    int arg_idx = 2;
    std::string ftype_str;
    if (try_parse_ftype(argv[arg_idx], ftype, ftype_str)) {
        // argv[2] is the ftype
        std::string fpath;
        const size_t pos = fname_inp.find_last_of('/');
        if (pos != std::string::npos) {
            fpath = fname_inp.substr(0, pos + 1);
        }
        // export as [inp path]/ggml-model-[ftype].bin
        fname_out = fpath + "ggml-model-" + ftype_str + ".bin";
        arg_idx++;
    }
    else {
        // argv[2] is the output path
        fname_out = argv[arg_idx];
        arg_idx++;

        if (argc <= arg_idx) {
            fprintf(stderr, "%s: missing ftype\n", __func__);
            return 1;
        }
        // argv[3] is the ftype
        if (!try_parse_ftype(argv[arg_idx], ftype, ftype_str)) {
            fprintf(stderr, "%s: invalid ftype '%s'\n", __func__, argv[arg_idx]);
            return 1;
        }
        arg_idx++;
    }

    // parse nthreads
    if (argc > arg_idx) {
        try {
            nthread = std::stoi(argv[arg_idx]);
        }
        catch (const std::exception & e) {
            fprintf(stderr, "%s: invalid nthread '%s' (%s)\n", __func__, argv[arg_idx], e.what());
            return 1;
        }
    } else {
        nthread = 0; // not specified; let the library choose a default
    }

    fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    fprintf(stderr, "%s: quantizing '%s' to '%s' as %s", __func__, fname_inp.c_str(), fname_out.c_str(), ftype_str.c_str());
    if (nthread > 0) {
        fprintf(stderr, " using %d threads", nthread);
    }
    fprintf(stderr, "\n");

    const int64_t t_main_start_us = llama_time_us();

    int64_t t_quantize_us = 0;

    // quantize the model
    {
        const int64_t t_start_us = llama_time_us();

        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), ftype, nthread)) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }

        t_quantize_us = llama_time_us() - t_start_us;
    }

    // report timing
    {
        const int64_t t_main_end_us = llama_time_us();

        printf("\n");
        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
    }

    return 0;
}