quantize.cpp

#include "ggml.h"
#include "llama.h"

#include <cstdio>
#include <cstdlib>
#include <string>

// usage:
//  ./quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
//
int main(int argc, char ** argv) {
    ggml_time_init();

    if (argc < 4) {
        fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type [nthread]\n", argv[0]);
        fprintf(stderr, "  type = %d - q4_0\n", LLAMA_FTYPE_MOSTLY_Q4_0);
        fprintf(stderr, "  type = %d - q4_1\n", LLAMA_FTYPE_MOSTLY_Q4_1);
        fprintf(stderr, "  type = %d - q4_2\n", LLAMA_FTYPE_MOSTLY_Q4_2);
        fprintf(stderr, "  type = %d - q4_3\n", LLAMA_FTYPE_MOSTLY_Q4_3);
        fprintf(stderr, "  type = %d - q8_0\n", LLAMA_FTYPE_MOSTLY_Q8_0);
        return 1;
    }

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    const std::string fname_inp = argv[1];
    const std::string fname_out = argv[2];

    const enum llama_ftype ftype = (enum llama_ftype)atoi(argv[3]);

    // optional number of threads (0 = use the default)
    int nthread = argc > 4 ? atoi(argv[4]) : 0;

    const int64_t t_main_start_us = ggml_time_us();

    int64_t t_quantize_us = 0;

    // load the model
    {
        const int64_t t_start_us = ggml_time_us();

        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), ftype, nthread)) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }

        t_quantize_us = ggml_time_us() - t_start_us;
    }

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        printf("\n");
        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0);
        printf("%s: total time    = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
    }

    return 0;
}