test-tokenizer-1-bpe.cpp

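// Round-trip test for BPE tokenizers: loads a vocab-only model file, then
// verifies that every vocab entry and every Unicode codepoint survives a
// tokenize/detokenize round trip.
//
// Example invocation (the vocab path is illustrative):
//
//   ./test-tokenizer-1-bpe ./models/ggml-vocab-gpt-2.gguf [--ignore-merges]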
#include "llama.h"
#include "common.h"
#include "unicode.h"
#include "console.h"

#include <cassert>
#include <codecvt>
#include <cstdio>
#include <cstring>
#include <locale>
#include <string>
#include <thread>
#include <vector>
int main(int argc, char ** argv) {
    if (argc < 2 || argc > 3) {
        fprintf(stderr, "Usage: %s <vocab-file> [--ignore-merges]\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    bool ignore_merges = false;
    if (argc == 3) {
        if (std::strcmp(argv[2], "--ignore-merges") != 0) {
            fprintf(stderr, "Usage: %s <vocab-file> [--ignore-merges]\n", argv[0]);
            return 1;
        }
        ignore_merges = true;
    }

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    if (ignore_merges) {
        fprintf(stderr, "%s : ignoring merges for tokens inside vocab\n", __func__);
    }
    llama_model * model;
    llama_context * ctx;

    llama_backend_init();

    // load the vocab
    {
        auto mparams = llama_model_default_params();

        mparams.vocab_only = true;

        model = llama_load_model_from_file(fname.c_str(), mparams);

        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        auto cparams = llama_context_default_params();

        ctx = llama_new_context_with_model(model, cparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            llama_free_model(model);
            return 1;
        }
    }
    GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_BPE);

#ifdef _WIN32
    // We need this for unicode console support
    console::init(false, false);
    atexit([]() { console::cleanup(); });
#endif
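
    // round-trip the entire vocabulary: detokenize each token id to a string,
    // tokenize that string again, and check that detokenizing the result
    // reproduces the original string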
    const int n_vocab = llama_n_vocab(model);

    for (int i = 0; i < n_vocab; ++i) {
        std::string str = llama_detokenize_bpe(ctx, std::vector<int>(1, i));
        try {
            auto cps = unicode_cpts_from_utf8(str);
            std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
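            // with --ignore-merges the tokenizer is expected to prefer tokens that
            // already exist in the vocab over re-deriving them from BPE merges, so a
            // vocab entry that still splits into multiple tokens is reported as an error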
            if (ignore_merges && tokens.size() > 1) {
                fprintf(stderr,
                        "%s : error: token %d detokenizes to '%s'(%zu) but "
                        "tokenizing it back yields multiple tokens: [",
                        __func__, i, str.c_str(), str.length());
                fprintf(stderr, "%d", tokens[0]);
                for (size_t j = 1; j < tokens.size(); j++) {
                    fprintf(stderr, ", %d", tokens[j]);
                }
                fprintf(stderr, "]\n");
                return 2;
            }
            std::string check = llama_detokenize_bpe(ctx, tokens);
            if (check != str) {
                fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
                    __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
                return 2;
            }
        }
        catch (const std::invalid_argument &) {
            // the token does not decode to valid UTF-8; nothing to round-trip
            //fprintf(stderr, "%s : info: utf8 conversion %d '%s'\n", __func__, i, str.c_str());
        }
    }
    // unicode
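    // brute-force check: feed every Unicode codepoint through the tokenizer and
    // back, splitting the range across all available hardware threads; any
    // codepoint that does not survive the round trip aborts the test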
    {
        const int nthread = std::thread::hardware_concurrency();

        std::vector<std::thread> threads(nthread);

        for (int i = 0; i < nthread; ++i) {
            threads[i] = std::thread([i, nthread, ctx]() {
                for (uint32_t cp = i; cp < 0x0010ffff; cp += nthread) {
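                    // skip codepoints that are not expected to round-trip: a few C0
                    // control characters, the UTF-16 surrogate halves (U+D800..U+DFFF),
                    // and the unassigned planes U+40000..U+DFFFF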
                    if (!( // NOLINT
                            (cp <  0x03 || cp >  0x05) && cp != 0x0b && cp != 0x11 &&
                            (cp <  0x13 || cp >  0x17) && cp != 0x19 &&
                            (cp <  0x1c || cp >  0x1e) &&
                            (cp < 0xd800 || cp > 0xdfff) &&
                            (cp < 0x00040000 || cp >= 0x000e0000)
                        )) {
                        continue;
                    }

                    std::string str = unicode_cpt_to_utf8(cp);
                    std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
                    std::string check = llama_detokenize_bpe(ctx, tokens);
                    // 9601 == U+2581 (▁), used as a whitespace marker by some vocabs,
                    // so it is excluded from the round-trip check
                    if (cp != 9601 && str != check) {
                        fprintf(stderr, "error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
                                cp, check.c_str(), check.length(), str.c_str(), str.length());
                        std::exit(3);
                    }
                }
            });
        }

        for (auto & t : threads) {
            t.join();
        }
    }
    // free the context before the model it was created from
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}