test-tokenizer-1-bpe.cpp

#include "llama.h"
#include "common.h"
#include "console.h"
#include "../src/unicode.h"

#include <atomic>
#include <cassert>
#include <codecvt>
#include <cstdio>
#include <cstring>
#include <locale>
#include <string>
#include <thread>
#include <vector>
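
// Round-trip test for BPE tokenizers: every token in the vocab, and (almost)
// every Unicode codepoint, must survive tokenize -> detokenize unchanged.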

int main(int argc, char ** argv) {
    if (argc < 2 || argc > 3) {
        fprintf(stderr, "Usage: %s <vocab-file> [--ignore-merges]\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    bool ignore_merges = false;
    if (argc == 3) {
        if (std::strcmp(argv[2], "--ignore-merges") != 0) {
            fprintf(stderr, "Usage: %s <vocab-file> [--ignore-merges]\n", argv[0]);
            return 1;
        }
        ignore_merges = true;
    }
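
    // With --ignore-merges, each vocab entry must tokenize back to exactly one
    // token (see the check in the loop below); without it, only the full
    // detokenize -> tokenize -> detokenize round trip has to match.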

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    if (ignore_merges) {
        fprintf(stderr, "%s : ignoring merges for tokens inside vocab\n", __func__);
    }

    llama_model * model;
    llama_context * ctx;

    llama_backend_init();

    // load the vocab
    {
        auto mparams = llama_model_default_params();
        mparams.vocab_only = true;  // tokenizer data only, no weights

        model = llama_model_load_from_file(fname.c_str(), mparams);
        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        auto cparams = llama_context_default_params();

        ctx = llama_init_from_model(model, cparams);
        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to create context for vocab '%s'\n", __func__, fname.c_str());
            llama_model_free(model);
            return 1;
        }
    }
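
    // The tests below only need the tokenizer, which lives in the model's
    // vocab; the context is what common_tokenize/common_detokenize operate on.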
    const llama_vocab * vocab = llama_model_get_vocab(model);

    //GGML_ASSERT(llama_vocab_type(vocab) == LLAMA_VOCAB_TYPE_BPE);
    if (llama_vocab_type(vocab) != LLAMA_VOCAB_TYPE_BPE) {
        return 99; // not a BPE vocab - nothing to test
    }

#ifdef _WIN32
    // We need this for unicode console support
    console::init(false, false);
    atexit([]() { console::cleanup(); });
#endif
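
    // Part 1: round-trip every vocab entry. Detokenize token id i to a string,
    // re-tokenize that string, and require that detokenizing the result gives
    // the same bytes back. Entries that are not valid UTF-8 are skipped via
    // the exception below.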
    const int n_vocab = llama_vocab_n_tokens(vocab);

    for (int i = 0; i < n_vocab; ++i) {
        std::string str = common_detokenize(ctx, std::vector<llama_token>(1, i));
        try {
            auto cps = unicode_cpts_from_utf8(str);  // throws on invalid UTF-8
            std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
            if (ignore_merges && tokens.size() > 1) {
                fprintf(stderr,
                        "%s : error: token %d detokenizes to '%s'(%zu) but "
                        "tokenizing that yields multiple tokens: [",
                        __func__, i, str.c_str(), str.length());
                fprintf(stderr, "%d", tokens[0]);
                for (size_t j = 1; j < tokens.size(); j++) {
                    fprintf(stderr, ", %d", tokens[j]);
                }
                fprintf(stderr, "]\n");
                return 2;
            }
            std::string check = common_detokenize(ctx, tokens);
            if (check != str) {
                fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
                        __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
                return 2;
            }
        } catch (const std::invalid_argument &) {
            // token is not valid UTF-8 - nothing to check
            //fprintf(stderr, "%s : info: utf8 conversion %d '%s'\n", __func__, i, str.c_str());
        }
    }

    // Part 2: round-trip every Unicode codepoint, skipping surrogates and a
    // range of unassigned planes. The work is striped across hardware threads:
    // thread i handles codepoints congruent to i modulo nthread.
    {
        int nthread = std::thread::hardware_concurrency();
        if (nthread < 1) {
            nthread = 1;  // hardware_concurrency() may return 0
        }

        std::vector<std::thread> threads(nthread);
        std::atomic_int errcode = {};

        for (int i = 0; i < nthread; ++i) {
            threads[i] = std::thread([i, nthread, ctx, &errcode]() {
                for (uint32_t cp = i; !errcode && cp < 0x00110000; cp += nthread) {
                    if ((0x0000D800 <= cp && cp <= 0x0000DFFF) || // surrogates \p{Cs}
                        (0x00040000 <= cp && cp <= 0x000E0000)) { // undefined  \p{Cn}
                        continue;
                    }

                    std::string str = unicode_cpt_to_utf8(cp);
                    std::vector<llama_token> tokens = common_tokenize(ctx, str, false);
                    std::string check = common_detokenize(ctx, tokens);

                    // 9601 = U+2581 '▁' (the whitespace marker used by some
                    // vocabs), which is known not to round-trip
                    if (cp != 9601 && str != check) {
                        fprintf(stderr, "error: codepoint 0x%x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
                                cp, check.c_str(), check.length(), str.c_str(), str.length());
                        errcode = 3;
                    }
                }
            });
        }

        for (auto & t : threads) {
            t.join();
        }

        if (errcode) {
            return errcode;
        }
    }
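
    // Exit codes: 0 = all round trips matched, 1 = usage/load error,
    // 2 = vocab entry mismatch, 3 = codepoint mismatch, 99 = not a BPE vocab.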

    // free the context before the model it was created from
    llama_free(ctx);
    llama_model_free(model);

    llama_backend_free();

    return 0;
}