// test-tokenizer-0-falcon.cpp

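// Checks the BPE tokenizer against the hard-coded reference outputs below.
//
//   usage: test-tokenizer-0-falcon <vocab-file> [text-file]
//
// e.g., assuming a Falcon GGUF vocab file such as models/ggml-vocab-falcon.gguf
// (path illustrative):
//
//   ./bin/test-tokenizer-0-falcon ./models/ggml-vocab-falcon.gguf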
  1. #include "llama.h"
  2. #include "common.h"
  3. #include "console.h"
  4. #include <cstdio>
  5. #include <string>
  6. #include <map>
  7. #include <vector>
  8. #include <fstream>
  9. // generate using test-tokenizer-0-falcon.py
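// each entry maps an input string to the token ids the Falcon BPE tokenizer
// is expected to produce for it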
static const std::map<std::string, std::vector<llama_token>> & k_tests() {
    static std::map<std::string, std::vector<llama_token>> _k_tests = {
        { ""                    , { }, },
        { " "                   , { 204, }, },
        { "  "                  , { 258, }, },
        { "   "                 , { 466, }, },
        { "\t"                  , { 192, }, },
        { "\n"                  , { 193, }, },
        { "\t\n"                , { 19125, }, },
        { "Hello world"         , { 9856, 1079, }, },
        { " Hello world"        , { 23090, 1079, }, },
        { "Hello World"         , { 9856, 2889, }, },
        { " Hello World"        , { 23090, 2889, }, },
        { " Hello World!"       , { 23090, 2889, 12, }, },
        { "Hello, world!"       , { 9856, 23, 1079, 12, }, },
        { " Hello, world!"      , { 23090, 23, 1079, 12, }, },
        { " this is 🦙.cpp"      , { 414, 304, 3346, 111, 231, 25, 29247, }, },
        { "w048 7tuijk dsdfhu"  , { 98, 55866, 204, 34, 16682, 7149, 36190, 6869, 11481, }, },
        { "нещо на Български"   , { 150, 133, 6207, 151, 215, 150, 134, 5052, 133, 6279, 5052, 223, 151, 216, 49679, 123, 53110, 47043, 7795, }, },
        { "កាន់តែពិសេសអាចខលចេញ", { 38154, 206, 38154, 126, 38154, 225, 167, 237, 217, 38154, 221, 167, 237, 208, 38154, 228, 38154, 127, 38154, 237, 167, 237, 207, 38154, 237, 38154, 107, 38154, 126, 38154, 211, 38154, 207, 38154, 233, 38154, 211, 167, 237, 207, 38154, 215, }, },
        { "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)", { 2571, 232, 206, 204, 19, 11003, 20, 8196, 126, 283, 219, 48778, 116, 13392, 204, 19, 51831, 732, 63209, 1741, 7955, 522, 20, 22438, 211, 204, 19, 7927, 53360, 325, 504, 701, 946, 10930, 20, }, },
        { "Hello"               , { 9856, }, },
        { " Hello"              , { 23090, }, },
        { "  Hello"             , { 204, 23090, }, },
        { "   Hello"            , { 258, 23090, }, },
        { "    Hello"           , { 466, 23090, }, },
        { "    Hello\n    Hello", { 466, 23090, 742, 23090, }, },
    };

    return _k_tests;
}
int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s vocab-file [text-file]\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    std::string fname_text;
    if (argc > 2) {
        fname_text = argv[2];
    }
    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    llama_model * model;
    llama_context * ctx;

    llama_backend_init(false);

    // load just the vocab (vocab_only skips loading the tensor weights)
    {
        auto mparams = llama_model_default_params();

        mparams.vocab_only = true;

        model = llama_load_model_from_file(fname.c_str(), mparams);

        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        auto cparams = llama_context_default_params();

        ctx = llama_new_context_with_model(model, cparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to create context for '%s'\n", __func__, fname.c_str());
            llama_free_model(model);
            return 1;
        }
    }
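    // the hard-coded expectations only hold for a BPE vocab, so reject anything else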
    if (llama_vocab_type(model) != LLAMA_VOCAB_TYPE_BPE) {
        fprintf(stderr, "%s : error: vocab type is not BPE\n", __func__);
        llama_free(ctx);
        llama_free_model(model);
        return 2;
    }
#ifdef _WIN32
    // we need this for unicode console support
    console::init(false, false);
    atexit([]() { console::cleanup(); });
#endif
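    // run every hard-coded case: tokenize (without adding BOS), print what we
    // got, and compare the ids against the expected sequence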
    bool success = true;

    for (const auto & test_kv : k_tests()) {
        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, false);

        printf("\n");
        printf("src: '%s'\n", test_kv.first.c_str());
        printf("res: '%s'\n", llama_detokenize_bpe(ctx, res).c_str());
        printf("tok: ");
        for (const auto & tok : res) {
            printf("%d ", tok);
        }
        printf("\n");
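        // a case passes only on an exact match: same number of tokens, same ids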
        bool correct = res.size() == test_kv.second.size();

        for (int i = 0; i < (int) res.size() && correct; ++i) {
            if (test_kv.second[i] != res[i]) {
                correct = false;
            }
        }
        if (!correct) {
            fprintf(stderr, "%s : failed test:    '%s'\n", __func__, test_kv.first.c_str());
            fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
                llama_detokenize_bpe(ctx, res).c_str(),
                llama_detokenize_bpe(ctx, test_kv.second).c_str());
            fprintf(stderr, "%s : expected tokens: ", __func__);
            for (const auto & t : test_kv.second) {
                fprintf(stderr, "%6d, ", t);
            }
            fprintf(stderr, "\n");
            fprintf(stderr, "%s : got tokens:      ", __func__);
            for (const auto & t : res) {
                fprintf(stderr, "%6d, ", t);
            }
            fprintf(stderr, "\n");

            success = false;
        }
    }
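    // optional second argument: tokenize an entire file and dump the ids, so
    // the result can be compared with the output of test-tokenizer-0-falcon.py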
    if (!fname_text.empty()) {
        fprintf(stderr, "%s : tokenizing: '%s'\n", __func__, fname_text.c_str());

        std::string text;
        {
            std::ifstream ifs(fname_text);
            if (!ifs) {
                fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_text.c_str());
                return 1;
            }
            text = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>());
        }

        fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());

        const std::vector<llama_token> res = llama_tokenize(ctx, text, true);

        fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());
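        // write the ids, space-separated, next to the input as <text-file>.tokcpp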
        {
            const std::string fname_out = fname_text + ".tokcpp";

            std::ofstream ofs(fname_out);
            if (!ofs) {
                fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
                return 1;
            }

            for (const auto & tok : res) {
                ofs << tok << " ";
            }
            ofs << "\n";
        }

        fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
    }
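    // tear down in reverse order of creation: context, then model, then backend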
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return success ? 0 : 3;
}