// test-tokenizer-0.cpp

#include "llama.h"
#include "common.h"
#include "console.h"

#include <cstdio>
#include <string>
#include <map>
#include <vector>
#include <fstream>
//static const std::map<std::string, std::vector<llama_token>> & k_tests() {
//    static std::map<std::string, std::vector<llama_token>> _k_tests = {
//        { ""                      , { }, },
//        { " "                     , { 220, }, },
//        { "  "                    , { 256, }, },
//        { "   "                   , { 262, }, },
//        { "\t"                    , { 197, }, },
//        { "\n"                    , { 198, }, },
//        { "\n\n"                  , { 271, }, },
//        { "\n\n\n"                , { 1432, }, },
//        { "\t\n"                  , { 1602, }, },
//        { "Hello world"           , { 9906, 1917, }, },
//        { " Hello world"          , { 22691, 1917, }, },
//        { "Hello World"           , { 9906, 4435, }, },
//        { " Hello World"          , { 22691, 4435, }, },
//        { " Hello World!"         , { 22691, 4435, 0, }, },
//        { "Hello, world!"         , { 9906, 11, 1917, 0, }, },
//        { " Hello, world!"        , { 22691, 11, 1917, 0, }, },
//        { " this is 🦙.cpp"        , { 420, 374, 11410, 99, 247, 13, 11055, }, },
//        { "w048 7tuijk dsdfhu"    , { 86, 23904, 220, 22, 83, 2005, 42908, 11729, 3013, 17156, }, },
//        { "нещо на Български"     , { 79862, 102118, 13373, 64571, 34694, 3114, 112203, 80112, }, },
//        { "កាន់តែពិសេសអាចខលចេញ"   , { 21549, 222, 98629, 241, 45358, 233, 21549, 237, 45358, 224, 21549, 244, 21549, 115, 21549, 253, 45358, 223, 21549, 253, 21549, 95, 98629, 227, 21549, 223, 21549, 249, 21549, 227, 45358, 223, 21549, 231, }, },
//        { "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)", { 9468, 248, 222, 320, 8416, 8, 27623, 114, 102470, 9468, 234, 104, 31643, 320, 36773, 100166, 98634, 8, 26602, 227, 320, 3323, 43465, 430, 706, 1202, 1866, 4037, 8, }, },
//        { "Hello"                 , { 9906, }, },
//        { " Hello"                , { 22691, }, },
//        { "  Hello"               , { 220, 22691, }, },
//        { "   Hello"              , { 256, 22691, }, },
//        { "    Hello"             , { 262, 22691, }, },
//        { "    Hello\n    Hello"  , { 262, 22691, 198, 262, 22691, }, },
//        { " ("                    , { 320, }, },
//        { "\n ="                  , { 198, 284, }, },
//        { "' era"                 , { 6, 11639, }, },
//        { "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~", { 9906, 11, 379, 65948, 0, 2650, 527, 499, 27623, 223, 949, 37046, 101067, 19000, 23182, 102301, 9263, 18136, 16, 36827, 21909, }, },
//        { "3"                     , { 18, }, },
//        { "33"                    , { 1644, }, },
//        { "333"                   , { 8765, }, },
//        { "3333"                  , { 8765, 18, }, },
//        { "33333"                 , { 8765, 1644, }, },
//        { "333333"                , { 8765, 8765, }, },
//        { "3333333"               , { 8765, 8765, 18, }, },
//        { "33333333"              , { 8765, 8765, 1644, }, },
//        { "333333333"             , { 8765, 8765, 8765, }, },
//    };
//
//    return _k_tests;
//}
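
// Read the paired test files for a vocab:
//   <vocab>.inp - raw input strings, separated by lines containing "__ggml_vocab_test__"
//   <vocab>.out - one line of space-separated expected token ids per input
// Returns an empty map (and prints an error) if a file cannot be opened or the
// number of inputs and outputs does not match.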
static std::map<std::string, std::vector<llama_token>> read_tests(const std::string & fname_inp, const std::string & fname_out) {
    std::map<std::string, std::vector<llama_token>> tests;

    std::ifstream ifs_inp(fname_inp);
    if (!ifs_inp) {
        fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_inp.c_str());
        return tests;
    }

    std::string sraw((std::istreambuf_iterator<char>(ifs_inp)), std::istreambuf_iterator<char>());

    std::ifstream ifs_out(fname_out);
    if (!ifs_out) {
        fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
        return tests;
    }

    std::vector<std::string> sout;
    for (std::string line; std::getline(ifs_out, line);) {
        sout.push_back(line);
    }

    const std::string sep = "\n__ggml_vocab_test__\n";

    std::vector<std::string> sinp;

    size_t pos = 0;
    while (pos < sraw.size()) {
        const size_t next = sraw.find(sep, pos);
        if (next == std::string::npos) {
            sinp.push_back(sraw.substr(pos));
            break;
        }
        sinp.push_back(sraw.substr(pos, next - pos));
        pos = next + sep.size();
    }

    if (sinp.size() != sout.size()) {
        fprintf(stderr, "%s : error: input and output files have different number of tests\n", __func__);
        return tests;
    }

    for (size_t i = 0; i < sinp.size(); ++i) {
        const std::string & s = sinp[i];
        const std::string & o = string_strip(sout[i]);

        std::vector<llama_token> toks;

        size_t pos = 0;
        while (pos < o.size()) {
            size_t next = o.find(' ', pos);
            if (next == std::string::npos) {
                next = o.size();
            }
            const std::string stok = o.substr(pos, next - pos);
            toks.push_back(std::stoi(stok));
            pos = next + 1;
        }

        tests[s] = toks;
    }

    return tests;
}
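
// Usage: test-tokenizer-0 <vocab-file> [text-file]
//
// <vocab-file> is loaded vocab-only; the matching <vocab-file>.inp/.out test pair
// is expected to live next to it. When [text-file] is given, it is tokenized as
// well and the tokens are written to <text-file>.tokcpp.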
int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s vocab-file [text-file]\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    const std::string fname_inp = fname + ".inp";
    const std::string fname_out = fname + ".out";

    std::string fname_text;
    if (argc > 2) {
        fname_text = argv[2];
    }

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    llama_model * model;
    llama_context * ctx;

    llama_backend_init();

    // load the vocab
    {
        auto mparams = llama_model_default_params();

        mparams.vocab_only = true;

        model = llama_load_model_from_file(fname.c_str(), mparams);

        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        auto cparams = llama_context_default_params();

        ctx = llama_new_context_with_model(model, cparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            llama_free_model(model);
            return 1;
        }
    }

#ifdef _WIN32
    // We need this for unicode console support
    console::init(false, false);
    atexit([]() { console::cleanup(); });
#endif

    bool success = true;

    const auto k_tests = read_tests(fname_inp, fname_out);

    if (k_tests.empty()) {
        fprintf(stderr, "%s : error: no tests found\n", __func__);
        return 1;
    }
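
    // tokenize without adding special tokens (e.g. BOS), so the result can be
    // compared 1:1 against the expected token ids from the .out file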
    const bool add_special = false;

    for (const auto & test_kv : k_tests) {
        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special);

        printf("\n");
        printf("src: '%s'\n", test_kv.first.c_str());
        printf("res: '%s'\n", llama_detokenize_bpe(ctx, res).c_str());
        printf("tok: ");
        for (const auto & tok : res) {
            printf("%d ", tok);
        }
        printf("\n");

        bool correct = res.size() == test_kv.second.size();
        for (int i = 0; i < (int) res.size() && correct; ++i) {
            if (test_kv.second[i] != res[i]) {
                correct = false;
            }
        }

        if (!correct) {
            fprintf(stderr, "%s : failed test:    '%s'\n", __func__, test_kv.first.c_str());
            fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
                llama_detokenize_bpe(ctx, res).c_str(),
                llama_detokenize_bpe(ctx, test_kv.second).c_str());
            fprintf(stderr, "%s : expected tokens: ", __func__);
            for (const auto & t : test_kv.second) {
                fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
            }
            fprintf(stderr, "\n");
            fprintf(stderr, "%s : got tokens:      ", __func__);
            for (const auto & t : res) {
                fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
            }
            fprintf(stderr, "\n");

            success = false;
        }
    }
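
    // optionally tokenize an entire text file and dump the tokens to
    // <text-file>.tokcpp for manual inspection or external comparison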
    if (!fname_text.empty()) {
        fprintf(stderr, "%s : tokenizing: '%s'\n", __func__, fname_text.c_str());

        std::string text;
        {
            std::ifstream ifs(fname_text);
            if (!ifs) {
                fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_text.c_str());
                return 1;
            }
            text = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>());
        }

        fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());

        const std::vector<llama_token> res = llama_tokenize(ctx, text, add_special);

        fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());

        {
            const std::string fname_out = fname_text + ".tokcpp";

            std::ofstream ofs(fname_out);
            if (!ofs) {
                fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
                return 1;
            }

            for (const auto & tok : res) {
                ofs << tok << " '" << string_strip(llama_detokenize_bpe(ctx, std::vector<int>{tok})) << "'" << std::endl;
            }
        }

        fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
    }

    // free the context before the model it was created from
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    printf("\n");
    printf("Tests %s\n", success ? "passed" : "failed");

    // non-zero exit code (3) signals a tokenization mismatch
    return success ? 0 : 3;
}