// test-tokenizer-1-bpe.cpp

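// Round-trip test for models with a BPE tokenizer: every vocab token and a
// large range of Unicode codepoints must survive tokenization unchanged,
// i.e. detokenize(tokenize(s)) == s.
//
// usage (the vocab file name below is only an illustrative example):
//   ./test-tokenizer-1-bpe <vocab-file>
//   e.g. ./test-tokenizer-1-bpe ggml-vocab-gpt-2.gguf
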
#include "llama.h"
#include "common.h"
#include "unicode.h"
#include "console.h"

#include <cassert>
#include <cstdio>
#include <cstring>
#include <string>
#include <codecvt>
#include <map>
#include <vector>
#include <locale>

int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    llama_model * model;
    llama_context * ctx;

    llama_backend_init(false);

    // load the vocab (no tensor weights are needed for this test)
    {
        auto mparams = llama_model_default_params();

        mparams.vocab_only = true;

        model = llama_load_model_from_file(fname.c_str(), mparams);

        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        auto cparams = llama_context_default_params();

        ctx = llama_new_context_with_model(model, cparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to create context with vocab '%s'\n", __func__, fname.c_str());
            llama_free_model(model);
            return 1;
        }
    }

    GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_BPE);

#ifdef _WIN32
    // we need this for unicode console support
    console::init(false, false);
    atexit([]() { console::cleanup(); });
#endif

    const int n_vocab = llama_n_vocab(model);

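    // round-trip every token id: detokenize it to text, tokenize that text
    // again, and verify that detokenizing the result reproduces the string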
    for (int i = 0; i < n_vocab; ++i) {
        std::string str = llama_detokenize_bpe(ctx, std::vector<int>(1, i));
        try {
            // throws std::invalid_argument if str is not valid UTF-8
            auto cps = codepoints_from_utf8(str);
            std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
            std::string check = llama_detokenize_bpe(ctx, tokens);
            if (check != str) {
                fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
                    __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
                return 2;
            }
        }
        catch (const std::invalid_argument &) {
            // a byte-level BPE token need not decode to valid UTF-8 on its own
            // (it may hold part of a multi-byte sequence) - skip the check
            fprintf(stderr, "%s : info: utf8 conversion %d '%s'\n", __func__, i, str.c_str());
        }
    }

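    // check that individual codepoints in the basic multilingual plane also
    // round-trip; each is prefixed with a space, presumably because byte-level
    // BPE vocabularies encode word-initial characters together with a leading space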
    for (uint32_t cp = 0x0000; cp < 0xffff; ++cp) {
        // NOTE: these exceptions seem to be necessary, because the GPT2 tokenizer doesn't want to interfere with some ASCII control characters
        // the range 0xd800-0xdfff is also skipped: those are UTF-16 surrogate halves, not valid Unicode scalar values, and cannot be encoded as UTF-8
        if ((cp < 0x03 || cp > 0x05) && cp != 0x0b && cp != 0x11 && (cp < 0x13 || cp > 0x17) && cp != 0x19 && (cp < 0x1c || cp > 0x1e) && (cp < 0xd800 || cp > 0xdfff)) {
            std::string str = " " + codepoint_to_utf8(cp);
            std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
            std::string check = llama_detokenize_bpe(ctx, tokens);
            if (str != check) {
                fprintf(stderr, "%s : error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
                    __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
                return 3;
            }
        }
    }

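    // same check for codepoints outside the BMP (supplementary planes)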
    // TODO: why doesn't this work for the full range of Unicode codepoints?
    // for (uint32_t cp = 0x10000; cp < 0x0010ffff; ++cp) {
    for (uint32_t cp = 0x10000; cp < 0x00080000; ++cp) {
        std::string str = codepoint_to_utf8(cp);
        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
        std::string check = llama_detokenize_bpe(ctx, tokens);
        if (str != check) {
            fprintf(stderr, "%s : error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
                __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
            return 4;
        }
    }

    // free the context before the model it was created from
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}