test-tokenizer-1-spm.cpp

#include "llama.h"
#include "common.h"
#include "unicode.h"
#include "console.h"

#include <cassert>
#include <codecvt>
#include <cstdio>
#include <cstring>
#include <locale>
#include <string>
#include <thread>
#include <vector>
#include <atomic>
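
// Roundtrip test for SentencePiece (SPM) vocabularies: every token id and
// every Unicode codepoint must survive a tokenize -> detokenize cycle
// unchanged. Run it against a vocab-only GGUF, e.g. (path illustrative):
//
//   ./test-tokenizer-1-spm ./models/ggml-vocab-llama-spm.gguf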

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    llama_model * model;
    llama_context * ctx;

    llama_backend_init();

    // load the vocab
    {
        auto mparams = llama_model_default_params();

        mparams.vocab_only = true;

        model = llama_model_load_from_file(fname.c_str(), mparams);

        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        auto cparams = llama_context_default_params();

        ctx = llama_new_context_with_model(model, cparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to create context for '%s'\n", __func__, fname.c_str());
            llama_model_free(model);
            return 1;
        }
    }
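
    // the test only applies to SPM vocabularies; any other vocab type
    // returns a sentinel exit code instead of failing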
    //GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
    if (llama_vocab_type(model) != LLAMA_VOCAB_TYPE_SPM) {
        return 99;
    }

#ifdef _WIN32
    // We need this for unicode console support
    console::init(false, false);
    atexit([]() { console::cleanup(); });
#endif
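
    // for every token id in the vocab: detokenize it to text, tokenize that
    // text again, and detokenize the result -- the final string must match
    // the first one, otherwise tokenization is not an inverse of detokenization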
    const int n_vocab = llama_n_vocab(model);

    for (int i = 0; i < n_vocab; ++i) {
        std::string str = common_detokenize(ctx, std::vector<int>(1, i), true);
        std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
        std::string check = common_detokenize(ctx, tokens);
        if (check != str) {
            fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
                __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
            return 2;
        }
    }

    // unicode: roundtrip every codepoint below 0x110000, striping the scan
    // across all hardware threads and skipping surrogates and an unassigned range
    {
        const int nthread = std::thread::hardware_concurrency();

        std::vector<std::thread> threads(nthread);

        std::atomic_int errcode = {};

        for (int i = 0; i < nthread; ++i) {
            threads[i] = std::thread([i, nthread, ctx, &errcode]() {
                for (uint32_t cp = i; !errcode && cp < 0x00110000; cp += nthread) {
                    if ((0x0000D800 <= cp && cp <= 0x0000DFFF) || // surrogates \p{Cs}
                        (0x00040000 <= cp && cp <= 0x000E0000)) { // undefined \p{Cn}
                        continue;
                    }

                    std::string str = unicode_cpt_to_utf8(cp);
                    std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
                    std::string check = common_detokenize(ctx, tokens);
                    // 9601 is U+2581 (LOWER ONE EIGHTH BLOCK), which SPM uses
                    // as its whitespace marker, so it cannot roundtrip
                    if (cp != 9601 && str != check) {
                        fprintf(stderr, "error: codepoint 0x%x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
                            cp, check.c_str(), check.length(), str.c_str(), str.length());
                        errcode = 3;
                    }
                }
            });
        }

        for (auto & t : threads) {
            t.join();
        }

        if (errcode) {
            return errcode;
        }
    }

    // free the context before the model it references
    llama_free(ctx);
    llama_model_free(model);

    llama_backend_free();

    return 0;
}