test-tokenizer-1-spm.cpp
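A llama.cpp test for SentencePiece (SPM) tokenizers: every vocab token and every valid Unicode codepoint must survive a tokenize/detokenize round trip unchanged.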

#include "llama.h"
#include "common.h"
#include "console.h"
#include "../src/unicode.h"

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>
#include <thread>
#include <vector>
int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
    llama_model * model;
    llama_context * ctx;

    llama_backend_init();
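    // one-time backend initialization; must happen before other llama calls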
    // load the vocab
    {
        auto mparams = llama_model_default_params();

        mparams.vocab_only = true;

        model = llama_model_load_from_file(fname.c_str(), mparams);

        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        auto cparams = llama_context_default_params();

        ctx = llama_init_from_model(model, cparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to create context for '%s'\n", __func__, fname.c_str());
            llama_model_free(model);
            return 1;
        }
    }
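    // this test only applies to SentencePiece (SPM) vocabs; bail out with a
    // distinct exit code so "not applicable" is distinguishable from a failure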
    const llama_vocab * vocab = llama_model_get_vocab(model);

    //GGML_ASSERT(llama_vocab_type(vocab) == LLAMA_VOCAB_TYPE_SPM);
    if (llama_vocab_type(vocab) != LLAMA_VOCAB_TYPE_SPM) {
        return 99;
    }
#ifdef _WIN32
    // We need this for unicode console support
    console::init(false, false);
    atexit([]() { console::cleanup(); });
#endif
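    // round-trip every token id: detokenize it, tokenize the resulting string,
    // and detokenize again; the final string must exactly match the first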
    const int n_vocab = llama_vocab_n_tokens(vocab);

    for (int i = 0; i < n_vocab; ++i) {
        std::string str = common_detokenize(ctx, std::vector<llama_token>(1, i), true);
        std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
        std::string check = common_detokenize(ctx, tokens);
        if (check != str) {
            fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
                __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
            return 2;
        }
    }
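    // second pass: check that every valid Unicode codepoint survives a
    // tokenize/detokenize round trip; the work is split across hardware threads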
    // unicode
    {
        const int nthread = std::thread::hardware_concurrency();

        std::vector<std::thread> threads(nthread);

        std::atomic_int errcode = {};

        for (int i = 0; i < nthread; ++i) {
            threads[i] = std::thread([i, nthread, ctx, &errcode]() {
                for (uint32_t cp = i; !errcode && cp < 0x00110000; cp += nthread) {
                    if ((0x0000D800 <= cp && cp <= 0x0000DFFF) || // surrogates \p{Cs}
                        (0x00040000 <= cp && cp <= 0x000E0000)) { // undefined  \p{Cn}
                        continue;
                    }

                    std::string str = unicode_cpt_to_utf8(cp);
                    std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
                    std::string check = common_detokenize(ctx, tokens);
                    // 9601 = U+2581, the SPM whitespace marker; it maps to a plain
                    // space on detokenization, so it is not expected to round-trip
                    if (cp != 9601 && str != check) {
                        fprintf(stderr, "error: codepoint 0x%x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
                            cp, check.c_str(), check.length(), str.c_str(), str.length());
                        errcode = 3;
                    }
                }
            });
        }

        for (auto & t : threads) {
            t.join();
        }

        if (errcode) {
            return errcode;
        }
    }
    // free the context before the model it depends on
    llama_free(ctx);
    llama_model_free(model);

    llama_backend_free();

    return 0;
}
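To run the check directly, pass a GGUF file containing an SPM vocab, for example (binary and vocab paths depend on your build and checkout layout):

    ./bin/test-tokenizer-1-spm ../models/ggml-vocab-llama-spm.gguf

The exit code indicates the failing stage: 1 for load errors, 2 for a vocab token that does not round-trip, 3 for a codepoint that does not round-trip, and 99 when the vocab is not SPM.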