// test-tokenizer-0.cpp
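//
// Unit test for the llama.cpp tokenizer: each string in k_tests() is tokenized
// against the supplied LLaMA vocab file and the result is compared with the
// expected token IDs.
//
// Usage sketch (the binary name and vocab path are illustrative, not from the
// source):
//
//   ./test-tokenizer-0 models/ggml-vocab.bin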

#include "llama.h"

#include <cstdio>
#include <string>
#include <map>
#include <vector>
static const std::map<std::string, std::vector<llama_token>> & k_tests()
{
    static std::map<std::string, std::vector<llama_token>> _k_tests = {
        { "Hello World",         { 1,  10994,   2787, }, },
        { " Hello World",        { 1,  15043,   2787, }, },
        { " Hello World!",       { 1,  15043,   2787,  29991, }, },
        { " this is 🦙.cpp",     { 1,    445,    338,  29871,    243,    162,    169,    156,  29889,   8223, }, },
        { "w048 7tuijk dsdfhu",  { 1,  29893,  29900,  29946,  29947,  29871,  29955,   9161,  13535,  18031,   2176,   6905, }, },
        { "нещо на Български",   { 1,    821,   4851,    665,   1386,  29713,   1305, }, },
    };
    return _k_tests;
}
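
// note: every expected sequence starts with token 1, the BOS token of the
// LLaMA SentencePiece vocab, because the tests below tokenize with add_bos=true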
int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    llama_model   * model;
    llama_context * ctx;

    llama_backend_init(false); // false: do not enable NUMA optimizations

    // load the vocab only - no model weights are needed for tokenization
    {
        auto lparams = llama_context_default_params();

        lparams.vocab_only = true;

        model = llama_load_model_from_file(fname.c_str(), lparams);

        if (model == NULL) {
            fprintf(stderr, "%s : error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        ctx = llama_new_context_with_model(model, lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s : error: failed to create context from '%s'\n", __func__, fname.c_str());
            llama_free_model(model);
            return 1;
        }
    }
    const int n_vocab = llama_n_vocab(ctx);

    if (n_vocab != 32000) {
        fprintf(stderr, "%s : expected 32000 tokens, got %d\n", __func__, n_vocab);
        llama_free(ctx);
        llama_free_model(model);
        return 2;
    }
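
    // For each test case: tokenize with add_bos=true and compare the result
    // against the expected token IDs. Per llama.h of this API vintage,
    // llama_tokenize returns the token count on success and a negative value
    // when the output buffer is too small.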
    for (const auto & test_kv : k_tests()) {
        // size the buffer to the byte length of the input, +1 headroom for
        // the BOS token that add_bos=true prepends
        std::vector<llama_token> res(test_kv.first.size() + 1);
        const int n = llama_tokenize(ctx, test_kv.first.c_str(), res.data(), int(res.size()), true);
        if (n < 0) {
            fprintf(stderr, "%s : failed to tokenize: '%s'\n", __func__, test_kv.first.c_str());
            llama_free(ctx);
            llama_free_model(model);
            return 3;
        }
        res.resize(n);

        bool correct = res.size() == test_kv.second.size();

        for (int i = 0; i < (int) res.size() && correct; ++i) {
            if (res[i] != test_kv.second[i]) {
                correct = false;
            }
        }

        if (!correct) {
            fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
            fprintf(stderr, "%s : expected tokens: ", __func__);
            for (const auto & t : test_kv.second) {
                fprintf(stderr, "%6d, ", t);
            }
            fprintf(stderr, "\n");
            fprintf(stderr, "%s : got tokens: ", __func__);
            for (const auto & t : res) {
                fprintf(stderr, "%6d, ", t);
            }
            fprintf(stderr, "\n");

            llama_free(ctx);
            llama_free_model(model);
            return 3;
        }
    }

    // free the context before the model it was created from
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}