test-tokenizer-0-bpe.py

# tests with BPE tokenizer
#
# sample usage:
#
#   python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/Meta-Llama-3-8B-Instruct/
#   python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/falcon-7b/
#   python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/deepseek-coder-6.7b-instruct/
#
import argparse

from transformers import AutoTokenizer

parser = argparse.ArgumentParser()
parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
parser.add_argument("--fname-tok", help="path to a text file to tokenize")
args = parser.parse_args()

dir_tokenizer = args.dir_tokenizer

tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
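
# test strings covering empty input, whitespace runs, mixed scripts and emoji, and repeated digits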
tests = [
    "",
    " ",
    "  ",
    "   ",
    "\t",
    "\n",
    "\n\n",
    "\n\n\n",
    "\t\n",
    "Hello world",
    " Hello world",
    "Hello World",
    " Hello World",
    " Hello World!",
    "Hello, world!",
    " Hello, world!",
    " this is 🦙.cpp",
    "w048 7tuijk dsdfhu",
    "нещо на Български",
    "កាន់តែពិសេសអាចខលចេញ",
    "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
    "Hello",
    " Hello",
    "  Hello",
    "   Hello",
    "    Hello",
    "    Hello\n    Hello",
    " (",
    "\n =",
    "' era",
    "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~",
    "3",
    "33",
    "333",
    "3333",
    "33333",
    "333333",
    "3333333",
    "33333333",
    "333333333",
]
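
# print each test string together with its token ids and the round-trip decode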
for text in tests:
    print('text: ', text)
    print(tokenizer.encode(text))
    print(tokenizer.decode(tokenizer.encode(text)))
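
# emit each test case as a C++ brace-initializer entry ({ "text", { token ids } }) for the C++ tokenizer tests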
print("\n\ntests for C++:\n")
for text in tests:
    res = tokenizer.encode(text)

    k = text.replace('\n', '\\n')
    k = k.replace('\t', '\\t')
    k = '"' + k + '"'
    print("{ %-24s, { " % k, end='')
    for x in res:
        print("%7d," % x, end='')
    print(" }, },")
print(tokenizer.encode('hello'))
print(tokenizer.encode('world'))
print(tokenizer.encode(' world'))
print(tokenizer.encode('hello world'))
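
# optionally tokenize a whole file with --fname-tok and write one "<token id> '<decoded piece>'" line per token to <file>.tok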
fname_tok = args.fname_tok
if fname_tok:
    print('tokenizing file: ', fname_tok)
    fname_out = fname_tok + '.tok'
    with open(fname_tok, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        s = ''.join(lines)
        res = tokenizer.encode(s)
        # write to file
        with open(fname_out, 'w', encoding='utf-8') as f:
            for x in res:
                # LLaMA v3 for some reason strips the space for these tokens (and others)
                # if x == 662:
                #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
                # elif x == 1174:
                #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
                # elif x == 2564:
                #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
                # elif x == 758:
                #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
                # elif x == 949:
                #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
                # elif x == 5354:
                #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
                # else:
                #     f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
                f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
    print('len(res): ', len(res))
    print('len(lines): ', len(lines))
    print('results written to: ', fname_out)