test-tokenizer-0-spm.py

# tests with SPM tokenizer
#
# sample usage:
#
#  python3 tests/test-tokenizer-0-spm.py ~/Data/huggingface/Llama-2-7b-hf/
#  python3 tests/test-tokenizer-0-spm.py ~/Data/huggingface/CodeLlama-34b-Instruct-hf/
#

import argparse

from sentencepiece import SentencePieceProcessor

parser = argparse.ArgumentParser()
parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
parser.add_argument("--fname-tok", help="path to a text file to tokenize")
args = parser.parse_args()

dir_tokenizer = args.dir_tokenizer

tokenizer = SentencePieceProcessor(dir_tokenizer + '/tokenizer.model')
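
# note: recent sentencepiece releases also accept the model path as a keyword,
# e.g. SentencePieceProcessor(model_file=dir_tokenizer + '/tokenizer.model')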

tests = [
    "",
    " ",
    "  ",
    "   ",
    "\t",
    "\n",
    "\n\n",
    "\n\n\n",
    "\t\n",
    "Hello world",
    " Hello world",
    "Hello World",
    " Hello World",
    " Hello World!",
    "Hello, world!",
    " Hello, world!",
    " this is 🦙.cpp",
    "w048 7tuijk dsdfhu",
    "нещо на Български",
    "កាន់តែពិសេសអាចខលចេញ",
    "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
    "Hello",
    " Hello",
    "  Hello",
    "   Hello",
    "    Hello",
    "    Hello\n    Hello",
    " (",
    "\n =",
    "' era",
    "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~",
    "3",
    "33",
    "333",
    "3333",
    "33333",
    "333333",
    "3333333",
    "33333333",
    "333333333",
]
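
# round-trip every test string through encode/decode, both with and without a leading BOS token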
for text in tests:
    print('text: ', text)
    print('\nwith bos:')
    print(tokenizer.encode(text, add_bos=True))
    print(tokenizer.decode(tokenizer.encode(text, add_bos=True)))
    print('\nwithout bos:')
    print(tokenizer.encode(text, add_bos=False))
    print(tokenizer.decode(tokenizer.encode(text, add_bos=False)))
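
# spot-check a few hard-coded piece IDs from the Llama-2 vocabulary:
# 15043 is 'Hello' prefixed with the SPM word-boundary marker, 29871 is the bare marker (a leading space when decoded)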
print("'" + tokenizer.id_to_piece(15043) + "'") # '_Hello'
print("'" + tokenizer.id_to_piece(29871) + "'") # '_'
print("'" + tokenizer.decode([15043]) + "'") # 'Hello'
print("'" + tokenizer.decode([15043, 15043]) + "'") # 'Hello Hello'
print("'" + tokenizer.decode([29871, 15043]) + "'") # ' Hello'
print("'" + tokenizer.decode([29871, 15043, 29871, 15043]) + "'") # ' Hello Hello'

print("\n\ntests for C++:\n")
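
# the loop below emits one C++ initializer line per test string, meant to be pasted
# into the corresponding C++ tokenizer test, roughly:
#   { "Hello world"            , {   15043,    3186, }, },
# (the token IDs shown are illustrative Llama-2 values; they depend on the vocabulary used)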
for text in tests:
    res = tokenizer.encode(text, add_bos=False)

    k = text.replace('\n', '\\n')
    k = k.replace('\t', '\\t')
    k = '"' + k + '"'

    print("{ %-24s, { " % k, end='')
    for x in res:
        print("%7d," % x, end='')
    print(" }, },")

print(tokenizer.encode('hello'))
print(tokenizer.encode('world'))
print(tokenizer.encode(' world'))
print(tokenizer.encode('hello world'))
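
# optionally tokenize a whole text file (--fname-tok) and write one "<id> '<piece>'" line per token to <file>.tok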
fname_tok = args.fname_tok
if fname_tok:
    print('tokenizing file: ', fname_tok)
    fname_out = fname_tok + '.tok'
    with open(fname_tok, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        s = ''.join(lines)
        res = tokenizer.encode(s, add_bos=True)
        # write to file
        with open(fname_out, 'w', encoding='utf-8') as f:
            for x in res:
                f.write(str(x) + ' \'' + tokenizer.decode([x]) + '\'\n')
        print('len(res): ', len(res))
        print('len(lines): ', len(lines))
    print('results written to: ', fname_out)
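
# sample usage with a text file (paths are illustrative):
#
#  python3 tests/test-tokenizer-0-spm.py ~/Data/huggingface/Llama-2-7b-hf/ --fname-tok ./prompt.txt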