test-tokenizer-0-llama.py

# tests with SPM tokenizer

import argparse

from sentencepiece import SentencePieceProcessor

parser = argparse.ArgumentParser()
parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
parser.add_argument("--fname-tok", help="path to a text file to tokenize")
args = parser.parse_args()

dir_tokenizer = args.dir_tokenizer

tokenizer = SentencePieceProcessor(dir_tokenizer + '/tokenizer.model')
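
# Example invocations (the paths below are hypothetical; any directory that
# contains a SentencePiece 'tokenizer.model' will do):
#
#   python3 test-tokenizer-0-llama.py ./models/llama-7b
#   python3 test-tokenizer-0-llama.py ./models/llama-7b --fname-tok ./prompt.txt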

# edge cases: empty/whitespace-only strings, leading-space runs, mixed scripts and emoji
tests = [
    "",
    " ",
    "  ",
    "   ",
    "\t",
    "\n",
    "\t\n",
    "Hello world",
    " Hello world",
    "Hello World",
    " Hello World",
    " Hello World!",
    "Hello, world!",
    " Hello, world!",
    " this is 🦙.cpp",
    "w048 7tuijk dsdfhu",
    "нещо на Български",
    "កាន់តែពិសេសអាចខលចេញ",
    "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
    "Hello",
    " Hello",
    "  Hello",
    "   Hello",
    "    Hello",
    "    Hello\n    Hello",
]

# round-trip every test string with and without the BOS token
for text in tests:
    print('text: ', text)
    print('\nwith bos:')
    print(tokenizer.encode(text, add_bos=True))
    print(tokenizer.decode(tokenizer.encode(text, add_bos=True)))
    print('\nwithout bos:')
    print(tokenizer.encode(text, add_bos=False))
    print(tokenizer.decode(tokenizer.encode(text, add_bos=False)))

print("'" + tokenizer.id_to_piece(15043) + "'") # '_Hello'
print("'" + tokenizer.id_to_piece(29871) + "'") # '_'
print("'" + tokenizer.decode([15043]) + "'")                      # 'Hello'
print("'" + tokenizer.decode([15043, 15043]) + "'")               # 'Hello Hello'
print("'" + tokenizer.decode([29871, 15043]) + "'")               # ' Hello'
print("'" + tokenizer.decode([29871, 15043, 29871, 15043]) + "'") # ' Hello Hello'

# print the token ids of each test string formatted as C++ initializer lists
print("\n\ntests for C++:\n")
for text in tests:
    res = tokenizer.encode(text, add_bos=False)

    k = text.replace('\n', '\\n')
    k = k.replace('\t', '\\t')
    k = '"' + k + '"'
    print("{ %-24s, { " % k, end='')
    for x in res:
        print("%7d," % x, end='')
    print(" }, },")

print(tokenizer.encode('hello'))
print(tokenizer.encode('world'))
print(tokenizer.encode(' world'))
print(tokenizer.encode('hello world'))

# optionally tokenize a whole file and write one "<id> '<decoded piece>'" pair per line
fname_tok = args.fname_tok
if fname_tok:
    print('tokenizing file: ', fname_tok)
    fname_out = fname_tok + '.tok'
    with open(fname_tok, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        s = ''.join(lines)
        res = tokenizer.encode(s, add_bos=True)
        # write to file
        with open(fname_out, 'w', encoding='utf-8') as fout:
            for x in res:
                fout.write(str(x) + ' \'' + tokenizer.decode([x]) + '\'\n')
        print('len(res): ', len(res))
        print('len(lines): ', len(lines))
        print('results written to: ', fname_out)
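
# Each line of the resulting '<fname_tok>.tok' file pairs a token id with its
# decoded text, e.g. (id taken from the piece checks above):
#
#   15043 'Hello'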