test-tokenizer-0.py

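# Tokenizes a text file with a Hugging Face AutoTokenizer and writes the
# resulting token ids, one per line, to '<fname_tok>.tok'.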
import time
import argparse

from transformers import AutoTokenizer

parser = argparse.ArgumentParser()
parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
parser.add_argument("--fname-tok", help="path to a text file to tokenize", required=True)
args = parser.parse_args()

dir_tokenizer = args.dir_tokenizer
fname_tok = args.fname_tok

tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)

print('tokenizing file: ', fname_tok)  # noqa: NP100
fname_out = fname_tok + '.tok'

with open(fname_tok, 'r', encoding='utf-8') as f:
    lines = f.readlines()
    s = ''.join(lines)
    t_start = time.time()
    res = tokenizer.encode(s, add_special_tokens=False)
    t_end = time.time()
    print('\nmain : tokenized in', "{:.3f}".format(1000.0 * (t_end - t_start)), 'ms (py)')  # noqa: NP100

    with open(fname_out, 'w', encoding='utf-8') as f:
        for x in res:
            # LLaMA v3 for some reason strips the space for these tokens (and others)
            # if x == 662:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 1174:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 2564:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 758:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 949:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 5354:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # else:
            #     f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
            # f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
            f.write(str(x) + '\n')

        print('len(res): ', len(res))  # noqa: NP100
        print('len(lines): ', len(lines))  # noqa: NP100

print('results written to: ', fname_out)  # noqa: NP100
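
A minimal invocation sketch (the tokenizer directory and input file paths below are placeholders, not files shipped with this script):

    python3 test-tokenizer-0.py ./path/to/hf-tokenizer-dir --fname-tok ./input.txt
    # -> writes one token id per line to ./input.txt.tok

The .tok output can then be compared line by line against the ids produced by another tokenizer implementation for the same input. A small hypothetical helper, not part of the original script:

    # compare_tok.py -- hypothetical helper for diffing two .tok files
    import sys

    def read_ids(path: str) -> list[str]:
        with open(path, 'r', encoding='utf-8') as f:
            return [line.strip() for line in f if line.strip()]

    a = read_ids(sys.argv[1])
    b = read_ids(sys.argv[2])
    for i, (x, y) in enumerate(zip(a, b)):
        if x != y:
            print(f'first mismatch at token {i}: {x} != {y}')
            break
    else:
        print(f'no mismatch in the first {min(len(a), len(b))} tokens '
              f'(lengths: {len(a)} vs {len(b)})')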