test-tokenizer-0-falcon.py

# tests with BPE tokenizer
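# Usage:
#   python3 test-tokenizer-0-falcon.py <dir_tokenizer> [--fname-tok FILE]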
import argparse

from transformers import AutoTokenizer

parser = argparse.ArgumentParser()
parser.add_argument("dir_tokenizer", help="directory containing the HF tokenizer files (e.g. 'tokenizer.json')")
parser.add_argument("--fname-tok", help="path to a text file to tokenize")
args = parser.parse_args()

dir_tokenizer = args.dir_tokenizer

tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
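# test strings covering empty/whitespace-only input, tabs and newlines, punctuation,
# non-Latin scripts (Cyrillic, Khmer), emoji, and varying amounts of leading whitespace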
tests = [
    "",
    " ",
    "  ",
    "   ",
    "\t",
    "\n",
    "\t\n",
    "Hello world",
    " Hello world",
    "Hello World",
    " Hello World",
    " Hello World!",
    "Hello, world!",
    " Hello, world!",
    " this is 🦙.cpp",
    "w048 7tuijk dsdfhu",
    "нещо на Български",
    "កាន់តែពិសេសអាចខលចេញ",
    "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
    "Hello",
    " Hello",
    "  Hello",
    "   Hello",
    "    Hello",
    "    Hello\n    Hello",
    "\n =",
    "' era",
]
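# sanity check: print each test string, its token ids, and the decoded round-trip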
for text in tests:
    print('text: ', text)
    print(tokenizer.encode(text))
    print(tokenizer.decode(tokenizer.encode(text)))

print("\n\ntests for C++:\n")
for text in tests:
    res = tokenizer.encode(text)

    k = text.replace('\n', '\\n')
    k = k.replace('\t', '\\t')
    k = '"' + k + '"'

    print("{ %-24s, { " % k, end='')
    for x in res:
        print("%7d," % x, end='')
    print(" }, },")
print(tokenizer.encode('hello'))
print(tokenizer.encode('world'))
print(tokenizer.encode(' world'))
print(tokenizer.encode('hello world'))
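# optionally tokenize an entire file and write the result to <file>.tok,
# e.g. for comparison against the output of the C++ tokenizer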
fname_tok = args.fname_tok
if fname_tok:
    print('tokenizing file: ', fname_tok)
    fname_out = fname_tok + '.tok'
    with open(fname_tok, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    s = ''.join(lines)
    res = tokenizer.encode(s)
    # write to file, one token per line: "<id> '<decoded piece>'"
    with open(fname_out, 'w', encoding='utf-8') as f:
        for x in res:
            f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
    print('len(res): ', len(res))
    print('len(lines): ', len(lines))
    print('results written to: ', fname_out)