// ngram-cache.h
  1. #pragma once
  2. #include "llama.h"
  3. #include <unordered_map>
  4. #include <string>
  5. #include <vector>
// N-gram sizes (in tokens) used by the caches below:
#define LLAMA_NGRAM_MIN    1 // smallest n-gram size handled by the caches
#define LLAMA_NGRAM_MAX    4 // largest n-gram size; also the fixed storage size of llama_ngram::tokens
#define LLAMA_NGRAM_STATIC 2 // presumably the n-gram size used for the static corpus cache (nc_static) — not used in this header, confirm in the .cpp
  9. // Data structures to map n-grams to empirical token probabilities:
  10. struct llama_ngram {
  11. llama_token tokens[LLAMA_NGRAM_MAX];
  12. llama_ngram() {
  13. for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
  14. tokens[i] = -1;
  15. }
  16. }
  17. llama_ngram(const llama_token * input, const int ngram_size) {
  18. for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
  19. tokens[i] = i < ngram_size ? input[i] : -1;
  20. }
  21. }
  22. bool operator==(const llama_ngram & other) const {
  23. for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
  24. if (tokens[i] != other.tokens[i]) {
  25. return false;
  26. }
  27. }
  28. return true;
  29. }
  30. };
  31. struct llama_ngram_hash_function {
  32. size_t operator()(const llama_ngram & ngram) const {
  33. size_t hash = 0;
  34. for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
  35. hash ^= std::hash<llama_token>{}(ngram.tokens[i]);
  36. }
  37. return hash;
  38. }
  39. };
  40. // token -> number of times token has been seen
  41. typedef std::unordered_map<llama_token, int32_t> llama_ngram_cache_part;
  42. // n-gram -> empirical distribution of following tokens
  43. typedef std::unordered_map<llama_ngram, llama_ngram_cache_part, llama_ngram_hash_function> llama_ngram_cache;
// Update an ngram cache with tokens.
// ngram_cache:         the cache to modify.
// ngram_min/ngram_max: the min/max size of the ngrams to extract from inp_data.
// inp_data:            the token sequence with which to update ngram_cache.
// nnew:                how many new tokens have been appended to inp_data since the last call to this function.
// print_progress:      whether to print progress to stderr.
//
// In order to get correct results inp_data can ONLY BE APPENDED TO.
// Changes in the middle need a complete rebuild.
void llama_ngram_cache_update(
    llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max, std::vector<llama_token> & inp_data, int nnew, bool print_progress);
// Try to draft tokens from ngram caches.
// inp:                 the tokens generated so far.
// draft:               the token sequence to draft. Expected to initially contain the previously sampled token.
// n_draft:             maximum number of tokens to add to draft.
// ngram_min/ngram_max: the min/max size of the ngrams in nc_context and nc_dynamic.
// nc_context:          ngram cache based on current context.
// nc_dynamic:          ngram cache based on previous user generations.
// nc_static:           ngram cache generated from a large text corpus, used for validation.
void llama_ngram_cache_draft(
    std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
    llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static);
// Save an ngram cache to a file.
// ngram_cache: the ngram cache to save.
// filename:    the path under which to save the ngram cache.
// NOTE(review): both parameters are taken by non-const reference; whether they are
// actually mutated is not visible from this header — confirm in the .cpp.
void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename);
// Load an ngram cache saved with llama_ngram_cache_save.
// filename: the path from which to load the ngram cache.
// returns:  an ngram cache containing the information saved to filename.
// NOTE(review): behavior when the file is missing or malformed is not visible
// from this header — confirm in the .cpp before relying on it.
llama_ngram_cache llama_ngram_cache_load(std::string & filename);
// Merge two ngram caches.
// ngram_cache_target: the ngram cache to which to add the information from ngram_cache_add.
// ngram_cache_add:    the ngram cache to add to ngram_cache_target.
// NOTE(review): ngram_cache_add is taken by non-const reference; whether it is
// mutated during the merge is not visible from this header — confirm in the .cpp.
void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add);