ngram-cache.cpp

#include "ngram-cache.h"
#include "common.h"
#include "log.h"

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <thread>
#include <algorithm>
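
// Scan the last nnew tokens of inp and, for every ngram size in [ngram_min, ngram_max],
// increment the count of the token that followed each ngram. Only the tail of inp is
// visited (see i_start), so the cache can be extended incrementally as tokens arrive.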
void common_ngram_cache_update(common_ngram_cache & ngram_cache, int ngram_min, int ngram_max,
                               std::vector<llama_token> & inp, int nnew, bool print_progress) {
    const int64_t t_start_ms = ggml_time_ms();
    const int64_t inp_size = inp.size();

    const int64_t n_todo = inp_size * (ngram_max - ngram_min + 1);
    int64_t n_done = 0;

    for (int64_t ngram_size = ngram_min; ngram_size <= ngram_max; ++ngram_size) {
        const int64_t i_start = std::max(inp_size - nnew, ngram_size);
        for (int64_t i = i_start; i < inp_size; ++i) {
            const int64_t ngram_start = i - ngram_size;
            common_ngram ngram(&inp[ngram_start], ngram_size);
            const llama_token token = inp[i];

            common_ngram_cache::iterator part_it = ngram_cache.find(ngram);
            if (part_it == ngram_cache.end()) {
                common_ngram_cache_part part;
                part.emplace(token, 1);
                ngram_cache.emplace(ngram, part);
            } else {
                common_ngram_cache_part::iterator token_count_it = part_it->second.find(token);
                if (token_count_it == part_it->second.end()) {
                    part_it->second.emplace(token, 1);
                } else {
                    token_count_it->second++;
                }
            }
            ++n_done;

            if (print_progress && n_done % 10000000 == 0) {
                const int64_t t_now_ms = ggml_time_ms();
                const int64_t eta_ms   = (inp_size*(ngram_max-ngram_min+1) - n_done) * (t_now_ms - t_start_ms) / n_done;
                const int64_t eta_min  = eta_ms / (60*1000);
                const int64_t eta_s    = (eta_ms - 60*1000*eta_min) / 1000;

                fprintf(stderr, "%s: %" PRId64 "/%" PRId64 " done, ETA: %02" PRId64 ":%02" PRId64 "\n", __func__, n_done, n_todo, eta_min, eta_s);
            }
        }
    }
}
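
// Usage sketch (illustrative, not part of this file): build a cache over a full token
// sequence, then extend it incrementally. Variable names here are hypothetical; the
// LLAMA_NGRAM_MIN/LLAMA_NGRAM_MAX constants are assumed to come from ngram-cache.h.
//
//     std::vector<llama_token> tokens = /* tokenized text */;
//     common_ngram_cache cache;
//     common_ngram_cache_update(cache, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, tokens, tokens.size(), false);
//     // later, after appending n_new tokens to `tokens`:
//     common_ngram_cache_update(cache, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, tokens, n_new, false);
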
// Helper function to get a token from the combined, speculative sequence of inp and draft.
static llama_token get_token(const std::vector<llama_token> & inp, const std::vector<llama_token> & draft, const size_t i) {
    return i < inp.size() ? inp[i] : draft[1 + i - inp.size()];
}

// If sample size or percentage are below these thresholds the draft is aborted early:
constexpr int    draft_min_sample_size_lax[LLAMA_NGRAM_MAX] = { 2,  2,  1,  1};
constexpr int        draft_min_percent_lax[LLAMA_NGRAM_MAX] = {66, 50, 50, 50};
constexpr int draft_min_sample_size_strict[LLAMA_NGRAM_MAX] = { 4,  3,  2,  2};
constexpr int     draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66};
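
// Indexing note: try_draft() below reads these arrays at the position of the ngram in
// ngrams_primary (ngram size minus ngram_min), and the static-cache checks use
// [LLAMA_NGRAM_STATIC-1]. Assuming ngram_min == LLAMA_NGRAM_MIN == 1, both conventions
// amount to indexing by ngram size - 1.
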
// Helper function that tries to draft a token from only the static ngram cache:
static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram ngram_static) {
    common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
    if (part_static_it == nc_static.end()) {
        return LLAMA_TOKEN_NULL;
    }
    const common_ngram_cache_part part_static = part_static_it->second;

    int max_count_static  = 0;
    int sum_count_static  = 0;
    llama_token max_token = LLAMA_TOKEN_NULL;

    for (std::pair<llama_token, int> token_count_static : part_static) {
        const llama_token token    = token_count_static.first;
        const int32_t count_static = token_count_static.second;

        if (count_static > max_count_static) {
            max_token        = token;
            max_count_static = count_static;
        }
        sum_count_static += count_static;
    }

    if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) {
        return LLAMA_TOKEN_NULL;
    }
    if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) {
        return LLAMA_TOKEN_NULL;
    }
    return max_token;
}
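
// Scoring note for the overload below: each candidate token's primary count is weighted
// by its static-cache count (scaled by 100, or 1 if absent), so tokens that are also
// corroborated by the static cache dominate tokens seen only in the primary cache.
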
// Try to draft a token from primary cache (context/dynamic), validate with static cache:
static llama_token try_draft(
    common_ngram_cache & nc_primary, const std::vector<common_ngram> & ngrams_primary, common_ngram_cache_part & part_static,
    const int * min_sample_size, const int * min_percent) {

    llama_token drafted_token = LLAMA_TOKEN_NULL;

    for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == LLAMA_TOKEN_NULL; --i) {
        const common_ngram ngram_primary = ngrams_primary[i];

        common_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
        if (part_primary_it == nc_primary.end()) {
            continue;
        }
        const common_ngram_cache_part part_primary = part_primary_it->second;

        int max_count_primary = 0;
        int max_count_static  = 0;
        int sum_count_primary = 0;
        llama_token max_token = LLAMA_TOKEN_NULL;

        for (std::pair<llama_token, int> token_count_primary : part_primary) {
            const llama_token token = token_count_primary.first;

            common_ngram_cache_part::iterator token_count_static_it = part_static.find(token);

            const int32_t count_primary = token_count_primary.second;
            const int32_t count_static  = token_count_static_it != part_static.end() ? 100*token_count_static_it->second : 1;

            if (count_primary*count_static > max_count_primary*max_count_static) {
                max_token         = token;
                max_count_primary = count_primary;
                max_count_static  = count_static;
            }
            sum_count_primary += count_primary;
        }

        if (sum_count_primary < min_sample_size[i]) {
            continue;
        }
        if (100*max_count_primary < min_percent[i]*sum_count_primary) {
            continue;
        }
        drafted_token = max_token;
    }

    return drafted_token;
}

void common_ngram_cache_draft(
    std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
    common_ngram_cache & nc_context, common_ngram_cache & nc_dynamic, common_ngram_cache & nc_static
) {
    GGML_ASSERT(draft.size() == 1);
    const int inp_size = inp.size();

    if (inp_size < LLAMA_NGRAM_STATIC) {
        return;
    }

    while ((int) draft.size()-1 < n_draft) {
        llama_token drafted_token = LLAMA_TOKEN_NULL;

        const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1;
        common_ngram ngram_static;
        for (int j = ngram_start_static; j < ngram_start_static + LLAMA_NGRAM_STATIC; ++j) {
            ngram_static.tokens[j-ngram_start_static] = get_token(inp, draft, j);
        }
        common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
        common_ngram_cache_part part_static;
        if (part_static_it != nc_static.end()) {
            part_static = part_static_it->second;
        }

        // cd = context + dynamic
        std::vector<common_ngram> ngrams_cd;
        for (int ngram_size_cd = ngram_min; ngram_size_cd <= ngram_max; ++ngram_size_cd) {
            const int ngram_start_cd = inp_size-ngram_size_cd + draft.size()-1;
            common_ngram ngram_cd;
            for (int j = ngram_start_cd; j < ngram_start_cd + ngram_size_cd; ++j) {
                ngram_cd.tokens[j-ngram_start_cd] = get_token(inp, draft, j);
            }
            ngrams_cd.push_back(ngram_cd);
        }
        if (drafted_token == LLAMA_TOKEN_NULL) {
            drafted_token = try_draft(nc_context, ngrams_cd, part_static, draft_min_sample_size_lax, draft_min_percent_lax);
        }
        if (drafted_token == LLAMA_TOKEN_NULL) {
            drafted_token = try_draft(nc_dynamic, ngrams_cd, part_static, draft_min_sample_size_strict, draft_min_percent_strict);
        }
        if (drafted_token == LLAMA_TOKEN_NULL) {
            drafted_token = try_draft(nc_static, ngram_static);
        }
        if (drafted_token == LLAMA_TOKEN_NULL) {
            break;
        }

        LOG(" - draft candidate: token=%d\n", drafted_token);
        draft.push_back(drafted_token);
    }
}
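
// Usage sketch (illustrative): drafting up to 16 tokens. `draft` must be seeded with
// exactly one token (the most recently sampled one) before the call; the speculative
// continuation is appended after it. Names other than the functions above are hypothetical.
//
//     std::vector<llama_token> draft = {last_sampled_token};
//     common_ngram_cache_draft(inp, draft, 16, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX,
//                              nc_context, nc_dynamic, nc_static);
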
void common_ngram_cache_save(common_ngram_cache & ngram_cache, std::string & filename) {
    std::ofstream file_out(filename, std::ios::binary);
    for (std::pair<common_ngram, common_ngram_cache_part> item : ngram_cache) {
        const common_ngram      ngram        = item.first;
        common_ngram_cache_part token_counts = item.second;
        GGML_ASSERT(!token_counts.empty());
        const int32_t ntokens = token_counts.size();
        GGML_ASSERT(ntokens > 0);

        file_out.write(reinterpret_cast<const char *>(&ngram),   sizeof(common_ngram));
        file_out.write(reinterpret_cast<const char *>(&ntokens), sizeof(int32_t));
        for (std::pair<llama_token, int32_t> item2 : token_counts) {
            const llama_token token = item2.first;
            const int32_t     count = item2.second;
            GGML_ASSERT(count > 0);

            file_out.write(reinterpret_cast<const char *>(&token), sizeof(llama_token));
            file_out.write(reinterpret_cast<const char *>(&count), sizeof(int32_t));
        }
    }
}
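
// On-disk layout written above and read back by common_ngram_cache_load(): a sequence of
// records, each
//     [common_ngram][int32_t ntokens]([llama_token][int32_t count]) * ntokens
// The structs are written raw in host byte order, so the files are not portable across
// platforms with a different endianness or struct layout.
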
common_ngram_cache common_ngram_cache_load(std::string & filename) {
    std::ifstream hashmap_file(filename, std::ios::binary);
    if (!hashmap_file) {
        throw std::ifstream::failure("Unable to open file " + filename);
    }
    common_ngram_cache ngram_cache;

    common_ngram ngram;
    int32_t      ntokens;
    llama_token  token;
    int32_t      count;

    char * ngramc   = reinterpret_cast<char*>(&ngram);
    char * ntokensc = reinterpret_cast<char*>(&ntokens);
    char * tokenc   = reinterpret_cast<char*>(&token);
    char * countc   = reinterpret_cast<char*>(&count);
    while (hashmap_file.read(ngramc, sizeof(common_ngram))) {
        GGML_ASSERT(!hashmap_file.eof());
        GGML_ASSERT(hashmap_file.read(ntokensc, sizeof(int32_t)));
        GGML_ASSERT(ntokens > 0);
        common_ngram_cache_part token_counts;

        for (int i = 0; i < ntokens; ++i) {
            GGML_ASSERT(!hashmap_file.eof());
            GGML_ASSERT(hashmap_file.read(tokenc, sizeof(llama_token)));
            GGML_ASSERT(!hashmap_file.eof());
            GGML_ASSERT(hashmap_file.read(countc, sizeof(int32_t)));
            GGML_ASSERT(count > 0);
            token_counts.emplace(token, count);
        }

        ngram_cache.emplace(ngram, token_counts);
    }
    GGML_ASSERT(hashmap_file.eof());

    return ngram_cache;
}

void common_ngram_cache_merge(common_ngram_cache & ngram_cache_target, common_ngram_cache & ngram_cache_add) {
    for (std::pair<common_ngram, common_ngram_cache_part> ngram_part : ngram_cache_add) {
        const common_ngram      ngram = ngram_part.first;
        common_ngram_cache_part part  = ngram_part.second;

        common_ngram_cache::iterator part_merged_it = ngram_cache_target.find(ngram);
        if (part_merged_it == ngram_cache_target.end()) {
            ngram_cache_target.emplace(ngram, part);
            continue;
        }

        for (std::pair<llama_token, int32_t> token_count : part) {
            const llama_token token = token_count.first;
            const int32_t     count = token_count.second;
            GGML_ASSERT(count > 0);

            common_ngram_cache_part::iterator token_count_merged_it = part_merged_it->second.find(token);
            if (token_count_merged_it == part_merged_it->second.end()) {
                part_merged_it->second.emplace(token, count);
                continue;
            }
            token_count_merged_it->second += count;
        }
    }
}
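
// Usage sketch (illustrative): folding a per-session cache into a persistent dynamic
// cache and writing it back out. The file name is hypothetical.
//
//     common_ngram_cache nc_session = /* built during this session */;
//     std::string path = "ngrams.bin";
//     common_ngram_cache nc_dynamic = common_ngram_cache_load(path);
//     common_ngram_cache_merge(nc_dynamic, nc_session);
//     common_ngram_cache_save(nc_dynamic, path);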