#define LLAMA_API_INTERNAL
#include "sampling.h"

#include <random>

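// Allocate and initialize a sampling context from the given parameters:
// parse the optional grammar, size the previous-token history (prev),
// and seed the RNG. Returns nullptr if the grammar fails to parse or
// does not define a "root" symbol.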
struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_params & params) {
    struct llama_sampling_context * result = new llama_sampling_context();

    result->params  = params;
    result->grammar = nullptr;

    // if there is a grammar, parse it
    if (!params.grammar.empty()) {
        result->parsed_grammar = grammar_parser::parse(params.grammar.c_str());

        // will be empty (default) if there are parse errors
        if (result->parsed_grammar.rules.empty()) {
            fprintf(stderr, "%s: failed to parse grammar\n", __func__);
            delete result;
            return nullptr;
        }

        // Ensure that there is a "root" node.
        if (result->parsed_grammar.symbol_ids.find("root") == result->parsed_grammar.symbol_ids.end()) {
            fprintf(stderr, "%s: grammar does not contain a 'root' symbol\n", __func__);
            delete result;
            return nullptr;
        }

        std::vector<const llama_grammar_element *> grammar_rules(result->parsed_grammar.c_rules());

        result->grammar = llama_grammar_init(
                grammar_rules.data(),
                grammar_rules.size(), result->parsed_grammar.symbol_ids.at("root"));
    }

    result->prev.resize(params.n_prev);
    result->n_considered = 0;

    llama_sampling_set_rng_seed(result, params.seed);

    return result;
}

void llama_sampling_free(struct llama_sampling_context * ctx) {
    if (ctx->grammar != NULL) {
        llama_grammar_free(ctx->grammar);
    }

    delete ctx;
}

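// Reset the sampling state between sequences: rebuild the grammar from the
// already-parsed rules and clear the token history and candidate buffer.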
void llama_sampling_reset(llama_sampling_context * ctx) {
    if (ctx->grammar != NULL) {
        llama_grammar_free(ctx->grammar);
        ctx->grammar = NULL;
    }

    if (!ctx->parsed_grammar.rules.empty()) {
        std::vector<const llama_grammar_element *> grammar_rules(ctx->parsed_grammar.c_rules());

        ctx->grammar = llama_grammar_init(
                grammar_rules.data(),
                grammar_rules.size(), ctx->parsed_grammar.symbol_ids.at("root"));
    }

    std::fill(ctx->prev.begin(), ctx->prev.end(), 0);
    ctx->cur.clear();
    ctx->n_considered = 0;
}

void llama_sampling_set_rng_seed(struct llama_sampling_context * ctx, uint32_t seed) {
    if (seed == LLAMA_DEFAULT_SEED) {
        seed = std::random_device{}();
    }
    ctx->rng.seed(seed);
}

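// Copy the grammar state and token history from src into dst
// (any grammar previously held by dst is freed first).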
void llama_sampling_cp(llama_sampling_context * src, llama_sampling_context * dst) {
    if (dst->grammar) {
        llama_grammar_free(dst->grammar);
        dst->grammar = nullptr;
    }

    if (src->grammar) {
        dst->grammar = llama_grammar_copy(src->grammar);
    }

    dst->prev = src->prev;
}

llama_token llama_sampling_last(llama_sampling_context * ctx) {
    return ctx->prev.back();
}

std::string llama_sampling_prev_str(llama_sampling_context * ctx_sampling, llama_context * ctx_main, int n) {
    const int size = ctx_sampling->prev.size();

    n = std::min(n, size);

    std::string result;

    for (int i = size - n; i < size; i++) {
        result += llama_token_to_piece(ctx_main, ctx_sampling->prev[i]);
    }

    return result;
}

std::string llama_sampling_print(const llama_sampling_params & params) {
    char result[1024];

    snprintf(result, sizeof(result),
            "\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n"
            "\ttop_k = %d, tfs_z = %.3f, top_p = %.3f, min_p = %.3f, typical_p = %.3f, temp = %.3f\n"
            "\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f",
            params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present,
            params.top_k, params.tfs_z, params.top_p, params.min_p, params.typical_p, params.temp,
            params.mirostat, params.mirostat_eta, params.mirostat_tau);

    return std::string(result);
}

std::string llama_sampling_order_print(const llama_sampling_params & params) {
    std::string result = "CFG -> Penalties ";
    if (params.mirostat == 0) {
        for (auto sampler_type : params.samplers_sequence) {
            const auto sampler_type_name = sampler_type_to_name_string(sampler_type);
            if (!sampler_type_name.empty()) {
                result += "-> " + sampler_type_name + " ";
            }
        }
    } else {
        result += "-> mirostat ";
    }

    return result;
}

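// Apply the configured samplers to the candidate list, in the order given by
// params.samplers_sequence (for example top-k -> tail-free -> typical -> top-p -> min-p -> temperature).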
// no reason to expose this function in the header
static void sampler_queue(
                   struct llama_context * ctx_main,
            const llama_sampling_params & params,
                 llama_token_data_array & cur_p,
                                 size_t   min_keep) {
    const float   temp              = params.temp;
    const float   dynatemp_range    = params.dynatemp_range;
    const float   dynatemp_exponent = params.dynatemp_exponent;
    const int32_t top_k             = params.top_k;
    const float   top_p             = params.top_p;
    const float   min_p             = params.min_p;
    const float   tfs_z             = params.tfs_z;
    const float   typical_p         = params.typical_p;

    const std::vector<llama_sampler_type> & samplers_sequence = params.samplers_sequence;

    for (auto sampler_type : samplers_sequence) {
        switch (sampler_type) {
            case llama_sampler_type::TOP_K    : llama_sample_top_k    (ctx_main, &cur_p, top_k,     min_keep); break;
            case llama_sampler_type::TFS_Z    : llama_sample_tail_free(ctx_main, &cur_p, tfs_z,     min_keep); break;
            case llama_sampler_type::TYPICAL_P: llama_sample_typical  (ctx_main, &cur_p, typical_p, min_keep); break;
            case llama_sampler_type::TOP_P    : llama_sample_top_p    (ctx_main, &cur_p, top_p,     min_keep); break;
            case llama_sampler_type::MIN_P    : llama_sample_min_p    (ctx_main, &cur_p, min_p,     min_keep); break;
            case llama_sampler_type::TEMPERATURE:
                if (dynatemp_range > 0) {
                    float dynatemp_min = std::max(0.0f, temp - dynatemp_range);
                    float dynatemp_max = std::max(0.0f, temp + dynatemp_range);
                    llama_sample_entropy(ctx_main, &cur_p, dynatemp_min, dynatemp_max, dynatemp_exponent);
                } else {
                    llama_sample_temp(ctx_main, &cur_p, temp);
                }
                break;
            default : break;
        }
    }
}

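// Core sampling routine: prepares the candidate list (logit bias, CFG guidance,
// penalties, optional grammar constraints), then picks a token either greedily
// (temp <= 0), via mirostat, or through the sampler queue followed by RNG-based
// selection. If a grammar is active and the sampled token turns out to be invalid,
// the original logits are restored and the function re-runs once with
// is_resampling = true.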
static llama_token llama_sampling_sample_impl(
                  struct llama_sampling_context * ctx_sampling,
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx,
                  bool is_resampling) { // true when retrying after the grammar rejected the previous sample
    const llama_sampling_params & params = ctx_sampling->params;

    const float temp         = params.temp;
    const int   mirostat     = params.mirostat;
    const float mirostat_tau = params.mirostat_tau;
    const float mirostat_eta = params.mirostat_eta;

    std::vector<float> original_logits;
    auto cur_p = llama_sampling_prepare(ctx_sampling, ctx_main, ctx_cfg, idx, /* apply_grammar= */ !is_resampling, &original_logits);
    if (!is_resampling) {
        GGML_ASSERT(!original_logits.empty());
    }
    llama_token id = 0;

    // Get a pointer to the logits
    float * logits = llama_get_logits_ith(ctx_main, idx);

    if (temp < 0.0) {
        // greedy sampling, with probs
        llama_sample_softmax(ctx_main, &cur_p);
        id = cur_p.data[0].id;
    } else if (temp == 0.0) {
        // greedy sampling, no probs
        id = llama_sample_token_greedy(ctx_main, &cur_p);
    } else {
        if (mirostat == 1) {
            const int mirostat_m = 100;
            llama_sample_temp(ctx_main, &cur_p, temp);
            id = llama_sample_token_mirostat(ctx_main, &cur_p, mirostat_tau, mirostat_eta, mirostat_m, &ctx_sampling->mirostat_mu);
        } else if (mirostat == 2) {
            llama_sample_temp(ctx_main, &cur_p, temp);
            id = llama_sample_token_mirostat_v2(ctx_main, &cur_p, mirostat_tau, mirostat_eta, &ctx_sampling->mirostat_mu);
        } else {
            // temperature sampling
            size_t min_keep = std::max(1, params.min_keep);

            sampler_queue(ctx_main, params, cur_p, min_keep);

            id = llama_sample_token_with_rng(ctx_main, &cur_p, ctx_sampling->rng);

            //{
            //    const int n_top = 10;
            //    LOG("top %d candidates:\n", n_top);
            //    for (int i = 0; i < n_top; i++) {
            //        const llama_token id = cur_p.data[i].id;
            //        (void)id; // To avoid a warning that id is unused when logging is disabled.
            //        LOG(" - %5d: '%12s' (%.3f)\n", id, llama_token_to_piece(ctx_main, id).c_str(), cur_p.data[i].p);
            //    }
            //}

            //LOG("sampled token: %5d: '%s'\n", id, llama_token_to_piece(ctx_main, id).c_str());
        }
    }

    if (ctx_sampling->grammar != NULL && !is_resampling) {
        // Create an array with a single token data element for the sampled id
        llama_token_data single_token_data = { id, logits[id], 0.0f };
        llama_token_data_array single_token_data_array = { &single_token_data, 1, false };

        // Apply grammar constraints to the single token
        llama_sample_grammar(ctx_main, &single_token_data_array, ctx_sampling->grammar);

        // Check if the token is valid according to the grammar by seeing if its logit has been set to -INFINITY
        bool is_valid = single_token_data_array.data[0].logit != -INFINITY;

        // If the token is not valid according to the grammar, restore the original logits and resample
        if (!is_valid) {
            LOG("Resampling because token %d: '%s' does not meet grammar rules\n", id, llama_token_to_piece(ctx_main, id).c_str());

            // Restore logits from the copy
            std::copy(original_logits.begin(), original_logits.end(), logits);

            return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, /* is_resampling= */ true);
        }
    }

    ctx_sampling->n_considered = cur_p.size;

    return id;
}

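// Build the candidate array for sampling: optionally back up the original logits,
// apply the logit bias map, classifier-free guidance, repetition/frequency/presence
// penalties (with optional newline protection), and finally the grammar constraints.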
static llama_token_data_array llama_sampling_prepare_impl(
                  struct llama_sampling_context * ctx_sampling,
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx,
                  bool apply_grammar,
                  std::vector<float> * original_logits) {
    const llama_sampling_params & params = ctx_sampling->params;

    const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));

    const int32_t penalty_last_n  = params.penalty_last_n < 0 ? params.n_prev : params.penalty_last_n;
    const float   penalty_repeat  = params.penalty_repeat;
    const float   penalty_freq    = params.penalty_freq;
    const float   penalty_present = params.penalty_present;

    const bool penalize_nl = params.penalize_nl;

    auto & prev = ctx_sampling->prev;
    auto & cur  = ctx_sampling->cur;

    // Get a pointer to the logits
    float * logits = llama_get_logits_ith(ctx_main, idx);

    if (apply_grammar && original_logits != NULL) {
        // Only make a copy of the original logits when grammar checks are applied,
        // so they can be restored if the sampled token is rejected and a resample is needed
        *original_logits = {logits, logits + llama_n_vocab(llama_get_model(ctx_main))};
    }

    // apply params.logit_bias map
    for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
        logits[it->first] += it->second;
    }

    if (ctx_cfg) {
        float * logits_guidance = llama_get_logits_ith(ctx_cfg, idx);
        llama_sample_apply_guidance(ctx_main, logits, logits_guidance, params.cfg_scale);
    }

    cur.clear();

    for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
        cur.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
    }

    llama_token_data_array cur_p = { cur.data(), cur.size(), false };

    // apply penalties
    const auto & penalty_tokens = params.use_penalty_prompt_tokens ? params.penalty_prompt_tokens : prev;
    const int penalty_tokens_used_size = std::min((int)penalty_tokens.size(), penalty_last_n);
    if (penalty_tokens_used_size) {
        const float nl_logit = logits[llama_token_nl(llama_get_model(ctx_main))];

        llama_sample_repetition_penalties(ctx_main, &cur_p,
                penalty_tokens.data() + penalty_tokens.size() - penalty_tokens_used_size,
                penalty_tokens_used_size, penalty_repeat, penalty_freq, penalty_present);

        if (!penalize_nl) {
            for (size_t idx = 0; idx < cur_p.size; idx++) {
                if (cur_p.data[idx].id == llama_token_nl(llama_get_model(ctx_main))) {
                    cur_p.data[idx].logit = nl_logit;
                    break;
                }
            }
        }
    }

    // apply grammar checks before sampling logic
    if (apply_grammar && ctx_sampling->grammar != NULL) {
        llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
    }

    return cur_p;
}

llama_token llama_sampling_sample(
                  struct llama_sampling_context * ctx_sampling,
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx) {
    // Call the implementation function with is_resampling set to false by default
    return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, false);
}

llama_token_data_array llama_sampling_prepare(
                  struct llama_sampling_context * ctx_sampling,
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx,
                  bool apply_grammar,
                  std::vector<float> * original_logits) {
    return llama_sampling_prepare_impl(ctx_sampling, ctx_main, ctx_cfg, idx, apply_grammar, original_logits);
}

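// Record a sampled (or forced) token in the ring of previous tokens and,
// when apply_grammar is set, advance the grammar state with it.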
void llama_sampling_accept(
        struct llama_sampling_context * ctx_sampling,
        struct llama_context * ctx_main,
        llama_token id,
        bool apply_grammar) {
    ctx_sampling->prev.erase(ctx_sampling->prev.begin());
    ctx_sampling->prev.push_back(id);

    if (ctx_sampling->grammar != NULL && apply_grammar) {
        llama_grammar_accept_token(ctx_main, ctx_sampling->grammar, id);
    }
}
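
/*
   Typical usage of this module (a minimal sketch; the llama_context `ctx`,
   the decode loop, and the batch plumbing that produces logits for position
   `idx` are assumed to be set up by the caller):

       llama_sampling_params sparams;      // defaults from sampling.h
       sparams.temp  = 0.8f;
       sparams.top_p = 0.95f;

       struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);

       while (n_remain > 0) {
           // ... llama_decode(ctx, batch) has produced logits for position idx ...
           const llama_token id = llama_sampling_sample(ctx_sampling, ctx, nullptr, idx);

           // accept the token so penalties and the grammar see it on the next step
           llama_sampling_accept(ctx_sampling, ctx, id, true);

           // ... feed `id` back into the next batch ...
       }

       llama_sampling_free(ctx_sampling);
*/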