sampling.h
#pragma once

#include "llama.h"

#include "grammar-parser.h"

#include <string>
#include <vector>
#include <unordered_map>

// sampling parameters
typedef struct llama_sampling_params {
    int32_t n_prev            = 64;    // number of previous tokens to remember
    int32_t n_probs           = 0;     // if greater than 0, output the probabilities of top n_probs tokens
    int32_t top_k             = 40;    // <= 0 to use vocab size
    float   top_p             = 0.95f; // 1.0 = disabled
    float   tfs_z             = 1.00f; // 1.0 = disabled
    float   typical_p         = 1.00f; // 1.0 = disabled
    float   temp              = 0.80f; // 1.0 = disabled
    int32_t penalty_last_n    = 64;    // last n tokens to penalize (0 = disable penalty, -1 = context size)
    float   penalty_repeat    = 1.10f; // 1.0 = disabled
    float   penalty_freq      = 0.00f; // 0.0 = disabled
    float   penalty_present   = 0.00f; // 0.0 = disabled
    int32_t mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
    float   mirostat_tau      = 5.00f; // target entropy
    float   mirostat_eta      = 0.10f; // learning rate
    bool    penalize_nl       = true;  // consider newlines as a repeatable token

    std::string grammar; // optional BNF-like grammar to constrain sampling

    // Classifier-Free Guidance
    // https://arxiv.org/abs/2306.17806
    std::string cfg_negative_prompt; // string to help guidance
    float       cfg_scale = 1.f;     // how strong is guidance

    std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
} llama_sampling_params;
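
// Example (illustrative, not part of this header): override a few of the
// defaults above for more conservative, grammar-constrained sampling.
// The values are arbitrary demonstration choices, not recommendations.
//
//   llama_sampling_params sparams;
//   sparams.temp           = 0.2f; // lower temperature -> less random output
//   sparams.top_k          = 20;   // keep only the 20 most likely tokens
//   sparams.penalty_repeat = 1.2f; // penalize recently repeated tokens
//   sparams.grammar        = "root ::= [0-9]+"; // digits only (GBNF syntax)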

// general sampler context
// TODO: move to llama.h
struct llama_sampling_context {
    // parameters that will be used for sampling
    llama_sampling_params params;

    // mirostat sampler state
    float mirostat_mu;

    llama_grammar * grammar;

    // internal
    grammar_parser::parse_state parsed_grammar;

    // TODO: replace with ring-buffer
    std::vector<llama_token>      prev;
    std::vector<llama_token_data> cur;
};

// note: common.h is deliberately included here rather than at the top,
// since it in turn depends on llama_sampling_params being defined above
#include "common.h"

// Create a new sampling context instance.
struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_params & params);

// Free a sampling context created with llama_sampling_init.
void llama_sampling_free(struct llama_sampling_context * ctx);
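
// Example (illustrative): every llama_sampling_init should be paired with a
// llama_sampling_free once generation is done; sparams is assumed to be a
// llama_sampling_params set up as shown earlier.
//
//   struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
//   // ... generate ...
//   llama_sampling_free(ctx_sampling);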

// Reset the sampler context
// - clear prev tokens
// - reset grammar
void llama_sampling_reset(llama_sampling_context * ctx);

// Copy the sampler context
void llama_sampling_cp(llama_sampling_context * src, llama_sampling_context * dst);

// Get the last sampled token
llama_token llama_sampling_last(llama_sampling_context * ctx);

// Get a string representation of the last sampled tokens
std::string llama_sampling_prev_str(llama_sampling_context * ctx_sampling, llama_context * ctx_main, int n);

// Print sampling parameters into a string
std::string llama_sampling_print(const llama_sampling_params & params);
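
// Example (illustrative): dump the active parameters and the last 8 sampled
// tokens for debugging; both helpers return std::string, and ctx_sampling /
// ctx_main are assumed to be valid contexts created elsewhere.
//
//   fprintf(stderr, "%s\n", llama_sampling_print(sparams).c_str());
//   fprintf(stderr, "recent: %s\n", llama_sampling_prev_str(ctx_sampling, ctx_main, 8).c_str());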

// this is a common sampling function used across the examples for convenience
// it can serve as a starting point for implementing your own sampling function
// Note: When using multiple sequences, it is the caller's responsibility to call
//       llama_sampling_reset when a sequence ends
//
// required:
//  - ctx_main:     context to use for sampling
//  - ctx_sampling: sampling-specific context
//
// optional:
//  - ctx_cfg: context to use for classifier-free guidance
//  - idx:     sample from llama_get_logits_ith(ctx, idx)
//
// returns:
//  - token:      sampled token
//  - candidates: vector of candidate tokens
//
llama_token llama_sampling_sample(
        struct llama_sampling_context * ctx_sampling,
        struct llama_context * ctx_main,
        struct llama_context * ctx_cfg,
        int idx = 0);

// Accept a sampled token: records it in the list of recent (prev) tokens and,
// when apply_grammar is true, advances the grammar state as well.
void llama_sampling_accept(
        struct llama_sampling_context * ctx_sampling,
        struct llama_context * ctx_main,
        llama_token id,
        bool apply_grammar);
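
// Example (illustrative): a minimal generation loop tying the two calls above
// together. llama_decode and llama_batch_get_one come from llama.h; n_past,
// n_remain, and the contexts are assumed to be set up by the caller, and
// error handling is omitted.
//
//   while (n_remain-- > 0) {
//       llama_token id = llama_sampling_sample(ctx_sampling, ctx_main, NULL);
//       llama_sampling_accept(ctx_sampling, ctx_main, id, true);
//
//       // feed the sampled token back to the model for the next step
//       llama_decode(ctx_main, llama_batch_get_one(&id, 1, n_past, 0));
//       n_past += 1;
//   }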