// sampling.h — sampling parameters and per-sequence sampler state for llama.cpp examples
  1. #pragma once
  2. #include "llama.h"
  3. #include <string>
  4. #include <vector>
  5. #include <unordered_map>
  6. // sampling parameters
  7. typedef struct llama_sampling_params {
  8. int32_t top_k = 40; // <= 0 to use vocab size
  9. float top_p = 0.95f; // 1.0 = disabled
  10. float tfs_z = 1.00f; // 1.0 = disabled
  11. float typical_p = 1.00f; // 1.0 = disabled
  12. float temp = 0.80f; // 1.0 = disabled
  13. float repeat_penalty = 1.10f; // 1.0 = disabled
  14. int32_t repeat_last_n = 64; // last n tokens to penalize (0 = disable penalty, -1 = context size)
  15. float frequency_penalty = 0.00f; // 0.0 = disabled
  16. float presence_penalty = 0.00f; // 0.0 = disabled
  17. int32_t mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
  18. float mirostat_tau = 5.00f; // target entropy
  19. float mirostat_eta = 0.10f; // learning rate
  20. bool penalize_nl = true; // consider newlines as a repeatable token
  21. int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
  22. // Classifier-Free Guidance
  23. // https://arxiv.org/abs/2306.17806
  24. std::string cfg_negative_prompt; // string to help guidance
  25. float cfg_scale = 1.f; // How strong is guidance
  26. std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
  27. } llama_sampling_params;
  28. // per-sequence sampler context
  29. typedef struct llama_sampler_sequence_context {
  30. float mirostat_mu; // mirostat sampler state
  31. llama_grammar * grammar;
  32. } llama_sampler_sequence_context;
  33. // general sampler context
  34. typedef struct llama_sampling_context {
  35. ~llama_sampling_context();
  36. // parameters that will be used for sampling and when creating
  37. // new llama_sampler_sequence_context instances
  38. llama_sampling_params params;
  39. // map of sequence ids to sampler contexts
  40. std::unordered_map<llama_seq_id, llama_sampler_sequence_context> sequence_contexts;
  41. // when non-NULL, new instances of llama_sampler_sequence_context
  42. // will get a copy of the grammar here
  43. // note: only the pointer is stored here, it is not a copy of
  44. // the grammar and shouldn't be freed
  45. llama_grammar * grammar;
  46. } llama_sampling_context;
  47. #include "common.h"
  48. // Create a new sampling context instance.
  49. llama_sampling_context llama_sampling_context_init(
  50. const struct gpt_params & params,
  51. llama_grammar * grammar = NULL);
  52. // Fetches the sampler context for the specified sequence id (defaults to 0).
  53. // If the context for that sequence id doesn't already exist, it will be created with
  54. // default values based on the parameters in the ctx_sampling argument.
  55. llama_sampler_sequence_context & llama_sampling_get_sequence_context(
  56. llama_sampling_context & ctx_sampling,
  57. const llama_seq_id seq = 0);
  58. // Reset the sampler context for the supplied sequence id (defaults to 0).
  59. // This is necessary to reuse a sequence id or free memory used by sequences
  60. // that are no longer required.
  61. bool llama_sampling_context_reset(
  62. llama_sampling_context & ctx_sampling,
  63. const llama_seq_id seq = 0);
  64. // this is a common sampling function used across the examples for convenience
  65. // it can serve as a starting point for implementing your own sampling function
  66. // Note: When using multiple sequences, it is the caller's responsibility to call
  67. // llama_sampling_context_reset when a sequence ends
  68. //
  69. // required:
  70. // - ctx: context to use for sampling
  71. // - ctx_sampling: sampling-specific context
  72. //
  73. // optional:
  74. // - ctx_guidance: context to use for classifier-free guidance, ignore if NULL
  75. // - last_tokens: needed for repetition penalty, ignore if empty
  76. // - idx: sample from llama_get_logits_ith(ctx, idx)
  77. // - seq: sequence id to associate sampler state with
  78. //
  79. // returns:
  80. // - token: sampled token
  81. // - candidates: vector of candidate tokens
  82. //
  83. llama_token llama_sampling_sample(
  84. struct llama_context * ctx,
  85. struct llama_context * ctx_guidance,
  86. struct llama_sampling_context & ctx_sampling,
  87. const std::vector<llama_token> & last_tokens,
  88. std::vector<llama_token_data> & candidates,
  89. const int idx = 0,
  90. llama_seq_id seq = 0);