llama-context.h

#pragma once

#include "llama.h"
#include "llama-batch.h"
#include "llama-cparams.h"
#include "llama-graph.h"
#include "llama-adapter.h"

#include "ggml-cpp.h"

#include <map>
#include <memory> // for std::unique_ptr (llama_memory_i member below)
#include <vector>

struct llama_model;
struct llama_kv_cache;

class llama_io_read_i;
class llama_io_write_i;
struct llama_context {
    // init scheduler and compute buffers, reserve worst-case graphs
    llama_context(
            const llama_model & model,
                  llama_context_params params);

    ~llama_context();

    void synchronize();
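    // A minimal lifecycle sketch (illustrative only; in practice this type is
    // reached through the llama.h C API rather than constructed directly):
    //
    //   llama_context ctx(model, params); // reserves sched + worst-case graphs
    //   ...                               // encode()/decode() calls go here
    //   ctx.synchronize();                // wait for in-flight backend work
    //   // ~llama_context() releases the scheduler and compute buffers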
    const llama_model   & get_model()   const;
    const llama_cparams & get_cparams() const;

    ggml_backend_sched_t get_sched() const;

    ggml_context * get_ctx_compute() const;

    uint32_t n_ctx()         const;
    uint32_t n_ctx_per_seq() const;
    uint32_t n_batch()       const;
    uint32_t n_ubatch()      const;
    uint32_t n_seq_max()     const;

    uint32_t n_threads()       const;
    uint32_t n_threads_batch() const;

          llama_kv_cache * get_kv_self();
    const llama_kv_cache * get_kv_self() const;

    void kv_self_update();

    enum llama_pooling_type pooling_type() const;

    float * get_logits();
    float * get_logits_ith(int32_t i);

    float * get_embeddings();
    float * get_embeddings_ith(int32_t i);
    float * get_embeddings_seq(llama_seq_id seq_id);
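    // Output-accessor sketch (assumes a prior successful decode() and that the
    // batch requested output for the positions being read):
    //
    //   float * logits = ctx.get_logits_ith(i);     // i-th output position
    //   float * t_embd = ctx.get_embeddings_ith(i); // when pooling == NONE
    //   float * s_embd = ctx.get_embeddings_seq(0); // when pooling != NONE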
    void attach_threadpool(
            ggml_threadpool_t threadpool,
            ggml_threadpool_t threadpool_batch);

    void detach_threadpool();

    void set_n_threads(int32_t n_threads, int32_t n_threads_batch);

    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data);

    void set_embeddings (bool value);
    void set_causal_attn(bool value);
    void set_warmup(bool value);

    void set_adapter_lora(
            llama_adapter_lora * adapter,
            float scale);

    bool rm_adapter_lora(
            llama_adapter_lora * adapter);

    void clear_adapter_lora();

    bool apply_adapter_cvec(
            const float * data,
                 size_t   len,
                int32_t   n_embd,
                int32_t   il_start,
                int32_t   il_end);
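    // Control-vector sketch (the loader is a hypothetical helper and the layer
    // range is an assumption; data is expected to hold n_embd floats per
    // targeted layer):
    //
    //   std::vector<float> cv = load_control_vector(...); // hypothetical
    //   ctx.apply_adapter_cvec(cv.data(), cv.size(),
    //                          n_embd, /*il_start=*/1, /*il_end=*/n_layer);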
    int encode(llama_batch & inp_batch);
    int decode(llama_batch & inp_batch);
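    // Decode sketch (return-code interpretation follows the llama.h
    // conventions: 0 = success, > 0 = recoverable failure such as no
    // KV-cache slot, < 0 = error; treat this mapping as an assumption):
    //
    //   llama_batch batch = llama_batch_get_one(tokens, n_tokens);
    //   if (ctx.decode(batch) != 0) {
    //       // handle failure (retry with a smaller batch, or abort)
    //   }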
    //
    // state save/load
    //

    size_t state_get_size();
    size_t state_get_data(      uint8_t * dst, size_t size);
    size_t state_set_data(const uint8_t * src, size_t size);

    size_t state_seq_get_size(llama_seq_id seq_id);
    size_t state_seq_get_data(llama_seq_id seq_id,       uint8_t * dst, size_t size);
    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size);

    bool state_load_file(
            const char * filepath,
           llama_token * tokens_out,
                size_t   n_token_capacity,
                size_t * n_token_count_out);

    bool state_save_file(
             const char * filepath,
      const llama_token * tokens,
                 size_t   n_token_count);

    size_t state_seq_load_file(
           llama_seq_id   seq_id,
             const char * filepath,
            llama_token * tokens_out,
                 size_t   n_token_capacity,
                 size_t * n_token_count_out);

    size_t state_seq_save_file(
           llama_seq_id   seq_id,
             const char * filepath,
      const llama_token * tokens,
                 size_t   n_token_count);
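    // Session save/restore sketch (file name and sizes are illustrative; the
    // file carries the prompt tokens alongside the raw context state):
    //
    //   std::vector<llama_token> tokens = /* prompt so far */;
    //   ctx.state_save_file("session.bin", tokens.data(), tokens.size());
    //
    //   size_t n_tok = 0;
    //   std::vector<llama_token> loaded(ctx.n_ctx());
    //   ctx.state_load_file("session.bin", loaded.data(), loaded.size(), &n_tok);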
    //
    // perf
    //

    llama_perf_context_data perf_get_data() const;
    void perf_reset();

private:
    //
    // output
    //

    // Make sure enough space is available for outputs.
    // Returns max number of outputs for which space was reserved.
    int32_t output_reserve(int32_t n_outputs);

    //
    // graph
    //

public:
    int32_t graph_max_nodes() const;

    // zero-out inputs and create the ctx_compute for the compute graph
    ggml_cgraph * graph_init();

    // returns the result of ggml_backend_sched_graph_compute_async execution
    ggml_status graph_compute(
            ggml_cgraph * gf,
            bool batched);

private:
    llm_graph_result_ptr graph_build(
             ggml_context * ctx,
              ggml_cgraph * gf,
       const llama_ubatch & ubatch,
           llm_graph_type   gtype);

    llm_graph_cb graph_get_cb() const;

    // TODO: read/write lora adapters and cvec
    size_t state_write_data(llama_io_write_i & io);
    size_t state_read_data (llama_io_read_i  & io);

    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id);
    size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id);
    //
    // members
    //

    const llama_model & model;

    llama_cparams       cparams;
    llama_adapter_cvec  cvec;
    llama_adapter_loras loras;

    llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably

    std::unique_ptr<llama_memory_i> memory;

    // TODO: remove
    bool logits_all = false;

    // decode output (2-dimensional array: [n_outputs][n_vocab])
    size_t  logits_size = 0; // capacity (of floats) for logits
    float * logits      = nullptr;

    // embeddings output (2-dimensional array: [n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
    size_t  embd_size = 0; // capacity (of floats) for embeddings
    float * embd      = nullptr;

    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
    std::map<llama_seq_id, std::vector<float>> embd_seq;

    int32_t n_outputs     = 0; // number of actually-used outputs in the current ubatch or last logical batch
    int32_t n_outputs_max = 0; // capacity (in token positions) of the output buffers
    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers

    ggml_backend_sched_ptr sched;

    ggml_backend_t backend_cpu = nullptr;
    std::vector<ggml_backend_ptr> backends;

    ggml_context_ptr ctx_compute;

    ggml_threadpool_t threadpool       = nullptr;
    ggml_threadpool_t threadpool_batch = nullptr;

    ggml_abort_callback abort_callback      = nullptr;
    void *              abort_callback_data = nullptr;

    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;

    // buffer types used for the compute buffer of each backend
    std::vector<ggml_backend_t>             backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    // memory buffers used to evaluate the model
    std::vector<uint8_t> buf_compute_meta;

    // host buffer for the model output (logits and embeddings)
    ggml_backend_buffer_ptr buf_output;

    bool has_evaluated_once = false;

    // perf
    mutable int64_t t_start_us = 0;
    mutable int64_t t_load_us  = 0;

    mutable int64_t t_p_eval_us = 0;
    mutable int64_t t_eval_us   = 0;

    mutable int64_t t_compute_start_us = 0;
    mutable int64_t n_queued_tokens    = 0;

    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    mutable int32_t n_eval   = 0; // number of eval calls
};