llama-context.h
#pragma once

#include "llama.h"
#include "llama-cparams.h"
#include "llama-graph.h"
#include "llama-adapter.h"

#include "ggml-cpp.h"
#include "ggml-opt.h"

#include <map>
#include <vector>

struct llama_model;
class llama_batch_allocr;

class llama_io_read_i;
class llama_io_write_i;

// "memory" as in abstract memory for the context
struct llama_memory_i;
struct llama_memory_context_i;

// "memory" as in physical memory for a buffer type, in bytes
struct llama_memory_breakdown_data {
    size_t model   = 0; // memory allocated for the model
    size_t context = 0; // memory allocated for the context
    size_t compute = 0; // memory allocated for temporary compute buffers

    size_t total() const {
        return model + context + compute;
    }
};

struct llama_context {
    // init scheduler and compute buffers, reserve worst-case graphs
    llama_context(
            const llama_model & model,
            llama_context_params params);

    ~llama_context();

    void synchronize();

    const llama_model   & get_model()   const;
    const llama_cparams & get_cparams() const;

    ggml_backend_sched_t get_sched() const;

    uint32_t n_ctx()     const;
    uint32_t n_ctx_seq() const;
    uint32_t n_batch()   const;
    uint32_t n_ubatch()  const;
    uint32_t n_seq_max() const;

    uint32_t n_threads()       const;
    uint32_t n_threads_batch() const;

    llama_memory_t get_memory() const;

    // return true if the memory was updated
    bool memory_update(bool optimize);

    enum llama_pooling_type pooling_type() const;

    float * get_logits();
    float * get_logits_ith(int32_t i);

    float * get_embeddings();
    float * get_embeddings_ith(int32_t i);
    float * get_embeddings_seq(llama_seq_id seq_id);

    void attach_threadpool(
            ggml_threadpool_t threadpool,
            ggml_threadpool_t threadpool_batch);

    void detach_threadpool();

    void set_n_threads(int32_t n_threads, int32_t n_threads_batch);

    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data);

    void set_embeddings (bool value);
    void set_causal_attn(bool value);
    void set_warmup(bool value);

    void set_adapter_lora(
            llama_adapter_lora * adapter,
            float scale);

    bool rm_adapter_lora(
            llama_adapter_lora * adapter);

    void clear_adapter_lora();

    bool apply_adapter_cvec(
            const float * data,
            size_t        len,
            int32_t       n_embd,
            int32_t       il_start,
            int32_t       il_end);

    // process a single ubatch with a specific graph type
    // if memory_context is provided, it will be applied first to the context's memory
    // ret contains the status of the graph computation
    // returns nullptr only if ret != GGML_STATUS_SUCCESS
    llm_graph_result * process_ubatch(
            const llama_ubatch &     ubatch,
            llm_graph_type           gtype,
            llama_memory_context_i * mctx,
            ggml_status &            ret);

    int encode(const llama_batch & batch_inp);
    int decode(const llama_batch & batch_inp);
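
    // Usage sketch (illustrative, assuming the public llama.h API): a typical decode
    // step builds a batch, submits it, and reads back the logits of the last token:
    //
    //   llama_batch batch = llama_batch_get_one(tokens.data(), (int32_t) tokens.size());
    //   if (llama_decode(ctx, batch) != 0) {
    //       // non-zero: no memory slot was found or an error occurred
    //   }
    //   const float * logits = llama_get_logits_ith(ctx, batch.n_tokens - 1);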

    //
    // state save/load
    //

    size_t state_get_size();
    size_t state_get_data(      uint8_t * dst, size_t size);
    size_t state_set_data(const uint8_t * src, size_t size);

    size_t state_seq_get_size(llama_seq_id seq_id, llama_state_seq_flags flags);
    size_t state_seq_get_data(llama_seq_id seq_id,       uint8_t * dst, size_t size, llama_state_seq_flags flags);
    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size, llama_state_seq_flags flags);

    bool state_load_file(
            const char *  filepath,
            llama_token * tokens_out,
            size_t        n_token_capacity,
            size_t *      n_token_count_out);

    bool state_save_file(
            const char *        filepath,
            const llama_token * tokens,
            size_t              n_token_count);

    size_t state_seq_load_file(
            llama_seq_id  seq_id,
            const char *  filepath,
            llama_token * tokens_out,
            size_t        n_token_capacity,
            size_t *      n_token_count_out);

    size_t state_seq_save_file(
            llama_seq_id        seq_id,
            const char *        filepath,
            const llama_token * tokens,
            size_t              n_token_count);
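
    // Usage sketch (illustrative, assuming the public llama_state_* wrappers in llama.h):
    // persist the full context state together with the prompt tokens and restore it later:
    //
    //   llama_state_save_file(ctx, "session.bin", tokens.data(), tokens.size());
    //   ...
    //   std::vector<llama_token> restored(n_ctx);
    //   size_t n_restored = 0;
    //   llama_state_load_file(ctx, "session.bin", restored.data(), restored.size(), &n_restored);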

    //
    // perf
    //

    llama_perf_context_data perf_get_data() const;
    void perf_reset();

    std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> memory_breakdown() const;
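
    // Usage sketch (illustrative): summing the per-buffer-type breakdown reported above,
    // using the llama_memory_breakdown_data::total() helper defined in this header:
    //
    //   size_t total = 0;
    //   for (const auto & [buft, data] : ctx->memory_breakdown()) {
    //       total += data.total();
    //   }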

    //
    // training
    //

    void opt_init(struct llama_model * model, struct llama_opt_params lopt_params);

    // TODO: more flexible combinations of logical/physical batch size and context size
    void opt_epoch(
            ggml_opt_dataset_t      dataset,
            ggml_opt_result_t       result_train,
            ggml_opt_result_t       result_eval,
            int64_t                 idata_split,
            ggml_opt_epoch_callback callback_train,
            ggml_opt_epoch_callback callback_eval);

    void opt_epoch_iter(
            ggml_opt_dataset_t               dataset,
            ggml_opt_result_t                result,
            const std::vector<llama_token> & tokens,
            const std::vector<llama_token> & labels_sparse,
            llama_batch                    & batch,
            ggml_opt_epoch_callback          callback,
            bool                             train,
            int64_t                          idata_in_loop,
            int64_t                          ndata_in_loop,
            int64_t                          t_loop_start);
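
    // Usage sketch (illustrative, assuming the public llama_opt_* wrappers in llama.h):
    // initialize training on a context, then run one epoch split into a train and an
    // eval portion of the dataset:
    //
    //   llama_opt_init(ctx, model, lopt_params);
    //   llama_opt_epoch(ctx, dataset, result_train, result_eval,
    //                   /*idata_split   =*/ idata_split,
    //                   /*callback_train=*/ ggml_opt_epoch_callback_progress_bar,
    //                   /*callback_eval =*/ ggml_opt_epoch_callback_progress_bar);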

private:
    //
    // output
    //

    // Make sure enough space is available for outputs.
    // Returns max number of outputs for which space was reserved.
    uint32_t output_reserve(int32_t n_outputs);

    void output_reorder();

    //
    // graph
    //

public:
    uint32_t graph_max_nodes(uint32_t n_tokens) const;

    // can reuse the llm_graph_result instance of the context (for example to update a memory module)
    llm_graph_result * get_gf_res_reserve() const;

    // returns the result of ggml_backend_sched_graph_compute_async execution
    ggml_status graph_compute(ggml_cgraph * gf, bool batched);

    // reserve a graph with a dummy ubatch of the specified size
    ggml_cgraph * graph_reserve(
            uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only = false, size_t * sizes = nullptr);

private:
    llm_graph_params graph_params(
            llm_graph_result *             res,
            const llama_ubatch &           ubatch,
            const llama_memory_context_i * mctx,
            llm_graph_type                 gtype) const;

    llm_graph_cb graph_get_cb() const;

    // TODO: read/write lora adapters and cvec
    size_t state_write_data(llama_io_write_i & io);
    size_t state_read_data (llama_io_read_i  & io);

    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags);
    size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id, llama_state_seq_flags flags);

    //
    // members
    //

    const llama_model & model;

    llama_cparams       cparams;
    llama_adapter_cvec  cvec;
    llama_adapter_loras loras;

    llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably

    std::unique_ptr<llama_memory_i> memory;

    // decode output (2-dimensional array: [n_outputs][n_vocab])
    size_t  logits_size = 0; // capacity (of floats) for logits
    float * logits      = nullptr;

    // embeddings output (2-dimensional array: [n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
    size_t  embd_size = 0; // capacity (of floats) for embeddings
    float * embd      = nullptr;

    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
    std::map<llama_seq_id, std::vector<float>> embd_seq;

    // reuse the batch_allocr to avoid unnecessary memory allocations
    std::unique_ptr<llama_batch_allocr> balloc;

    uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch

    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
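
    // Layout note (illustrative): for a batch position i that requested output,
    // output_ids[i] gives the row in the output buffers, so that token's logits start
    // at logits + output_ids[i]*n_vocab (and its embeddings at embd + output_ids[i]*n_embd);
    // positions without output map to -1.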

    struct swap_info {
        uint32_t i0;
        uint32_t i1;
    };

    std::vector<swap_info> output_swaps;

    ggml_backend_sched_ptr sched;

    ggml_backend_t backend_cpu = nullptr;
    std::vector<ggml_backend_ptr> backends;

    // training
    ggml_opt_context_t opt_ctx = nullptr;

    ggml_threadpool_t threadpool       = nullptr;
    ggml_threadpool_t threadpool_batch = nullptr;

    ggml_abort_callback abort_callback      = nullptr;
    void *              abort_callback_data = nullptr;

    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;

    // pointers and buffer types used for the compute buffer of each backend
    std::vector<ggml_backend_t>             backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    std::vector<size_t> backend_buf_exp_size; // expected buffer sizes

    llm_graph_result_ptr gf_res_prev;
    llm_graph_result_ptr gf_res_reserve;

    // host buffer for the model output (logits and embeddings)
    ggml_backend_buffer_ptr buf_output;

    bool has_evaluated_once = false;

    // env: LLAMA_GRAPH_REUSE_DISABLE
    bool graph_reuse_disable = false;

    // perf
    mutable int64_t t_start_us = 0;
    mutable int64_t t_load_us  = 0;

    mutable int64_t t_p_eval_us = 0;
    mutable int64_t t_eval_us   = 0;

    mutable int64_t t_compute_start_us = 0;
    mutable int64_t n_queued_tokens    = 0;

    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    mutable int32_t n_eval   = 0; // number of eval calls

    mutable int32_t n_reused = 0; // number of times the previous graph was reused
};