llama-context.h

#pragma once

#include "llama.h"

#include "llama-cparams.h"
#include "llama-graph.h"
#include "llama-adapter.h"

#include "ggml-cpp.h"
#include "ggml-opt.h"

#include <map>
#include <vector>

struct llama_model;

class llama_batch_allocr;

class llama_io_read_i;
class llama_io_write_i;

// "memory" as in abstract memory for the context
struct llama_memory_i;
struct llama_memory_context_i;

// "memory" as in physical memory for a buffer type, in bytes
struct llama_memory_breakdown_data {
    size_t model   = 0; // memory allocated for the model
    size_t context = 0; // memory allocated for the context
    size_t compute = 0; // memory allocated for temporary compute buffers
};
struct llama_context {
    // init scheduler and compute buffers, reserve worst-case graphs
    llama_context(
            const llama_model & model,
                  llama_context_params params);

    ~llama_context();

    void synchronize();

    const llama_model   & get_model()   const;
    const llama_cparams & get_cparams() const;

    ggml_backend_sched_t get_sched() const;

    uint32_t n_ctx()     const;
    uint32_t n_ctx_seq() const;
    uint32_t n_batch()   const;
    uint32_t n_ubatch()  const;
    uint32_t n_seq_max() const;

    uint32_t n_threads()       const;
    uint32_t n_threads_batch() const;

    llama_memory_t get_memory() const;

    // return true if the memory was updated
    bool memory_update(bool optimize);

    enum llama_pooling_type pooling_type() const;

    float * get_logits();
    float * get_logits_ith(int32_t i);

    float * get_embeddings();
    float * get_embeddings_ith(int32_t i);
    float * get_embeddings_seq(llama_seq_id seq_id);
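
    // Usage sketch (illustrative, not from the implementation): after decode()
    // has run on a batch where token i requested its logits, the per-token
    // getters index into the host output buffers; `ctx`, `i` and `seq_id` are
    // assumed to come from the caller.
    //
    //     float * logits_i = ctx.get_logits_ith(i);          // n_vocab floats for token i
    //     float * embd_s   = ctx.get_embeddings_seq(seq_id); // pooled embedding for a sequence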
    void attach_threadpool(
            ggml_threadpool_t threadpool,
            ggml_threadpool_t threadpool_batch);

    void detach_threadpool();

    void set_n_threads(int32_t n_threads, int32_t n_threads_batch);

    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data);

    void set_embeddings (bool value);
    void set_causal_attn(bool value);
    void set_warmup(bool value);

    void set_adapter_lora(
            llama_adapter_lora * adapter,
            float scale);

    bool rm_adapter_lora(
            llama_adapter_lora * adapter);

    void clear_adapter_lora();
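
    // Adapter flow sketch, assuming `adapter` was loaded elsewhere through the
    // llama_adapter_lora API (illustrative only):
    //
    //     ctx.set_adapter_lora(adapter, 0.75f); // attach (or re-scale) one adapter
    //     ctx.rm_adapter_lora(adapter);         // detach a single adapter
    //     ctx.clear_adapter_lora();             // detach all adapters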
    bool apply_adapter_cvec(
            const float * data,
            size_t        len,
            int32_t       n_embd,
            int32_t       il_start,
            int32_t       il_end);

    // process a single ubatch with a specific graph type
    // if memory_context is provided, it will be applied first to the context's memory
    // ret contains the status of the graph computation
    // returns nullptr only if ret != GGML_STATUS_SUCCESS
    llm_graph_result * process_ubatch(
            const llama_ubatch     & ubatch,
            llm_graph_type           gtype,
            llama_memory_context_i * mctx,
            ggml_status            & ret);
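
    // Illustrative call pattern (a sketch based on the contract above; the
    // graph type value assumes the llm_graph_type enum from llama-graph.h):
    //
    //     ggml_status status;
    //     llm_graph_result * res = ctx.process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx, status);
    //     if (res == nullptr) {
    //         // status != GGML_STATUS_SUCCESS here
    //     }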
    int encode(const llama_batch & batch_inp);
    int decode(const llama_batch & batch_inp);
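
    // Both return 0 on success; non-zero values signal errors or retryable
    // conditions. A minimal decode sketch, assuming `batch` was built by the
    // caller:
    //
    //     if (ctx.decode(batch) != 0) {
    //         // handle failure (e.g. retry with a smaller batch)
    //     }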
    //
    // state save/load
    //

    size_t state_get_size();
    size_t state_get_data(      uint8_t * dst, size_t size);
    size_t state_set_data(const uint8_t * src, size_t size);

    size_t state_seq_get_size(llama_seq_id seq_id, llama_state_seq_flags flags);
    size_t state_seq_get_data(llama_seq_id seq_id,       uint8_t * dst, size_t size, llama_state_seq_flags flags);
    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size, llama_state_seq_flags flags);

    bool state_load_file(
            const char * filepath,
           llama_token * tokens_out,
                size_t   n_token_capacity,
                size_t * n_token_count_out);

    bool state_save_file(
            const char * filepath,
     const llama_token * tokens,
                size_t   n_token_count);

    size_t state_seq_load_file(
          llama_seq_id   seq_id,
            const char * filepath,
           llama_token * tokens_out,
                size_t   n_token_capacity,
                size_t * n_token_count_out);

    size_t state_seq_save_file(
          llama_seq_id   seq_id,
            const char * filepath,
     const llama_token * tokens,
                size_t   n_token_count);
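
    // Round-trip sketch for the whole context state (buffer management is the
    // caller's responsibility; names are illustrative):
    //
    //     std::vector<uint8_t> buf(ctx.state_get_size());
    //     ctx.state_get_data(buf.data(), buf.size());
    //     // ... later, on a compatible context:
    //     ctx.state_set_data(buf.data(), buf.size());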
    //
    // perf
    //

    llama_perf_context_data perf_get_data() const;
    void perf_reset();

    std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> memory_breakdown() const;
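
    // Sketch: aggregating the per-buffer-type breakdown into a single total
    // (illustrative; `buft` keys identify the backend buffer types):
    //
    //     size_t total = 0;
    //     for (const auto & [buft, mb] : ctx.memory_breakdown()) {
    //         total += mb.model + mb.context + mb.compute;
    //     }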
    //
    // training
    //

    void opt_init(struct llama_model * model, struct llama_opt_params lopt_params);

    // TODO: more flexible combinations of logical/physical batch size and context size
    void opt_epoch(
            ggml_opt_dataset_t      dataset,
            ggml_opt_result_t       result_train,
            ggml_opt_result_t       result_eval,
            int64_t                 idata_split,
            ggml_opt_epoch_callback callback_train,
            ggml_opt_epoch_callback callback_eval);

    void opt_epoch_iter(
            ggml_opt_dataset_t               dataset,
            ggml_opt_result_t                result,
            const std::vector<llama_token> & tokens,
            const std::vector<llama_token> & labels_sparse,
            llama_batch                    & batch,
            ggml_opt_epoch_callback          callback,
            bool                             train,
            int64_t                          idata_in_loop,
            int64_t                          ndata_in_loop,
            int64_t                          t_loop_start);
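
    // Training sketch, assuming `dataset`, `result_train` and `result_eval`
    // were created with the ggml-opt API (illustrative only):
    //
    //     ctx.opt_init(&model, lopt_params);
    //     ctx.opt_epoch(dataset, result_train, result_eval,
    //                   idata_split, callback_train, callback_eval);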
private:
    //
    // output
    //

    // Make sure enough space is available for outputs.
    // Returns max number of outputs for which space was reserved.
    uint32_t output_reserve(int32_t n_outputs);

    void output_reorder();

    //
    // graph
    //

public:
    uint32_t graph_max_nodes() const;

    // can reuse the llm_graph_result instance of the context (for example to update a memory module)
    llm_graph_result * get_gf_res_reserve() const;

    // returns the result of ggml_backend_sched_graph_compute_async execution
    ggml_status graph_compute(ggml_cgraph * gf, bool batched);

    // reserve a graph with a dummy ubatch of the specified size
    ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only = false);
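
    // Reservation sketch (illustrative): a worst-case graph is typically
    // reserved with the maximum ubatch size before the first real decode:
    //
    //     ggml_cgraph * gf = ctx.graph_reserve(
    //             cparams.n_ubatch, cparams.n_seq_max, cparams.n_ubatch, mctx);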
private:
    llm_graph_params graph_params(
            llm_graph_result             * res,
            const llama_ubatch           & ubatch,
            const llama_memory_context_i * mctx,
            llm_graph_type                 gtype) const;

    llm_graph_cb graph_get_cb() const;

    // TODO: read/write lora adapters and cvec
    size_t state_write_data(llama_io_write_i & io);
    size_t state_read_data (llama_io_read_i  & io);

    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags);
    size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id, llama_state_seq_flags flags);

    //
    // members
    //

    const llama_model & model;

    llama_cparams cparams;

    llama_adapter_cvec  cvec;
    llama_adapter_loras loras;

    llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably

    std::unique_ptr<llama_memory_i> memory;

    // decode output (2-dimensional array: [n_outputs][n_vocab])
    size_t  logits_size = 0; // capacity (of floats) for logits
    float * logits      = nullptr;

    // embeddings output (2-dimensional array: [n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
    size_t  embd_size = 0; // capacity (of floats) for embeddings
    float * embd      = nullptr;

    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
    std::map<llama_seq_id, std::vector<float>> embd_seq;
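
    // Indexing sketch (illustrative, based on the layout comments above): the
    // logits row for batch position i lives at output_ids[i]*n_vocab, bounds
    // checks omitted:
    //
    //     const int32_t row = output_ids[i];
    //     float * logits_i  = logits + (size_t) row * n_vocab;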
    // reuse the batch_allocr to avoid unnecessary memory allocations
    std::unique_ptr<llama_batch_allocr> balloc;

    uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch

    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers

    struct swap_info {
        uint32_t i0;
        uint32_t i1;
    };

    std::vector<swap_info> output_swaps;

    ggml_backend_sched_ptr sched;

    ggml_backend_t backend_cpu = nullptr;
    std::vector<ggml_backend_ptr> backends;

    // training
    ggml_opt_context_t opt_ctx = nullptr;

    ggml_threadpool_t threadpool       = nullptr;
    ggml_threadpool_t threadpool_batch = nullptr;

    ggml_abort_callback abort_callback      = nullptr;
    void *              abort_callback_data = nullptr;

    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;

    // buffer types used for the compute buffer of each backend
    std::vector<ggml_backend_t>             backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    llm_graph_result_ptr gf_res_prev;
    llm_graph_result_ptr gf_res_reserve;

    // host buffer for the model output (logits and embeddings)
    ggml_backend_buffer_ptr buf_output;

    bool has_evaluated_once = false;

    // env: LLAMA_GRAPH_REUSE_DISABLE
    bool graph_reuse_disable = false;

    // perf
    mutable int64_t t_start_us         = 0;
    mutable int64_t t_load_us          = 0;
    mutable int64_t t_p_eval_us        = 0;
    mutable int64_t t_eval_us          = 0;
    mutable int64_t t_compute_start_us = 0;
    mutable int64_t n_queued_tokens    = 0;

    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    mutable int32_t n_eval   = 0; // number of eval calls
    mutable int32_t n_reused = 0; // number of times the previous graph was reused
};