#pragma once

#include "llama.h"
#include "llama-cparams.h"
#include "llama-graph.h"
#include "llama-adapter.h"

#include "ggml-cpp.h"
#include "ggml-opt.h"

#include <map>
#include <memory>
#include <vector>

struct llama_model;
class llama_batch_allocr;

class llama_io_read_i;
class llama_io_write_i;

struct llama_memory_i;
struct llama_memory_context_i;

struct llama_context {
    // init scheduler and compute buffers, reserve worst-case graphs
    llama_context(
            const llama_model & model,
                  llama_context_params params);

    ~llama_context();

    void synchronize();

    const llama_model   & get_model()   const;
    const llama_cparams & get_cparams() const;

    ggml_backend_sched_t get_sched() const;

    uint32_t n_ctx()         const;
    uint32_t n_ctx_per_seq() const;
    uint32_t n_batch()       const;
    uint32_t n_ubatch()      const;
    uint32_t n_seq_max()     const;

    uint32_t n_threads()       const;
    uint32_t n_threads_batch() const;

    llama_memory_t get_memory() const;

    // return true if the memory was updated
    bool memory_update(bool optimize);

    enum llama_pooling_type pooling_type() const;

    float * get_logits();
    float * get_logits_ith(int32_t i);

    float * get_embeddings();
    float * get_embeddings_ith(int32_t i);
    float * get_embeddings_seq(llama_seq_id seq_id);
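
    // Illustrative sketch (not part of this header): how a caller might read
    // outputs after a successful decode, depending on the pooling mode.
    // `ctx`, `i_batch` and `seq` are hypothetical names used only for this example.
    //
    //   if (ctx.pooling_type() == LLAMA_POOLING_TYPE_NONE) {
    //       float * logits = ctx.get_logits_ith(i_batch);     // per-token logits
    //       float * embd   = ctx.get_embeddings_ith(i_batch); // per-token embeddings (if enabled)
    //   } else {
    //       float * embd_seq = ctx.get_embeddings_seq(seq);   // pooled: one vector per sequence
    //   }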

    void attach_threadpool(
            ggml_threadpool_t threadpool,
            ggml_threadpool_t threadpool_batch);

    void detach_threadpool();

    void set_n_threads(int32_t n_threads, int32_t n_threads_batch);

    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data);

    void set_embeddings (bool value);
    void set_causal_attn(bool value);
    void set_warmup(bool value);

    void set_adapter_lora(
            llama_adapter_lora * adapter,
            float scale);

    bool rm_adapter_lora(
            llama_adapter_lora * adapter);

    void clear_adapter_lora();
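
    // Illustrative sketch (not part of this header): attaching and detaching a
    // LoRA adapter. `adapter` is a hypothetical llama_adapter_lora pointer
    // obtained elsewhere (e.g. when the adapter file is loaded).
    //
    //   ctx.set_adapter_lora(adapter, 0.5f); // apply with a 0.5 scale
    //   ctx.rm_adapter_lora(adapter);        // detach this adapter again
    //   ctx.clear_adapter_lora();            // drop all adapters at once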

    bool apply_adapter_cvec(
            const float * data,
                 size_t   len,
                int32_t   n_embd,
                int32_t   il_start,
                int32_t   il_end);

    // process a single ubatch with a specific graph type
    // if memory_context is provided, it will be applied first to the context's memory
    // ret contains the status of the graph computation
    // returns nullptr only if ret != GGML_STATUS_SUCCESS
    llm_graph_result * process_ubatch(
            const llama_ubatch & ubatch,
                llm_graph_type   gtype,
        llama_memory_context_i * mctx,
                   ggml_status & ret);
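
    // Illustrative sketch (not part of this header): checking the out-param
    // status when driving process_ubatch directly. `ubatch`, `gtype` and `mctx`
    // are assumed to have been prepared by the caller.
    //
    //   ggml_status status;
    //   llm_graph_result * res = ctx.process_ubatch(ubatch, gtype, mctx, status);
    //   if (!res) {
    //       // status != GGML_STATUS_SUCCESS: allocation failure or aborted/failed compute
    //   }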

    int encode(const llama_batch & batch_inp);
    int decode(const llama_batch & batch_inp);
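
    // Illustrative sketch (not part of this header): a minimal decode step,
    // assuming a llama_batch prepared by the caller and that a return value of
    // 0 indicates success (non-zero values report the failure mode).
    //
    //   if (ctx.decode(batch) != 0) {
    //       // decode failed; outputs from this batch should not be read
    //   }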

    //
    // state save/load
    //

    size_t state_get_size();
    size_t state_get_data(      uint8_t * dst, size_t size);
    size_t state_set_data(const uint8_t * src, size_t size);

    size_t state_seq_get_size(llama_seq_id seq_id, llama_state_seq_flags flags);
    size_t state_seq_get_data(llama_seq_id seq_id,       uint8_t * dst, size_t size, llama_state_seq_flags flags);
    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size, llama_state_seq_flags flags);
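
    // Illustrative sketch (not part of this header): copying the state of one
    // sequence from context `src` into context `dst`. Error handling is elided
    // and the flags value 0 is an assumption for this example.
    //
    //   const size_t n = src.state_seq_get_size(seq_id, 0);
    //   std::vector<uint8_t> buf(n);
    //   src.state_seq_get_data(seq_id, buf.data(), buf.size(), 0);
    //   dst.state_seq_set_data(seq_id, buf.data(), buf.size(), 0);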

    bool state_load_file(
             const char * filepath,
            llama_token * tokens_out,
                 size_t   n_token_capacity,
                 size_t * n_token_count_out);

    bool state_save_file(
             const char * filepath,
      const llama_token * tokens,
                 size_t   n_token_count);

    size_t state_seq_load_file(
           llama_seq_id   seq_id,
             const char * filepath,
            llama_token * tokens_out,
                 size_t   n_token_capacity,
                 size_t * n_token_count_out);

    size_t state_seq_save_file(
           llama_seq_id   seq_id,
             const char * filepath,
      const llama_token * tokens,
                 size_t   n_token_count);
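
    // Illustrative sketch (not part of this header): persisting the full
    // context state plus the prompt tokens to a session file and restoring it
    // later. File name, capacities and error handling are simplified.
    //
    //   ctx.state_save_file("session.bin", tokens.data(), tokens.size());
    //
    //   std::vector<llama_token> restored(ctx.n_ctx());
    //   size_t n_restored = 0;
    //   if (ctx.state_load_file("session.bin", restored.data(), restored.size(), &n_restored)) {
    //       restored.resize(n_restored);
    //   }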

    //
    // perf
    //

    llama_perf_context_data perf_get_data() const;
    void perf_reset();

    //
    // training
    //

    void opt_init(struct llama_model * model, struct llama_opt_params lopt_params);

    // TODO: more flexible combinations of logical/physical batch size and context size
    void opt_epoch(
            ggml_opt_dataset_t      dataset,
            ggml_opt_result_t       result_train,
            ggml_opt_result_t       result_eval,
            int64_t                 idata_split,
            ggml_opt_epoch_callback callback_train,
            ggml_opt_epoch_callback callback_eval);

    void opt_epoch_iter(
            ggml_opt_dataset_t               dataset,
            ggml_opt_result_t                result,
            const std::vector<llama_token> & tokens,
            const std::vector<llama_token> & labels_sparse,
            llama_batch                    & batch,
            ggml_opt_epoch_callback          callback,
            bool                             train,
            int64_t                          idata_in_loop,
            int64_t                          ndata_in_loop,
            int64_t                          t_loop_start);
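
    // Illustrative sketch (not part of this header): a single training pass.
    // The dataset/result handles are assumed to come from the ggml-opt API and
    // the split index and callbacks are placeholders for this example.
    //
    //   ctx.opt_init(&model, lopt_params);
    //   ctx.opt_epoch(dataset, result_train, result_eval, idata_split,
    //                 callback_train, callback_eval);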

private:
    //
    // output
    //

    // Make sure enough space is available for outputs.
    // Returns max number of outputs for which space was reserved.
    uint32_t output_reserve(int32_t n_outputs);

    void output_reorder();

    //
    // graph
    //

public:
    uint32_t graph_max_nodes() const;

    // can reuse the llm_graph_result instance of the context (for example to update a memory module)
    llm_graph_result * get_gf_res_reserve() const;

    // returns the result of ggml_backend_sched_graph_compute_async execution
    ggml_status graph_compute(ggml_cgraph * gf, bool batched);

    // reserve a graph with a dummy ubatch of the specified size
    ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx);
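
    // Illustrative sketch (not part of this header): reserving a worst-case
    // graph for the configured batch limits, and computing an already built
    // graph. `mctx` and `gf_built` are placeholders for this example.
    //
    //   // reserve buffers for the worst case before the first real compute:
    //   ggml_cgraph * gf = ctx.graph_reserve(ctx.n_ubatch(), ctx.n_seq_max(), ctx.n_ubatch(), mctx);
    //
    //   // later, compute a built graph (batched selects the batch thread settings):
    //   ggml_status st = ctx.graph_compute(gf_built, /*batched =*/ true);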

private:
    llm_graph_params graph_params(
                        llm_graph_result * res,
                      const llama_ubatch & ubatch,
            const llama_memory_context_i * mctx,
                          llm_graph_type   gtype) const;

    llm_graph_cb graph_get_cb() const;

    // TODO: read/write lora adapters and cvec
    size_t state_write_data(llama_io_write_i & io);
    size_t state_read_data (llama_io_read_i  & io);

    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags);
    size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id, llama_state_seq_flags flags);

    //
    // members
    //

    const llama_model & model;

    llama_cparams       cparams;
    llama_adapter_cvec  cvec;
    llama_adapter_loras loras;

    llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably

    std::unique_ptr<llama_memory_i> memory;

    // decode output (2-dimensional array: [n_outputs][n_vocab])
    size_t  logits_size = 0; // capacity (of floats) for logits
    float * logits      = nullptr;

    // embeddings output (2-dimensional array: [n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
    size_t  embd_size = 0; // capacity (of floats) for embeddings
    float * embd      = nullptr;

    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
    std::map<llama_seq_id, std::vector<float>> embd_seq;

    // reuse the batch_allocr to avoid unnecessary memory allocations
    std::unique_ptr<llama_batch_allocr> balloc;

    uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch

    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers

    struct swap_info {
        uint32_t i0;
        uint32_t i1;
    };

    std::vector<swap_info> output_swaps;

    ggml_backend_sched_ptr sched;

    ggml_backend_t backend_cpu = nullptr;
    std::vector<ggml_backend_ptr> backends;

    // training
    ggml_opt_context_t opt_ctx = nullptr;

    ggml_threadpool_t threadpool       = nullptr;
    ggml_threadpool_t threadpool_batch = nullptr;

    ggml_abort_callback abort_callback      = nullptr;
    void *              abort_callback_data = nullptr;

    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;

    // buffer types used for the compute buffer of each backend
    std::vector<ggml_backend_t>             backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    llm_graph_result_ptr gf_res_prev;
    llm_graph_result_ptr gf_res_reserve;

    // host buffer for the model output (logits and embeddings)
    ggml_backend_buffer_ptr buf_output;

    bool has_evaluated_once = false;

    // env: LLAMA_SET_ROWS (temporary)
    // ref: https://github.com/ggml-org/llama.cpp/pull/14285
    bool supports_set_rows = true;

    // env: LLAMA_GRAPH_REUSE_DISABLE
    bool graph_reuse_disable = false;

    // perf
    mutable int64_t t_start_us         = 0;
    mutable int64_t t_load_us          = 0;
    mutable int64_t t_p_eval_us        = 0;
    mutable int64_t t_eval_us          = 0;
    mutable int64_t t_compute_start_us = 0;
    mutable int64_t n_queued_tokens    = 0;

    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    mutable int32_t n_eval   = 0; // number of eval calls
    mutable int32_t n_reused = 0; // number of times the previous graph was reused
};