llama-context.cpp

  1. #include "llama-context.h"
  2. #include "llama-impl.h"
  3. #include "llama-batch.h"
  4. #include "llama-io.h"
  5. #include "llama-memory.h"
  6. #include "llama-mmap.h"
  7. #include "llama-model.h"
  8. #include <cinttypes>
  9. #include <cstring>
  10. #include <limits>
  11. #include <stdexcept>
  12. //
  13. // llama_context
  14. //
  15. llama_context::llama_context(
  16. const llama_model & model,
  17. llama_context_params params) :
  18. model(model),
  19. balloc(std::make_unique<llama_batch_allocr>(model.hparams.n_pos_per_embd())) {
  20. LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__);
  21. t_start_us = model.t_start_us;
  22. t_load_us = model.t_load_us;
  23. const auto & hparams = model.hparams;
  24. cparams.n_seq_max = std::max(1u, params.n_seq_max);
  25. if (cparams.n_seq_max > LLAMA_MAX_SEQ) {
  26. throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_SEQ));
  27. }
  28. cparams.n_threads = params.n_threads;
  29. cparams.n_threads_batch = params.n_threads_batch;
  30. cparams.yarn_ext_factor = params.yarn_ext_factor >= 0.0f ? params.yarn_ext_factor : hparams.yarn_ext_factor;
  31. cparams.yarn_attn_factor = params.yarn_attn_factor >= 0.0f ? params.yarn_attn_factor : hparams.yarn_attn_factor;
  32. cparams.yarn_beta_fast = params.yarn_beta_fast >= 0.0f ? params.yarn_beta_fast : hparams.yarn_beta_fast;
  33. cparams.yarn_beta_slow = params.yarn_beta_slow >= 0.0f ? params.yarn_beta_slow : hparams.yarn_beta_slow;
  34. cparams.embeddings = params.embeddings;
  35. cparams.offload_kqv = params.offload_kqv;
  36. cparams.no_perf = params.no_perf;
  37. cparams.pooling_type = params.pooling_type;
  38. cparams.warmup = false;
  39. cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
  40. cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
  41. cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
  42. cparams.n_ctx_orig_yarn = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
  43. hparams.n_ctx_orig_yarn != 0 ? hparams.n_ctx_orig_yarn :
  44. hparams.n_ctx_train;
  45. cparams.cb_eval = params.cb_eval;
  46. cparams.cb_eval_user_data = params.cb_eval_user_data;
  47. auto rope_scaling_type = params.rope_scaling_type;
  48. if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) {
  49. rope_scaling_type = hparams.rope_scaling_type_train;
  50. }
  51. if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) {
  52. cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
  53. }
  54. if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
  55. cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f;
  56. }
  57. cparams.yarn_attn_factor *= hparams.rope_attn_factor;
  58. if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
  59. if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
  60. cparams.pooling_type = LLAMA_POOLING_TYPE_NONE;
  61. } else {
  62. cparams.pooling_type = hparams.pooling_type;
  63. }
  64. }
  65. if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) {
  66. cparams.causal_attn = hparams.causal_attn;
  67. } else {
  68. cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL;
  69. }
  70. cparams.flash_attn = params.flash_attn_type != LLAMA_FLASH_ATTN_TYPE_DISABLED;
  71. // with causal attention, the batch size is limited by the context size
  72. cparams.n_batch = cparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
  73. // the batch has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask
  74. // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext)
  75. // ref: https://github.com/ggerganov/llama.cpp/pull/5021
  76. // TODO: this padding is not needed for the cache-less context so we should probably move it to llama_memory
  77. if (cparams.n_batch < GGML_KQ_MASK_PAD) {
  78. LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD);
  79. cparams.n_batch = GGML_KQ_MASK_PAD;
  80. }
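// the micro-batch size cannot exceed the logical batch size; a value of 0 means "use n_batch"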
  81. cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
  82. cparams.op_offload = params.op_offload;
  83. cparams.kv_unified = params.kv_unified;
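// the LLAMA_GRAPH_REUSE_DISABLE environment variable can be used to disable graph reuse at runtime (any non-zero value disables it)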
  84. {
  85. const char * LLAMA_GRAPH_REUSE_DISABLE = getenv("LLAMA_GRAPH_REUSE_DISABLE");
  86. graph_reuse_disable = LLAMA_GRAPH_REUSE_DISABLE ? (atoi(LLAMA_GRAPH_REUSE_DISABLE) != 0) : graph_reuse_disable;
  87. if (graph_reuse_disable) {
  88. LLAMA_LOG_WARN("%s: graph reuse disabled\n", __func__);
  89. }
  90. }
  91. const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max;
  92. LLAMA_LOG_INFO("%s: n_seq_max = %u\n", __func__, cparams.n_seq_max);
  93. LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
  94. LLAMA_LOG_INFO("%s: n_ctx_per_seq = %u\n", __func__, n_ctx_per_seq);
  95. LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch);
  96. LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch);
  97. LLAMA_LOG_INFO("%s: causal_attn = %d\n", __func__, cparams.causal_attn);
  98. LLAMA_LOG_INFO("%s: flash_attn = %s\n", __func__, llama_flash_attn_type_name(params.flash_attn_type));
  99. LLAMA_LOG_INFO("%s: kv_unified = %s\n", __func__, cparams.kv_unified ? "true" : "false");
  100. LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
  101. LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
  102. if (n_ctx_per_seq < hparams.n_ctx_train) {
  103. LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) < n_ctx_train (%u) -- the full capacity of the model will not be utilized\n",
  104. __func__, n_ctx_per_seq, hparams.n_ctx_train);
  105. }
  106. if (n_ctx_per_seq > hparams.n_ctx_train) {
  107. LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) > n_ctx_train (%u) -- possible training context overflow\n",
  108. __func__, n_ctx_per_seq, hparams.n_ctx_train);
  109. }
  110. if (!hparams.vocab_only) {
  111. // GPU backends
  112. for (auto * dev : model.devices) {
  113. ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
  114. if (backend == nullptr) {
  115. throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
  116. }
  117. backends.emplace_back(backend);
  118. }
  119. // add ACCEL backends (such as BLAS)
  120. for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
  121. ggml_backend_dev_t dev = ggml_backend_dev_get(i);
  122. if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
  123. ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
  124. if (backend == nullptr) {
  125. throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
  126. }
  127. backends.emplace_back(backend);
  128. }
  129. }
  130. // add CPU backend
  131. backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
  132. if (backend_cpu == nullptr) {
  133. throw std::runtime_error("failed to initialize CPU backend");
  134. }
  135. backends.emplace_back(backend_cpu);
  136. // create a list of the set_n_threads functions in the backends
  137. for (auto & backend : backends) {
  138. ggml_backend_dev_t dev = ggml_backend_get_device(backend.get());
  139. ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
  140. if (reg) {
  141. auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
  142. if (ggml_backend_set_n_threads_fn) {
  143. set_n_threads_fns.emplace_back(backend.get(), ggml_backend_set_n_threads_fn);
  144. }
  145. }
  146. }
  147. llama_set_abort_callback(this, params.abort_callback, params.abort_callback_data);
  148. // graph outputs buffer
  149. {
  150. // resized during inference when a batch uses more outputs
  151. if (output_reserve(params.n_seq_max) < params.n_seq_max) {
  152. throw std::runtime_error("failed to reserve initial output buffer");
  153. }
  154. LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__,
  155. ggml_backend_buffer_name (buf_output.get()),
  156. ggml_backend_buffer_get_size(buf_output.get()) / 1024.0 / 1024.0);
  157. }
  158. }
  159. // init the memory module
  160. if (!hparams.vocab_only) {
  161. llama_memory_params params_mem = {
  162. /*.type_k =*/ params.type_k,
  163. /*.type_v =*/ params.type_v,
  164. /*.swa_full =*/ params.swa_full,
  165. };
  166. memory.reset(model.create_memory(params_mem, cparams));
  167. }
  168. // init backends
  169. if (!hparams.vocab_only) {
  170. LLAMA_LOG_DEBUG("%s: enumerating backends\n", __func__);
  171. backend_buft.clear();
  172. backend_ptrs.clear();
  173. for (auto & backend : backends) {
  174. auto * buft = ggml_backend_get_default_buffer_type(backend.get());
  175. auto backend_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get()));
  176. if (backend_type == GGML_BACKEND_DEVICE_TYPE_CPU && !model.devices.empty()) {
177. // when using the CPU backend, use the host buffer of the first device for faster transfer of the intermediate state
  178. auto * dev = model.devices[0];
  179. auto * host_buft = ggml_backend_dev_host_buffer_type(dev);
  180. if (host_buft) {
  181. buft = host_buft;
  182. }
  183. }
  184. backend_buft.push_back(buft);
  185. backend_ptrs.push_back(backend.get());
  186. }
  187. LLAMA_LOG_DEBUG("%s: backend_ptrs.size() = %zu\n", __func__, backend_ptrs.size());
  188. const size_t max_nodes = this->graph_max_nodes();
  189. LLAMA_LOG_DEBUG("%s: max_nodes = %zu\n", __func__, max_nodes);
  190. gf_res_prev.reset(new llm_graph_result(max_nodes));
  191. gf_res_reserve.reset(new llm_graph_result(max_nodes));
  192. // TODO: move these checks to ggml_backend_sched
  193. // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
  194. bool pipeline_parallel =
  195. model.n_devices() > 1 &&
  196. model.params.n_gpu_layers > (int) model.hparams.n_layer &&
  197. model.params.split_mode == LLAMA_SPLIT_MODE_LAYER &&
  198. cparams.offload_kqv &&
  199. !model.has_tensor_overrides();
  200. // pipeline parallelism requires support for async compute and events in all devices
  201. if (pipeline_parallel) {
  202. for (auto & backend : backends) {
  203. auto dev_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get()));
  204. if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU) {
  205. // ignore CPU backend
  206. continue;
  207. }
  208. auto * dev = ggml_backend_get_device(backend.get());
  209. ggml_backend_dev_props props;
  210. ggml_backend_dev_get_props(dev, &props);
  211. if (!props.caps.async || !props.caps.events) {
  212. // device does not support async compute or events
  213. pipeline_parallel = false;
  214. break;
  215. }
  216. }
  217. }
  218. sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, pipeline_parallel, cparams.op_offload));
  219. if (pipeline_parallel) {
  220. LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(sched.get()));
  221. }
  222. llama_memory_context_ptr mctx;
  223. if (memory) {
  224. LLAMA_LOG_DEBUG("%s: reserving full memory module\n", __func__);
  225. mctx = memory->init_full();
  226. if (!mctx) {
  227. throw std::runtime_error("failed to initialize memory module");
  228. }
  229. }
  230. cross.v_embd.clear();
  231. const uint32_t n_seqs = cparams.kv_unified ? 1 : cparams.n_seq_max;
  232. const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
  233. // avoid reserving graphs with zero outputs - assume one output per sequence
  234. n_outputs = n_seqs;
  235. LLAMA_LOG_DEBUG("%s: worst-case: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs);
  236. // resolve automatic Flash Attention use
  237. if (params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO) {
  238. auto * gf = graph_reserve(1, n_seqs, n_outputs, mctx.get(), true);
  239. if (!gf) {
  240. throw std::runtime_error("failed to split graph for Flash Attention check");
  241. }
  242. const size_t prefix_len = strlen(LLAMA_TENSOR_NAME_FATTN) + 1;
  243. bool fa_device_mismatch = false;
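// walk the split graph: every FLASH_ATTN_EXT node should end up on the same device as the layer it belongs to, otherwise Flash Attention is disabled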
  244. for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
  245. ggml_tensor * n = ggml_graph_node(gf, i);
  246. if (n->op != GGML_OP_FLASH_ATTN_EXT) {
  247. continue;
  248. }
  249. ggml_backend_dev_t device_fa = ggml_backend_get_device(
  250. ggml_backend_sched_get_tensor_backend(sched.get(), n));
  251. // TODO: instead of the tensor names, use a map to keep track of which (FA) tensors belong to which layer
  252. GGML_ASSERT(strncmp(n->name, LLAMA_TENSOR_NAME_FATTN "-", prefix_len) == 0);
  253. const int il = std::stoi(n->name + prefix_len);
  254. ggml_backend_dev_t device_kv = model.dev_layer(il);
  255. if (device_fa != device_kv) {
  256. LLAMA_LOG_WARN("%s: layer %d is assigned to device %s but the Flash Attention tensor "
  257. "is assigned to device %s (usually due to missing support)\n",
  258. __func__, il, ggml_backend_dev_name(device_kv), ggml_backend_dev_name(device_fa));
  259. // FIXME: fa_device_mismatch logic is wrong for --no-kv-offload, but this is broken anyways
  260. fa_device_mismatch = true;
  261. break;
  262. }
  263. }
  264. if (fa_device_mismatch) {
  265. cparams.flash_attn = false;
  266. LLAMA_LOG_WARN("%s: Flash Attention was auto, set to disabled\n", __func__);
  267. if (ggml_is_quantized(params.type_v)) {
  268. throw std::runtime_error("quantized V cache was requested, but this requires Flash Attention");
  269. }
  270. } else {
  271. cparams.flash_attn = true;
  272. LLAMA_LOG_INFO("%s: Flash Attention was auto, set to enabled\n", __func__);
  273. }
  274. }
  275. // reserve worst-case graph
  276. int n_splits_pp = -1;
  277. int n_nodes_pp = -1;
  278. int n_splits_tg = -1;
  279. int n_nodes_tg = -1;
  280. // reserve pp (prompt processing) graph first so that buffers are only allocated once
  281. {
  282. auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
  283. if (!gf) {
  284. if (pipeline_parallel) {
  285. LLAMA_LOG_WARN("%s: compute buffer allocation failed, retrying without pipeline parallelism\n", __func__);
  286. sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, false, cparams.op_offload));
  287. gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
  288. }
  289. if (!gf) {
  290. throw std::runtime_error("failed to allocate compute pp buffers");
  291. }
  292. }
  293. n_splits_pp = ggml_backend_sched_get_n_splits(sched.get());
  294. n_nodes_pp = ggml_graph_n_nodes(gf);
  295. }
  296. // reserve with tg (token generation) graph to get the number of splits and nodes
  297. {
  298. auto * gf = graph_reserve(n_seqs, n_seqs, n_seqs, mctx.get());
  299. if (!gf) {
  300. throw std::runtime_error("failed to allocate compute tg buffers");
  301. }
  302. n_splits_tg = ggml_backend_sched_get_n_splits(sched.get());
  303. n_nodes_tg = ggml_graph_n_nodes(gf);
  304. }
  305. // reserve again with pp graph to avoid ggml-alloc reallocations during inference
  306. {
307. // TODO: not sure if the following graph would be worst case for multi-stream KV caches:
  308. //
  309. // auto * gf = graph_reserve(n_tokens, 1, n_tokens, mctx.get());
  310. //
  311. auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
  312. if (!gf) {
  313. throw std::runtime_error("failed to allocate compute pp buffers");
  314. }
  315. }
  316. for (size_t i = 0; i < backend_ptrs.size(); ++i) {
  317. ggml_backend_t backend = backend_ptrs[i];
  318. ggml_backend_buffer_type_t buft = backend_buft[i];
  319. size_t size = ggml_backend_sched_get_buffer_size(sched.get(), backend);
  320. if (size > 1) {
  321. LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
  322. ggml_backend_buft_name(buft),
  323. size / 1024.0 / 1024.0);
  324. }
  325. }
  326. if (n_nodes_pp == n_nodes_tg) {
  327. LLAMA_LOG_INFO("%s: graph nodes = %d\n", __func__, n_nodes_pp);
  328. } else {
  329. LLAMA_LOG_INFO("%s: graph nodes = %d (with bs=%d), %d (with bs=1)\n", __func__, n_nodes_pp, n_tokens, n_nodes_tg);
  330. }
  331. if (n_splits_pp == n_splits_tg) {
  332. LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits_pp);
  333. } else {
  334. LLAMA_LOG_INFO("%s: graph splits = %d (with bs=%d), %d (with bs=1)\n", __func__, n_splits_pp, n_tokens, n_splits_tg);
  335. }
  336. }
  337. }
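// Illustrative sketch of how a context is typically created through the public C API (llama.h);
// the helper names below follow that header and are not defined in this file.
//
//   llama_context_params cp = llama_context_default_params();
//   cp.n_ctx    = 8192; // 0 -> use the model's training context (n_ctx_train)
//   cp.n_batch  = 2048; // logical batch size accepted by llama_decode()
//   cp.n_ubatch = 512;  // physical micro-batch size, clamped to n_batch
//   llama_context * ctx = llama_init_from_model(model, cp);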
  338. llama_context::~llama_context() {
  339. ggml_opt_free(opt_ctx);
  340. }
  341. void llama_context::synchronize() {
  342. ggml_backend_sched_synchronize(sched.get());
  343. // FIXME: if multiple single tokens are evaluated without a synchronization,
  344. // the stats will be added to the prompt evaluation stats
  345. // this should only happen when using batch size 1 to evaluate a batch
  346. // add the evaluation to the stats
  347. if (n_queued_tokens == 1) {
  348. if (!cparams.no_perf) {
  349. t_eval_us += ggml_time_us() - t_compute_start_us;
  350. }
  351. n_eval++;
  352. } else if (n_queued_tokens > 1) {
  353. if (!cparams.no_perf) {
  354. t_p_eval_us += ggml_time_us() - t_compute_start_us;
  355. }
  356. n_p_eval += n_queued_tokens;
  357. }
358. // get a more accurate load time upon first eval
  359. if (n_queued_tokens > 0 && !has_evaluated_once) {
  360. t_load_us = ggml_time_us() - t_start_us;
  361. has_evaluated_once = true;
  362. }
  363. n_queued_tokens = 0;
  364. t_compute_start_us = 0;
  365. }
  366. const llama_model & llama_context::get_model() const {
  367. return model;
  368. }
  369. const llama_cparams & llama_context::get_cparams() const {
  370. return cparams;
  371. }
  372. ggml_backend_sched_t llama_context::get_sched() const {
  373. return sched.get();
  374. }
  375. uint32_t llama_context::n_ctx() const {
  376. return cparams.n_ctx;
  377. }
  378. uint32_t llama_context::n_ctx_per_seq() const {
  379. return cparams.n_ctx / cparams.n_seq_max;
  380. }
  381. uint32_t llama_context::n_batch() const {
  382. return cparams.n_batch;
  383. }
  384. uint32_t llama_context::n_ubatch() const {
  385. return cparams.n_ubatch;
  386. }
  387. uint32_t llama_context::n_seq_max() const {
  388. return cparams.n_seq_max;
  389. }
  390. uint32_t llama_context::n_threads() const {
  391. return cparams.n_threads;
  392. }
  393. uint32_t llama_context::n_threads_batch() const {
  394. return cparams.n_threads_batch;
  395. }
  396. llama_memory_t llama_context::get_memory() const {
  397. return memory.get();
  398. }
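// apply any pending memory-module updates (e.g. sequence shifts/copies); returns true if an update was applied, in which case a new worst-case graph is reserved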
  399. bool llama_context::memory_update(bool optimize) {
  400. if (!memory) {
  401. return false;
  402. }
  403. {
  404. const auto mctx = memory->init_update(this, optimize);
  405. switch (mctx->get_status()) {
  406. case LLAMA_MEMORY_STATUS_SUCCESS:
  407. {
  408. // noop
  409. } break;
  410. case LLAMA_MEMORY_STATUS_NO_UPDATE:
  411. {
  412. // no updates need to be performed
  413. return false;
  414. }
  415. case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
  416. case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
  417. {
  418. LLAMA_LOG_ERROR("%s: failed to prepare memory update\n", __func__);
  419. return false;
  420. }
  421. }
  422. // reset the previous graph result to make sure that it won't be reused
  423. // TODO: change the mctx->apply() to return information if a graph reserve is needed
  424. // reset the graph result only if the memory module did reset the scheduler
  425. gf_res_prev->reset();
  426. if (!mctx->apply()) {
  427. LLAMA_LOG_ERROR("%s: failed to apply memory update\n", __func__);
  428. }
  429. }
  430. // if the memory module did any computation, we have to reserve a new worst-case graph
  431. {
  432. const auto mctx = memory->init_full();
  433. if (!mctx) {
  434. throw std::runtime_error("failed to initialize memory context");
  435. }
  436. const uint32_t n_seqs = cparams.kv_unified ? 1 : cparams.n_seq_max;
  437. const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
  438. auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
  439. if (!gf) {
  440. LLAMA_LOG_ERROR("%s: failed to reserve graph after the memory update\n", __func__);
  441. }
  442. }
  443. return true;
  444. }
  445. enum llama_pooling_type llama_context::pooling_type() const {
  446. return cparams.pooling_type;
  447. }
  448. float * llama_context::get_logits() {
  449. output_reorder();
  450. return logits;
  451. }
  452. float * llama_context::get_logits_ith(int32_t i) {
  453. int64_t j = -1;
  454. output_reorder();
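// map the user-facing index i to the internal output row j: negative values index from the end of the outputs, non-negative values go through output_ids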
  455. try {
  456. if (logits == nullptr) {
  457. throw std::runtime_error("no logits");
  458. }
  459. if (i < 0) {
  460. j = n_outputs + i;
  461. if (j < 0) {
  462. throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
  463. }
  464. } else if ((size_t) i >= output_ids.size()) {
  465. throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
  466. } else {
  467. j = output_ids[i];
  468. }
  469. if (j < 0) {
  470. throw std::runtime_error(format("batch.logits[%d] != true", i));
  471. }
  472. if (j >= n_outputs) {
  473. // This should not happen
  474. throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
  475. }
  476. return logits + j*model.vocab.n_tokens();
  477. } catch (const std::exception & err) {
  478. LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
  479. #ifndef NDEBUG
  480. GGML_ABORT("fatal error");
  481. #else
  482. return nullptr;
  483. #endif
  484. }
  485. }
  486. float * llama_context::get_embeddings() {
  487. output_reorder();
  488. return embd;
  489. }
  490. float * llama_context::get_embeddings_ith(int32_t i) {
  491. int64_t j = -1;
  492. output_reorder();
  493. try {
  494. if (embd == nullptr) {
  495. throw std::runtime_error("no embeddings");
  496. }
  497. if (i < 0) {
  498. j = n_outputs + i;
  499. if (j < 0) {
  500. throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
  501. }
  502. } else if ((size_t) i >= output_ids.size()) {
  503. throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
  504. } else {
  505. j = output_ids[i];
  506. }
  507. if (j < 0) {
  508. throw std::runtime_error(format("batch.logits[%d] != true", i));
  509. }
  510. if (j >= n_outputs) {
  511. // This should not happen
  512. throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
  513. }
  514. return embd + j*model.hparams.n_embd;
  515. } catch (const std::exception & err) {
  516. LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
  517. #ifndef NDEBUG
  518. GGML_ABORT("fatal error");
  519. #else
  520. return nullptr;
  521. #endif
  522. }
  523. }
  524. float * llama_context::get_embeddings_seq(llama_seq_id seq_id) {
  525. auto it = embd_seq.find(seq_id);
  526. if (it == embd_seq.end()) {
  527. return nullptr;
  528. }
  529. return it->second.data();
  530. }
  531. void llama_context::attach_threadpool(
  532. ggml_threadpool_t threadpool,
  533. ggml_threadpool_t threadpool_batch) {
  534. LLAMA_LOG_DEBUG("%s: call\n", __func__);
  535. this->threadpool = threadpool;
  536. this->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool;
  537. }
  538. void llama_context::detach_threadpool() {
  539. LLAMA_LOG_DEBUG("%s: call\n", __func__);
  540. this->threadpool = nullptr;
  541. this->threadpool_batch = nullptr;
  542. }
  543. void llama_context::set_n_threads(int32_t n_threads, int32_t n_threads_batch) {
  544. LLAMA_LOG_DEBUG("%s: n_threads = %d, n_threads_batch = %d\n", __func__, n_threads, n_threads_batch);
  545. cparams.n_threads = n_threads;
  546. cparams.n_threads_batch = n_threads_batch;
  547. }
  548. void llama_context::set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data) {
  549. LLAMA_LOG_DEBUG("%s: call\n", __func__);
  550. this->abort_callback = abort_callback;
  551. this->abort_callback_data = abort_callback_data;
  552. for (auto & backend : backends) {
  553. auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get()));
  554. auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback");
  555. if (set_abort_callback_fn) {
  556. set_abort_callback_fn(backend.get(), this->abort_callback, this->abort_callback_data);
  557. }
  558. }
  559. }
  560. void llama_context::set_embeddings(bool value) {
  561. LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);
  562. cparams.embeddings = value;
  563. }
  564. void llama_context::set_causal_attn(bool value) {
  565. LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);
  566. cparams.causal_attn = value;
  567. }
  568. void llama_context::set_warmup(bool value) {
  569. LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);
  570. cparams.warmup = value;
  571. }
  572. void llama_context::set_adapter_lora(
  573. llama_adapter_lora * adapter,
  574. float scale) {
  575. LLAMA_LOG_DEBUG("%s: adapter = %p, scale = %f\n", __func__, (void *) adapter, scale);
  576. loras[adapter] = scale;
  577. }
  578. bool llama_context::rm_adapter_lora(
  579. llama_adapter_lora * adapter) {
  580. LLAMA_LOG_DEBUG("%s: adapter = %p\n", __func__, (void *) adapter);
  581. auto pos = loras.find(adapter);
  582. if (pos != loras.end()) {
  583. loras.erase(pos);
  584. return true;
  585. }
  586. return false;
  587. }
  588. void llama_context::clear_adapter_lora() {
  589. LLAMA_LOG_DEBUG("%s: call\n", __func__);
  590. loras.clear();
  591. }
  592. bool llama_context::apply_adapter_cvec(
  593. const float * data,
  594. size_t len,
  595. int32_t n_embd,
  596. int32_t il_start,
  597. int32_t il_end) {
  598. LLAMA_LOG_DEBUG("%s: il_start = %d, il_end = %d\n", __func__, il_start, il_end);
  599. return cvec.apply(model, data, len, n_embd, il_start, il_end);
  600. }
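// process a single ubatch: reuse the previous graph when possible, otherwise build and allocate a new one, then set the inputs and run the computation; on failure, ret holds the ggml status and nullptr is returned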
  601. llm_graph_result * llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_context_i * mctx, ggml_status & ret) {
  602. if (mctx && !mctx->apply()) {
  603. LLAMA_LOG_ERROR("%s: failed to apply memory context\n", __func__);
  604. ret = GGML_STATUS_FAILED;
  605. return nullptr;
  606. }
  607. auto * res = gf_res_prev.get();
  608. auto * gf = res->get_gf();
  609. // the new graph parameters
610. // in order to correctly reuse a graph, its full topology has to be uniquely determined by these parameters
  611. const auto gparams = graph_params(res, ubatch, mctx, gtype);
  612. if (!graph_reuse_disable && res->can_reuse(gparams)) {
  613. //LLAMA_LOG_DEBUG("%s: reusing previous graph\n", __func__);
  614. n_reused++;
  615. } else {
  616. res->reset();
  617. ggml_backend_sched_reset(sched.get());
  618. ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data);
  619. //const auto t_start_us = ggml_time_us();
  620. gf = model.build_graph(gparams);
  621. //LLAMA_LOG_INFO("graph build time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);
  622. if (!gf) {
  623. LLAMA_LOG_ERROR("%s: failed to initialize graph\n", __func__);
  624. ret = GGML_STATUS_FAILED;
  625. return nullptr;
  626. }
  627. if (!ggml_backend_sched_alloc_graph(sched.get(), gf)) {
  628. LLAMA_LOG_ERROR("%s: failed to allocate graph\n", __func__);
  629. ret = GGML_STATUS_ALLOC_FAILED;
  630. return nullptr;
  631. }
  632. }
  633. // set the input data for the input tensors
  634. {
  635. //const auto t_start_us = ggml_time_us();
  636. res->set_inputs(&ubatch);
  637. //LLAMA_LOG_INFO("graph set inputs time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);
  638. }
  639. const auto status = graph_compute(res->get_gf(), ubatch.n_tokens > 1);
  640. if (status != GGML_STATUS_SUCCESS) {
  641. LLAMA_LOG_ERROR("%s: failed to compute graph, compute status: %d\n", __func__, status);
  642. ret = status;
  643. return nullptr;
  644. }
  645. ret = GGML_STATUS_SUCCESS;
  646. return res;
  647. }
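// encode a batch in a single shot using non-causal attention and extract the resulting logits/embeddings; also used as a fallback by decode() for contexts without a memory module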
  648. int llama_context::encode(const llama_batch & batch_inp) {
  649. GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT
  650. if (batch_inp.n_tokens == 0) {
  651. LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
  652. return -1;
  653. }
  654. const auto & hparams = model.hparams;
  655. const int64_t n_embd = hparams.n_embd;
  656. const int64_t n_vocab = model.vocab.n_tokens();
  657. // note: during encode, we always pass the full sequence starting from pos = 0
  658. if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) {
  659. LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
  660. return -1;
  661. }
  662. const uint32_t n_tokens = balloc->get_n_tokens();
  663. // [TAG_NO_CACHE_PAD]
  664. // TODO: add new split mode where we pad the input sequences so that ubatch.equal_seqs == true
  665. const llama_ubatch ubatch = balloc->split_simple(n_tokens);
  666. // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
  667. GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");
  668. if (t_compute_start_us == 0) {
  669. t_compute_start_us = ggml_time_us();
  670. }
  671. // TODO: this clear of the buffer can easily be forgotten - need something better
  672. embd_seq.clear();
  673. n_queued_tokens += n_tokens;
  674. // reserve output buffer
  675. if (output_reserve(n_tokens) < n_tokens) {
  676. LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens);
  677. return -2;
678. }
  679. for (uint32_t i = 0; i < n_tokens; ++i) {
  680. output_ids[i] = i;
  681. }
  682. n_outputs = n_tokens;
  683. const auto causal_attn_org = cparams.causal_attn;
  684. // always use non-causal attention for encoder graphs
  685. // TODO: this is a tmp solution until we have a proper way to support enc-dec models
  686. // ref: https://github.com/ggml-org/llama.cpp/pull/12181#issuecomment-2730451223
  687. cparams.causal_attn = false;
  688. ggml_status status;
  689. const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_ENCODER, nullptr, status);
  690. cparams.causal_attn = causal_attn_org;
  691. if (!res) {
  692. switch (status) {
  693. case GGML_STATUS_ABORTED: return 2;
  694. case GGML_STATUS_ALLOC_FAILED: return -2;
  695. case GGML_STATUS_FAILED: return -3;
  696. case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen");
  697. }
  698. }
  699. auto * t_logits = res->get_logits();
  700. auto * t_embd = res->get_embd_pooled() ? res->get_embd_pooled() : res->get_embd();
  701. // extract logits
  702. if (logits && t_logits) {
  703. ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits);
  704. GGML_ASSERT(backend_res != nullptr);
  705. GGML_ASSERT(logits != nullptr);
  706. ggml_backend_tensor_get_async(backend_res, t_logits, logits, 0, n_tokens*n_vocab*sizeof(float));
  707. }
  708. // extract embeddings
  709. if (embd && t_embd) {
  710. ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd);
  711. GGML_ASSERT(backend_embd != nullptr);
  712. switch (cparams.pooling_type) {
  713. case LLAMA_POOLING_TYPE_NONE:
  714. {
  715. // extract token embeddings
  716. GGML_ASSERT(embd != nullptr);
  717. GGML_ASSERT(n_tokens*n_embd <= (int64_t) embd_size);
  718. ggml_backend_tensor_get_async(backend_embd, t_embd, embd, 0, n_tokens*n_embd*sizeof(float));
  719. } break;
  720. case LLAMA_POOLING_TYPE_MEAN:
  721. case LLAMA_POOLING_TYPE_CLS:
  722. case LLAMA_POOLING_TYPE_LAST:
  723. {
  724. // extract sequence embeddings
  725. auto & embd_seq_out = embd_seq;
  726. for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
  727. const llama_seq_id seq_id = ubatch.seq_id_unq[s];
  728. const int32_t seq_idx = ubatch.seq_idx[seq_id];
  729. embd_seq_out[seq_id].resize(n_embd);
  730. ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
  731. }
  732. } break;
  733. case LLAMA_POOLING_TYPE_RANK:
  734. {
  735. // extract the rerank score - n_cls_out floats per sequence
  736. auto & embd_seq_out = embd_seq;
  737. const uint32_t n_cls_out = hparams.n_cls_out;
  738. for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
  739. const llama_seq_id seq_id = ubatch.seq_id_unq[s];
  740. const int32_t seq_idx = ubatch.seq_idx[seq_id];
  741. embd_seq_out[seq_id].resize(n_cls_out);
  742. ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
  743. }
  744. } break;
  745. case LLAMA_POOLING_TYPE_UNSPECIFIED:
  746. {
  747. GGML_ABORT("unknown pooling type");
  748. }
  749. }
  750. }
  751. // TODO: hacky solution
  752. if (model.arch == LLM_ARCH_T5 && t_embd) {
  753. //cross.t_embd = t_embd;
  754. synchronize();
  755. cross.n_embd = t_embd->ne[0];
  756. cross.n_enc = t_embd->ne[1];
  757. cross.v_embd.resize(cross.n_embd*cross.n_enc);
  758. memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd));
  759. const auto & batch = balloc->get_batch();
  760. // remember the sequence ids used during the encoding - needed for cross attention later
  761. cross.seq_ids_enc.resize(n_tokens);
  762. for (uint32_t i = 0; i < n_tokens; i++) {
  763. cross.seq_ids_enc[i].clear();
  764. for (int s = 0; s < batch.n_seq_id[i]; s++) {
  765. const llama_seq_id seq_id = batch.seq_id[i][s];
  766. cross.seq_ids_enc[i].insert(seq_id);
  767. }
  768. }
  769. }
  770. return 0;
  771. }
  772. int llama_context::decode(const llama_batch & batch_inp) {
  773. GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT
  774. if (!memory) {
  775. LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__);
  776. return encode(batch_inp);
  777. }
  778. if (batch_inp.n_tokens == 0) {
  779. LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
  780. return -1;
  781. }
  782. const auto & vocab = model.vocab;
  783. const auto & hparams = model.hparams;
  784. const int64_t n_vocab = vocab.n_tokens();
  785. const int64_t n_embd = hparams.n_embd;
  786. // when computing embeddings, all tokens are output
  787. const bool output_all = cparams.embeddings;
  788. if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, output_all)) {
  789. LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
  790. return -1;
  791. }
  792. const uint32_t n_tokens_all = balloc->get_n_tokens();
  793. const uint32_t n_outputs_all = balloc->get_n_outputs();
  794. if (output_all) {
  795. // require that all tokens are output
  796. if (n_outputs_all != n_tokens_all) {
  797. LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n",
  798. __func__, n_outputs_all, n_tokens_all);
  799. return -1;
  800. }
  801. }
  802. GGML_ASSERT(n_tokens_all <= cparams.n_batch);
  803. GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");
  804. if (t_compute_start_us == 0) {
  805. t_compute_start_us = ggml_time_us();
  806. }
  807. n_queued_tokens += n_tokens_all;
  808. // TODO: this clear of the buffer can easily be forgotten - need something better
  809. embd_seq.clear();
  810. output_swaps.clear();
  811. bool did_optimize = false;
  812. // handle any pending shifts/copies
  813. memory_update(false);
  814. llama_memory_context_ptr mctx;
  815. while (true) {
  816. mctx = memory->init_batch(*balloc, cparams.n_ubatch, output_all);
  817. if (!mctx) {
  818. return -2;
  819. }
  820. switch (mctx->get_status()) {
  821. case LLAMA_MEMORY_STATUS_SUCCESS:
  822. {
  823. } break;
  824. case LLAMA_MEMORY_STATUS_NO_UPDATE:
  825. {
  826. LLAMA_LOG_ERROR("%s: unexpected memory context status: %d\n", __func__, mctx->get_status());
  827. return -2;
  828. }
  829. case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
  830. {
  831. if (!did_optimize) {
  832. did_optimize = true;
  833. if (memory_update(true)) {
  834. LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens());
  835. continue;
  836. }
  837. }
  838. LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens());
  839. return 1;
  840. }
  841. case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
  842. {
  843. LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens());
  844. return -2;
  845. }
  846. }
  847. break;
  848. }
  849. // reserve output buffer
  850. if (output_reserve(n_outputs_all) < n_outputs_all) {
  851. LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all);
  852. return -2;
853. }
  854. int64_t n_outputs_prev = 0;
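// process the batch one ubatch at a time; the outputs of each ubatch are appended to the logits/embd buffers at offset n_outputs_prev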
  855. do {
  856. const auto & ubatch = mctx->get_ubatch();
  857. // count the outputs in this ubatch
  858. {
  859. int32_t n_outputs_new = 0;
  860. if (n_outputs_all == n_tokens_all) {
  861. n_outputs_new = ubatch.n_tokens;
  862. } else {
  863. for (uint32_t i = 0; i < ubatch.n_tokens; i++) {
  864. n_outputs_new += (int32_t) (ubatch.output[i] != 0);
  865. }
  866. }
  867. // needs to happen before the graph is built
  868. n_outputs = n_outputs_new;
  869. }
  870. ggml_status status;
  871. const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status);
  872. if (!res) {
  873. // the last ubatch failed or was aborted -> remove all positions of that ubatch from the memory module
  874. llama_pos pos_min[LLAMA_MAX_SEQ];
  875. for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
  876. pos_min[s] = std::numeric_limits<llama_pos>::max();
  877. }
  878. for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
  879. const auto & seq_id = ubatch.seq_id[i][0];
  880. pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]);
  881. }
  882. for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
  883. if (pos_min[s] == std::numeric_limits<llama_pos>::max()) {
  884. continue;
  885. }
  886. LLAMA_LOG_WARN("%s: removing memory module entries for seq_id = %d, pos = [%d, +inf)\n", __func__, s, pos_min[s]);
  887. memory->seq_rm(s, pos_min[s], -1);
  888. }
  889. switch (status) {
  890. case GGML_STATUS_ABORTED: return 2;
  891. case GGML_STATUS_ALLOC_FAILED: return -2;
  892. case GGML_STATUS_FAILED: return -3;
  893. case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen");
  894. }
  895. }
  896. // plot the computation graph in dot format (for debugging purposes)
  897. //if (n_past%100 == 0) {
  898. // ggml_graph_dump_dot(gf, NULL, "llama.dot");
  899. //}
  900. auto * t_logits = res->get_logits();
  901. auto * t_embd = cparams.embeddings ? res->get_embd() : nullptr;
  902. if (t_embd && res->get_embd_pooled()) {
  903. t_embd = res->get_embd_pooled();
  904. }
  905. // extract logits
  906. if (t_logits && n_outputs > 0) {
  907. ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits);
  908. GGML_ASSERT(backend_res != nullptr);
  909. GGML_ASSERT(logits != nullptr);
  910. float * logits_out = logits + n_outputs_prev*n_vocab;
  911. if (n_outputs) {
  912. GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all);
  913. GGML_ASSERT((n_outputs_prev + n_outputs)*n_vocab <= (int64_t) logits_size);
  914. ggml_backend_tensor_get_async(backend_res, t_logits, logits_out, 0, n_outputs*n_vocab*sizeof(float));
  915. }
  916. }
  917. // extract embeddings
  918. if (t_embd && n_outputs > 0) {
  919. ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd);
  920. GGML_ASSERT(backend_embd != nullptr);
  921. switch (cparams.pooling_type) {
  922. case LLAMA_POOLING_TYPE_NONE:
  923. {
  924. // extract token embeddings
  925. GGML_ASSERT(embd != nullptr);
  926. float * embd_out = embd + n_outputs_prev*n_embd;
  927. if (n_outputs) {
  928. GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all);
  929. GGML_ASSERT((n_outputs_prev + n_outputs)*n_embd <= (int64_t) embd_size);
  930. ggml_backend_tensor_get_async(backend_embd, t_embd, embd_out, 0, n_outputs*n_embd*sizeof(float));
  931. }
  932. } break;
  933. case LLAMA_POOLING_TYPE_MEAN:
  934. case LLAMA_POOLING_TYPE_CLS:
  935. case LLAMA_POOLING_TYPE_LAST:
  936. {
  937. // extract sequence embeddings (cleared before processing each batch)
  938. auto & embd_seq_out = embd_seq;
  939. for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
  940. const llama_seq_id seq_id = ubatch.seq_id_unq[s];
  941. const int32_t seq_idx = ubatch.seq_idx[seq_id];
  942. embd_seq_out[seq_id].resize(n_embd);
  943. ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
  944. }
  945. } break;
  946. case LLAMA_POOLING_TYPE_RANK:
  947. {
  948. // extract the rerank score - n_cls_out floats per sequence
  949. auto & embd_seq_out = embd_seq;
  950. const uint32_t n_cls_out = hparams.n_cls_out;
  951. for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
  952. const llama_seq_id seq_id = ubatch.seq_id_unq[s];
  953. const int32_t seq_idx = ubatch.seq_idx[seq_id];
  954. embd_seq_out[seq_id].resize(n_cls_out);
  955. ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
  956. }
  957. } break;
  958. case LLAMA_POOLING_TYPE_UNSPECIFIED:
  959. {
  960. GGML_ABORT("unknown pooling type");
  961. }
  962. }
  963. }
  964. n_outputs_prev += n_outputs;
  965. } while (mctx->next());
  966. // set to total number of outputs in the batch, for use in llama_get_logits_ith
  967. n_outputs = n_outputs_all;
  968. // set output mappings
  969. if (n_outputs > 0) {
  970. bool sorted_output = true;
  971. auto & out_ids = balloc->get_out_ids();
  972. GGML_ASSERT(out_ids.size() == (size_t) n_outputs);
  973. for (int64_t i = 0; i < n_outputs; ++i) {
  974. int64_t out_id = out_ids[i];
  975. output_ids[out_id] = i;
  976. if (out_id != i) {
  977. sorted_output = false;
  978. }
  979. }
  980. // make the outputs have the same order they had in the user-provided batch
  981. // note: this is mostly relevant for recurrent models atm
  982. if (!sorted_output) {
  983. GGML_ASSERT((size_t) n_outputs == out_ids.size());
  984. // TODO: is there something more efficient which also minimizes swaps?
  985. // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort)
  986. for (uint32_t i = 0; i < n_outputs - 1; ++i) {
  987. uint32_t j_min = i;
  988. for (uint32_t j = i + 1; j < n_outputs; ++j) {
  989. if (out_ids[j] < out_ids[j_min]) {
  990. j_min = j;
  991. }
  992. }
  993. if (j_min == i) {
  994. continue;
  995. }
  996. std::swap(out_ids[i], out_ids[j_min]);
  997. // remember the swaps and apply them lazily upon logits/embeddings access
  998. output_swaps.push_back({ i, j_min });
  999. }
  1000. std::fill(output_ids.begin(), output_ids.end(), -1);
  1001. for (uint32_t i = 0; i < n_outputs; ++i) {
  1002. output_ids[out_ids[i]] = i;
  1003. }
  1004. }
  1005. }
  1006. // wait for the computation to finish (automatically done when obtaining the model output)
  1007. //synchronize();
  1008. return 0;
  1009. }
  1010. //
  1011. // output
  1012. //
  1013. uint32_t llama_context::output_reserve(int32_t n_outputs) {
  1014. const auto & hparams = model.hparams;
  1015. const auto & vocab = model.vocab;
  1016. const int64_t n_outputs_max = std::max<int64_t>(n_outputs, n_seq_max());
  1017. const auto n_batch = cparams.n_batch;
  1018. const auto n_vocab = vocab.n_tokens();
  1019. const auto n_embd = hparams.n_embd;
  1020. bool has_logits = true;
  1021. bool has_embd = cparams.embeddings;
  1022. // TODO: hacky enc-dec support
  1023. if (model.arch == LLM_ARCH_T5) {
  1024. has_logits = true;
  1025. has_embd = true;
  1026. }
  1027. logits_size = has_logits ? n_vocab*n_outputs_max : 0;
  1028. embd_size = has_embd ? n_embd*n_outputs_max : 0;
  1029. if (output_ids.empty()) {
  1030. // init, never resized afterwards
  1031. output_ids.resize(n_batch);
  1032. }
  1033. const size_t prev_size = buf_output ? ggml_backend_buffer_get_size(buf_output.get()) : 0;
  1034. const size_t new_size = (logits_size + embd_size) * sizeof(float);
  1035. // alloc only when more than the current capacity is required
  1036. // TODO: also consider shrinking the buffer
  1037. if (!buf_output || prev_size < new_size) {
  1038. if (buf_output) {
  1039. #ifndef NDEBUG
  1040. // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark)
  1041. LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
  1042. #endif
  1043. buf_output = nullptr;
  1044. logits = nullptr;
  1045. embd = nullptr;
  1046. }
  1047. auto * buft = ggml_backend_cpu_buffer_type();
  1048. // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory
  1049. auto * output_dev = model.dev_output();
  1050. auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr;
  1051. if (output_dev_host_buft) {
  1052. buft = output_dev_host_buft;
  1053. }
  1054. buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size));
  1055. if (buf_output == nullptr) {
  1056. LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0));
  1057. return 0;
  1058. }
  1059. }
  1060. float * output_base = (float *) ggml_backend_buffer_get_base(buf_output.get());
  1061. logits = has_logits ? output_base : nullptr;
  1062. embd = has_embd ? output_base + logits_size : nullptr;
  1063. // set all ids as invalid (negative)
  1064. std::fill(output_ids.begin(), output_ids.end(), -1);
  1065. this->n_outputs = 0;
  1066. return n_outputs_max;
  1067. }
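// Illustrative sizing example for output_reserve (numbers are assumptions, not defaults):
// with n_vocab = 32000, n_embd = 4096, n_outputs_max = 512 and embeddings enabled,
//   logits_size = 32000 * 512 floats ~ 62.5 MiB
//   embd_size   =  4096 * 512 floats ~  8.0 MiB
// so new_size = (logits_size + embd_size) * sizeof(float) ~ 70.5 MiB.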
  1068. void llama_context::output_reorder() {
  1069. const uint64_t n_vocab = model.vocab.n_tokens();
  1070. const uint64_t n_embd = model.hparams.n_embd;
  1071. for (size_t s = 0; s < output_swaps.size(); ++s) {
  1072. const uint64_t i0 = output_swaps[s].i0;
  1073. const uint64_t i1 = output_swaps[s].i1;
  1074. if (logits_size > 0) {
  1075. for (uint64_t k = 0; k < n_vocab; k++) {
  1076. std::swap(logits[i0*n_vocab + k], logits[i1*n_vocab + k]);
  1077. }
  1078. }
  1079. if (embd_size > 0) {
  1080. for (uint64_t k = 0; k < n_embd; k++) {
  1081. std::swap(embd[i0*n_embd + k], embd[i1*n_embd + k]);
  1082. }
  1083. }
  1084. }
  1085. output_swaps.clear();
  1086. }
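// Worked example of the lazy reordering (values are illustrative): if decode produced the
// outputs in order out_ids = {2, 0, 1} relative to the user batch, the selection sort in
// decode records output_swaps = {(0,1), (1,2)}; output_reorder() then applies the same row
// swaps to logits/embd, restoring the order of the user-provided batch before access.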
  1087. //
  1088. // graph
  1089. //
  1090. uint32_t llama_context::graph_max_nodes() const {
  1091. return std::max<uint32_t>(1024u, 8u*model.n_tensors());
  1092. }
  1093. llm_graph_result * llama_context::get_gf_res_reserve() const {
  1094. return static_cast<llm_graph_result *>(gf_res_reserve.get());
  1095. }
  1096. ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only) {
  1097. LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs);
  1098. GGML_ASSERT(n_outputs >= 1);
  1099. if (n_tokens % n_seqs != 0) {
  1100. n_tokens = ((n_tokens + (n_seqs - 1)) / n_seqs) * n_seqs; // round to next multiple of n_seqs
  1101. n_outputs = std::min(n_outputs, n_tokens);
  1102. LLAMA_LOG_DEBUG("%s: making n_tokens a multiple of n_seqs - n_tokens = %u, n_seqs = %u, n_outputs = %u\n", __func__, n_tokens, n_seqs, n_outputs);
  1103. }
  1104. ggml_backend_sched_reset(sched.get());
1105. // when the scheduler is reset, we cannot reuse the old graph, so we also reset the previous graph result to prevent that
  1106. gf_res_prev->reset();
  1107. // store the n_outputs as it is, and restore it afterwards
  1108. // TODO: not sure if needed, might simplify in the future by removing this
  1109. const auto save_n_outputs = this->n_outputs;
  1110. this->n_outputs = n_outputs;
  1111. llama_batch_allocr balloc(model.hparams.n_pos_per_embd());
  1112. llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs);
  1113. auto * res = gf_res_reserve.get();
  1114. const auto gparams = graph_params(res, ubatch, mctx, LLM_GRAPH_TYPE_DEFAULT);
  1115. res->reset();
  1116. auto * gf = model.build_graph(gparams);
  1117. this->n_outputs = save_n_outputs;
  1118. // initialize scheduler with the specified graph
  1119. if (split_only) {
  1120. ggml_backend_sched_split_graph(sched.get(), gf);
  1121. } else if (!ggml_backend_sched_reserve(sched.get(), gf)) {
  1122. LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
  1123. return nullptr;
  1124. }
  1125. return gf;
  1126. }
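// Rounding example for graph_reserve (illustrative): n_tokens = 10 with n_seqs = 4 is
// rounded up to n_tokens = 12 (a reserve ubatch of 12/4 = 3 tokens per sequence), and
// n_outputs is clamped to at most 12.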
  1127. llm_graph_params llama_context::graph_params(
  1128. llm_graph_result * res,
  1129. const llama_ubatch & ubatch,
  1130. const llama_memory_context_i * mctx,
  1131. llm_graph_type gtype) const {
  1132. return {
  1133. /*.arch =*/ model.arch,
  1134. /*.hparams =*/ model.hparams,
  1135. /*.cparams =*/ cparams,
  1136. /*.ubatch =*/ ubatch,
  1137. /*.gtype =*/ gtype,
  1138. /*.sched =*/ sched.get(),
  1139. /*.backend_cpu =*/ backend_cpu,
  1140. /*.cvec =*/ &cvec,
  1141. /*.loras =*/ &loras,
  1142. /*.mctx =*/ mctx,
  1143. /*.cross =*/ &cross,
  1144. /*.n_outputs =*/ n_outputs,
  1145. /*.cb =*/ graph_get_cb(),
  1146. /*.res =*/ res,
  1147. };
  1148. }
  1149. ggml_status llama_context::graph_compute(
  1150. ggml_cgraph * gf,
  1151. bool batched) {
  1152. int n_threads = batched ? cparams.n_threads_batch : cparams.n_threads;
  1153. ggml_threadpool_t tp = batched ? threadpool_batch : threadpool;
  1154. if (backend_cpu != nullptr) {
  1155. auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend_cpu));
  1156. auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool");
  1157. if (set_threadpool_fn) {
  1158. set_threadpool_fn(backend_cpu, tp);
  1159. }
  1160. }
  1161. // set the number of threads for all the backends
  1162. for (const auto & set_n_threads_fn : set_n_threads_fns) {
  1163. set_n_threads_fn.second(set_n_threads_fn.first, n_threads);
  1164. }
  1165. auto status = ggml_backend_sched_graph_compute_async(sched.get(), gf);
  1166. if (status != GGML_STATUS_SUCCESS) {
  1167. LLAMA_LOG_ERROR("%s: ggml_backend_sched_graph_compute_async failed with error %d\n", __func__, status);
  1168. }
  1169. // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(sched));
  1170. return status;
  1171. }
  1172. llm_graph_cb llama_context::graph_get_cb() const {
  1173. return [&](const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il) {
  1174. if (il >= 0) {
  1175. ggml_format_name(cur, "%s-%d", name, il);
  1176. } else {
  1177. ggml_set_name(cur, name);
  1178. }
  1179. if (!cparams.offload_kqv) {
  1180. if (strcmp(name, "kqv_merged_cont") == 0) {
  1181. // all nodes between the KV store and the attention output are run on the CPU
  1182. ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend_cpu);
  1183. }
  1184. }
  1185. // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
  1186. // FIXME: fix in ggml_backend_sched
  1187. const bool full_offload = model.params.n_gpu_layers > (int) model.hparams.n_layer;
  1188. if (ubatch.n_tokens < 32 || full_offload) {
  1189. if (il != -1 && strcmp(name, "norm") == 0) {
  1190. const auto & dev_layer = model.dev_layer(il);
  1191. for (const auto & backend : backends) {
  1192. if (ggml_backend_get_device(backend.get()) == dev_layer) {
  1193. if (ggml_backend_supports_op(backend.get(), cur)) {
  1194. ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend.get());
  1195. }
  1196. }
  1197. }
  1198. }
  1199. }
  1200. };
  1201. }
  1202. //
  1203. // state save/load
  1204. //
  1205. class llama_io_write_dummy : public llama_io_write_i {
  1206. public:
  1207. llama_io_write_dummy() = default;
  1208. void write(const void * /* src */, size_t size) override {
  1209. size_written += size;
  1210. }
  1211. void write_tensor(const ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override {
  1212. size_written += size;
  1213. }
  1214. size_t n_bytes() override {
  1215. return size_written;
  1216. }
  1217. private:
  1218. size_t size_written = 0;
  1219. };
  1220. class llama_io_write_buffer : public llama_io_write_i {
  1221. public:
  1222. llama_io_write_buffer(
  1223. uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
  1224. void write(const void * src, size_t size) override {
  1225. if (size > buf_size) {
  1226. throw std::runtime_error("unexpectedly reached end of buffer");
  1227. }
  1228. memcpy(ptr, src, size);
  1229. ptr += size;
  1230. size_written += size;
  1231. buf_size -= size;
  1232. }
  1233. void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override {
  1234. if (size > buf_size) {
  1235. throw std::runtime_error("unexpectedly reached end of buffer");
  1236. }
  1237. ggml_backend_tensor_get(tensor, ptr, offset, size);
  1238. ptr += size;
  1239. size_written += size;
  1240. buf_size -= size;
  1241. }
  1242. size_t n_bytes() override {
  1243. return size_written;
  1244. }
  1245. private:
  1246. uint8_t * ptr;
  1247. size_t buf_size = 0;
  1248. size_t size_written = 0;
  1249. };
  1250. class llama_io_read_buffer : public llama_io_read_i {
  1251. public:
  1252. llama_io_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
  1253. const uint8_t * read(size_t size) override {
  1254. const uint8_t * base_ptr = ptr;
  1255. if (size > buf_size) {
  1256. throw std::runtime_error("unexpectedly reached end of buffer");
  1257. }
  1258. ptr += size;
  1259. size_read += size;
  1260. buf_size -= size;
  1261. return base_ptr;
  1262. }
  1263. void read_to(void * dst, size_t size) override {
  1264. memcpy(dst, read(size), size);
  1265. }
  1266. size_t n_bytes() override {
  1267. return size_read;
  1268. }
  1269. private:
  1270. const uint8_t * ptr;
  1271. size_t buf_size = 0;
  1272. size_t size_read = 0;
  1273. };
  1274. class llama_io_write_file : public llama_io_write_i {
  1275. public:
  1276. llama_io_write_file(llama_file * f) : file(f) {}
  1277. void write(const void * src, size_t size) override {
  1278. file->write_raw(src, size);
  1279. size_written += size;
  1280. }
  1281. void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override {
  1282. temp_buffer.resize(size);
  1283. ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
  1284. write(temp_buffer.data(), temp_buffer.size());
  1285. }
  1286. size_t n_bytes() override {
  1287. return size_written;
  1288. }
  1289. private:
  1290. llama_file * file;
  1291. size_t size_written = 0;
  1292. std::vector<uint8_t> temp_buffer;
  1293. };
  1294. class llama_io_read_file : public llama_io_read_i {
  1295. public:
  1296. llama_io_read_file(llama_file * f) : file(f) {}
  1297. void read_to(void * dst, size_t size) override {
  1298. file->read_raw(dst, size);
  1299. size_read += size;
  1300. }
  1301. const uint8_t * read(size_t size) override {
  1302. temp_buffer.resize(size);
  1303. read_to(temp_buffer.data(), size);
  1304. return temp_buffer.data();
  1305. }
  1306. size_t n_bytes() override {
  1307. return size_read;
  1308. }
  1309. private:
  1310. llama_file * file;
  1311. size_t size_read = 0;
  1312. std::vector<uint8_t> temp_buffer;
  1313. };
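// Illustrative sketch (not part of this translation unit) of how these I/O helpers back the
// public state API: a dummy writer measures the serialized size, then a buffer writer does
// the actual copy. Assumes a valid `ctx`.
//
//     std::vector<uint8_t> buf(llama_state_get_size(ctx));                  // size pass (llama_io_write_dummy)
//     const size_t n = llama_state_get_data(ctx, buf.data(), buf.size());   // copy pass (llama_io_write_buffer)
//     // ... later, restore into the same or a compatible context:
//     llama_state_set_data(ctx, buf.data(), n);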
  1314. size_t llama_context::state_get_size() {
  1315. llama_io_write_dummy io;
  1316. try {
  1317. return state_write_data(io);
  1318. } catch (const std::exception & err) {
  1319. LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
  1320. return 0;
  1321. }
  1322. }
  1323. size_t llama_context::state_get_data(uint8_t * dst, size_t size) {
  1324. llama_io_write_buffer io(dst, size);
  1325. try {
  1326. return state_write_data(io);
  1327. } catch (const std::exception & err) {
  1328. LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
  1329. return 0;
  1330. }
  1331. }
  1332. size_t llama_context::state_set_data(const uint8_t * src, size_t size) {
  1333. llama_io_read_buffer io(src, size);
  1334. try {
  1335. return state_read_data(io);
  1336. } catch (const std::exception & err) {
  1337. LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
  1338. return 0;
  1339. }
  1340. }
  1341. size_t llama_context::state_seq_get_size(llama_seq_id seq_id, llama_state_seq_flags flags) {
  1342. llama_io_write_dummy io;
  1343. try {
  1344. return state_seq_write_data(io, seq_id, flags);
  1345. } catch (const std::exception & err) {
  1346. LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
  1347. return 0;
  1348. }
  1349. }
  1350. size_t llama_context::state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size, llama_state_seq_flags flags) {
  1351. llama_io_write_buffer io(dst, size);
  1352. try {
  1353. return state_seq_write_data(io, seq_id, flags);
  1354. } catch (const std::exception & err) {
  1355. LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
  1356. return 0;
  1357. }
  1358. }
  1359. size_t llama_context::state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size, llama_state_seq_flags flags) {
  1360. llama_io_read_buffer io(src, size);
  1361. try {
  1362. return state_seq_read_data(io, seq_id, flags);
  1363. } catch (const std::exception & err) {
  1364. LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
  1365. return 0;
  1366. }
  1367. }
  1368. bool llama_context::state_load_file(const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  1369. llama_file file(filepath, "rb");
  1370. // sanity checks
  1371. {
  1372. const uint32_t magic = file.read_u32();
  1373. const uint32_t version = file.read_u32();
  1374. if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
  1375. LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
  1376. return false;
  1377. }
  1378. }
  1379. // load the prompt
  1380. {
  1381. const uint32_t n_token_count = file.read_u32();
  1382. if (n_token_count > n_token_capacity) {
  1383. LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
  1384. return false;
  1385. }
  1386. file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
  1387. *n_token_count_out = n_token_count;
  1388. }
  1389. // restore the context state
  1390. {
  1391. const size_t n_state_size_cur = file.size() - file.tell();
1392. llama_io_read_file io(&file);
  1393. const size_t n_read = state_read_data(io);
  1394. if (n_read != n_state_size_cur) {
  1395. LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read);
  1396. return false;
  1397. }
  1398. }
  1399. return true;
  1400. }
  1401. bool llama_context::state_save_file(const char * filepath, const llama_token * tokens, size_t n_token_count) {
  1402. llama_file file(filepath, "wb");
  1403. file.write_u32(LLAMA_SESSION_MAGIC);
  1404. file.write_u32(LLAMA_SESSION_VERSION);
  1405. // save the prompt
  1406. file.write_u32((uint32_t) n_token_count);
  1407. file.write_raw(tokens, sizeof(llama_token) * n_token_count);
  1408. // save the context state using stream saving
  1409. llama_io_write_file io(&file);
  1410. state_write_data(io);
  1411. return true;
  1412. }
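// On-disk session layout written above (fields in file order): u32 LLAMA_SESSION_MAGIC,
// u32 LLAMA_SESSION_VERSION, u32 n_token_count, n_token_count llama_token entries, and
// finally the state blob produced by state_write_data(). state_load_file() expects exactly
// this layout and rejects files with a different magic/version.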
  1413. size_t llama_context::state_seq_load_file(llama_seq_id seq_id, const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  1414. llama_file file(filepath, "rb");
  1415. // version checks
  1416. {
  1417. const uint32_t magic = file.read_u32();
  1418. const uint32_t version = file.read_u32();
  1419. if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) {
  1420. LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version);
  1421. return 0;
  1422. }
  1423. }
  1424. // load the prompt
  1425. {
  1426. const uint32_t n_token_count = file.read_u32();
  1427. if (n_token_count > n_token_capacity) {
  1428. LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
  1429. return 0;
  1430. }
  1431. file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
  1432. *n_token_count_out = n_token_count;
  1433. }
  1434. // restore the context state
  1435. {
  1436. const size_t state_size = file.size() - file.tell();
  1437. llama_io_read_file io(&file);
  1438. const size_t nread = state_seq_read_data(io, seq_id, 0);
  1439. if (!nread) {
  1440. LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__);
  1441. return 0;
  1442. }
  1443. GGML_ASSERT(nread <= state_size);
  1444. GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell());
  1445. }
  1446. return file.tell();
  1447. }
  1448. size_t llama_context::state_seq_save_file(llama_seq_id seq_id, const char * filepath, const llama_token * tokens, size_t n_token_count) {
  1449. llama_file file(filepath, "wb");
  1450. file.write_u32(LLAMA_STATE_SEQ_MAGIC);
  1451. file.write_u32(LLAMA_STATE_SEQ_VERSION);
  1452. // save the prompt
  1453. file.write_u32((uint32_t) n_token_count);
  1454. file.write_raw(tokens, sizeof(llama_token) * n_token_count);
  1455. // save the context state using stream saving
  1456. llama_io_write_file io(&file);
  1457. state_seq_write_data(io, seq_id, 0);
  1458. const size_t res = file.tell();
  1459. GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + io.n_bytes());
  1460. return res;
  1461. }
  1462. size_t llama_context::state_write_data(llama_io_write_i & io) {
  1463. LLAMA_LOG_DEBUG("%s: writing state\n", __func__);
  1464. // write model info
  1465. {
  1466. LLAMA_LOG_DEBUG("%s: - writing model info\n", __func__);
  1467. const std::string arch_str = llm_arch_name(model.arch);
  1468. io.write_string(arch_str);
  1469. // TODO: add more model-specific info which should prevent loading the session file if not identical
  1470. }
  1471. // write output ids
  1472. {
  1473. LLAMA_LOG_DEBUG("%s: - writing output ids\n", __func__);
  1474. const auto n_outputs = this->n_outputs;
  1475. const auto & output_ids = this->output_ids;
  1476. std::vector<int32_t> w_output_pos;
  1477. w_output_pos.resize(n_outputs);
  1478. // build a more compact representation of the output ids
  1479. for (size_t i = 0; i < n_batch(); ++i) {
  1480. // map an output id to a position in the batch
  1481. int64_t pos = output_ids[i];
  1482. if (pos >= 0) {
  1483. GGML_ASSERT(pos < n_outputs);
  1484. w_output_pos[pos] = i;
  1485. }
  1486. }
  1487. io.write(&n_outputs, sizeof(n_outputs));
  1488. if (n_outputs) {
  1489. io.write(w_output_pos.data(), n_outputs * sizeof(int32_t));
  1490. }
  1491. }
  1492. // write logits
  1493. {
  1494. LLAMA_LOG_DEBUG("%s: - writing logits\n", __func__);
  1495. const uint64_t logits_size = std::min((uint64_t) this->logits_size, (uint64_t) n_outputs * model.vocab.n_tokens());
  1496. io.write(&logits_size, sizeof(logits_size));
  1497. if (logits_size) {
  1498. io.write(logits, logits_size * sizeof(float));
  1499. }
  1500. }
  1501. // write embeddings
  1502. {
  1503. LLAMA_LOG_DEBUG("%s: - writing embeddings\n", __func__);
  1504. const uint64_t embd_size = std::min((uint64_t) this->embd_size, (uint64_t) n_outputs * model.hparams.n_embd);
  1505. io.write(&embd_size, sizeof(embd_size));
  1506. if (embd_size) {
  1507. io.write(embd, embd_size * sizeof(float));
  1508. }
  1509. }
  1510. if (memory != nullptr) {
  1511. LLAMA_LOG_DEBUG("%s: - writing memory module\n", __func__);
  1512. memory->state_write(io);
  1513. }
  1514. return io.n_bytes();
  1515. }
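// Write order of the state blob produced above: model arch string, output ids
// (count + batch positions), logits (count + values), embeddings (count + values), and the
// memory module state, if present. state_read_data() below consumes the fields in the same order.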
  1516. size_t llama_context::state_read_data(llama_io_read_i & io) {
  1517. LLAMA_LOG_DEBUG("%s: reading state\n", __func__);
  1518. // read model info
  1519. {
  1520. LLAMA_LOG_DEBUG("%s: - reading model info\n", __func__);
  1521. const std::string cur_arch_str = llm_arch_name(model.arch);
  1522. std::string arch_str;
  1523. io.read_string(arch_str);
  1524. if (cur_arch_str != arch_str) {
  1525. throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str()));
  1526. }
  1527. // TODO: add more info which needs to be identical but which is not verified otherwise
  1528. }
  1529. // read output ids
  1530. {
  1531. LLAMA_LOG_DEBUG("%s: - reading output ids\n", __func__);
  1532. auto n_outputs = this->n_outputs;
  1533. io.read_to(&n_outputs, sizeof(n_outputs));
  1534. if (n_outputs > output_reserve(n_outputs)) {
  1535. throw std::runtime_error("could not reserve outputs");
  1536. }
  1537. std::vector<int32_t> output_pos;
  1538. if (n_outputs) {
  1539. output_pos.resize(n_outputs);
  1540. io.read_to(output_pos.data(), n_outputs * sizeof(int32_t));
  1541. for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) {
  1542. int32_t id = output_pos[i];
  1543. if ((uint32_t) id >= n_batch()) {
  1544. throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, n_batch()));
  1545. }
  1546. this->output_ids[id] = i;
  1547. }
  1548. this->n_outputs = n_outputs;
  1549. }
  1550. }
  1551. // read logits
  1552. {
  1553. LLAMA_LOG_DEBUG("%s: - reading logits\n", __func__);
  1554. uint64_t logits_size;
  1555. io.read_to(&logits_size, sizeof(logits_size));
  1556. if (this->logits_size < logits_size) {
  1557. throw std::runtime_error("logits buffer too small");
  1558. }
  1559. if (logits_size) {
  1560. io.read_to(this->logits, logits_size * sizeof(float));
  1561. }
  1562. }
  1563. // read embeddings
  1564. {
  1565. LLAMA_LOG_DEBUG("%s: - reading embeddings\n", __func__);
  1566. uint64_t embd_size;
  1567. io.read_to(&embd_size, sizeof(embd_size));
  1568. if (this->embd_size < embd_size) {
  1569. throw std::runtime_error("embeddings buffer too small");
  1570. }
  1571. if (embd_size) {
  1572. io.read_to(this->embd, embd_size * sizeof(float));
  1573. }
  1574. }
  1575. if (memory) {
  1576. LLAMA_LOG_DEBUG("%s: - reading memory module\n", __func__);
  1577. memory->state_read(io);
  1578. }
  1579. return io.n_bytes();
  1580. }
  1581. size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
  1582. GGML_UNUSED(seq_id);
  1583. if (memory) {
  1584. memory->state_write(io, seq_id, flags);
  1585. }
  1586. return io.n_bytes();
  1587. }
  1588. size_t llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
  1589. GGML_UNUSED(seq_id);
  1590. if (memory) {
  1591. memory->state_read(io, seq_id, flags);
  1592. }
  1593. return io.n_bytes();
  1594. }
  1595. //
  1596. // perf
  1597. //
  1598. llama_perf_context_data llama_context::perf_get_data() const {
  1599. llama_perf_context_data data = {};
  1600. data.t_start_ms = 1e-3 * t_start_us;
  1601. data.t_load_ms = 1e-3 * t_load_us;
  1602. data.t_p_eval_ms = 1e-3 * t_p_eval_us;
  1603. data.t_eval_ms = 1e-3 * t_eval_us;
  1604. data.n_p_eval = std::max(1, n_p_eval);
  1605. data.n_eval = std::max(1, n_eval);
  1606. data.n_reused = std::max(0, n_reused);
  1607. return data;
  1608. }
  1609. void llama_context::perf_reset() {
  1610. t_start_us = ggml_time_us();
  1611. t_eval_us = n_eval = 0;
  1612. t_p_eval_us = n_p_eval = 0;
  1613. n_reused = 0;
  1614. }
  1615. std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> llama_context::memory_breakdown() const {
  1616. std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> ret;
  1617. for (const auto & buft_size : model.memory_breakdown()) {
  1618. ret[buft_size.first].model += buft_size.second;
  1619. }
  1620. for (const auto & buft_size : memory->memory_breakdown()) {
  1621. ret[buft_size.first].context += buft_size.second;
  1622. }
  1623. for (const auto & backend_ptr : backends) {
  1624. ggml_backend_t backend = backend_ptr.get();
  1625. ret[ggml_backend_sched_get_buffer_type(sched.get(), backend)].compute += ggml_backend_sched_get_buffer_size(sched.get(), backend);
  1626. }
  1627. return ret;
  1628. }
  1629. //
  1630. // training
  1631. //
  1632. static void llama_set_param(struct ggml_tensor * tensor, llama_opt_param_filter param_filter, void * userdata) {
  1633. if (!tensor || tensor->type != GGML_TYPE_F32) {
  1634. return;
  1635. }
  1636. if (!param_filter(tensor, userdata)) {
  1637. return;
  1638. }
  1639. if (strcmp(tensor->name, "token_embd.weight") == 0) {
  1640. return; // FIXME
  1641. }
  1642. if (strcmp(tensor->name, "rope_freqs.weight") == 0) {
  1643. return; // FIXME
  1644. }
  1645. ggml_set_param(tensor);
  1646. }
  1647. void llama_context::opt_init(struct llama_model * model, struct llama_opt_params lopt_params) {
  1648. GGML_ASSERT(!opt_ctx);
  1649. model->hparams.n_ctx_train = lopt_params.n_ctx_train > 0 ? lopt_params.n_ctx_train : n_ctx();
  1650. const uint32_t n_batch = std::min(this->n_batch(), model->hparams.n_ctx_train);
  1651. const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch);
  1652. GGML_ASSERT(model->hparams.n_ctx_train % n_batch == 0);
  1653. GGML_ASSERT(n_batch % n_ubatch == 0);
  1654. ggml_opt_params opt_params = ggml_opt_default_params(sched.get(), GGML_OPT_LOSS_TYPE_CROSS_ENTROPY);
  1655. opt_params.opt_period = n_batch / n_ubatch;
  1656. opt_params.get_opt_pars = lopt_params.get_opt_pars;
  1657. opt_params.get_opt_pars_ud = lopt_params.get_opt_pars_ud;
  1658. opt_params.optimizer = lopt_params.optimizer_type;
  1659. opt_ctx = ggml_opt_init(opt_params);
  1660. llama_opt_param_filter param_filter = lopt_params.param_filter;
  1661. void * param_filter_ud = lopt_params.param_filter_ud;
  1662. //llama_set_param(model->tok_embd, param_filter, param_filter_ud); // FIXME
  1663. llama_set_param(model->type_embd, param_filter, param_filter_ud);
  1664. llama_set_param(model->pos_embd, param_filter, param_filter_ud);
  1665. llama_set_param(model->tok_norm, param_filter, param_filter_ud);
  1666. llama_set_param(model->tok_norm_b, param_filter, param_filter_ud);
  1667. llama_set_param(model->output_norm, param_filter, param_filter_ud);
  1668. llama_set_param(model->output_norm_b, param_filter, param_filter_ud);
  1669. llama_set_param(model->output, param_filter, param_filter_ud);
  1670. llama_set_param(model->output_b, param_filter, param_filter_ud);
  1671. llama_set_param(model->output_norm_enc, param_filter, param_filter_ud);
  1672. llama_set_param(model->cls, param_filter, param_filter_ud);
  1673. llama_set_param(model->cls_b, param_filter, param_filter_ud);
  1674. llama_set_param(model->cls_out, param_filter, param_filter_ud);
  1675. llama_set_param(model->cls_out_b, param_filter, param_filter_ud);
  1676. for (struct llama_layer & layer : model->layers) {
  1677. for (size_t i = 0; i < sizeof(layer)/sizeof(struct ggml_tensor *); ++i) {
  1678. llama_set_param(reinterpret_cast<struct ggml_tensor **>(&layer)[i], param_filter, param_filter_ud);
  1679. }
  1680. }
  1681. }
  1682. void llama_context::opt_epoch_iter(
  1683. ggml_opt_dataset_t dataset,
  1684. ggml_opt_result_t result,
  1685. const std::vector<llama_token> & tokens,
  1686. const std::vector<llama_token> & labels_sparse,
  1687. llama_batch & batch,
  1688. ggml_opt_epoch_callback callback,
  1689. bool train,
  1690. int64_t idata_in_loop,
  1691. int64_t ndata_in_loop,
  1692. int64_t t_loop_start) {
  1693. GGML_ASSERT(opt_ctx);
  1694. const uint32_t n_ctx = llama_model_n_ctx_train(&model);
  1695. const uint32_t n_batch = std::min(this->n_batch(), n_ctx);
  1696. const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch);
  1697. memory->clear(true);
  1698. for (uint32_t pos_ctx = 0; pos_ctx < n_ctx; pos_ctx += n_batch) {
  1699. batch.n_tokens = n_batch;
  1700. for (uint32_t pos_batch = 0; pos_batch < n_batch; ++pos_batch) {
  1701. batch.token [pos_batch] = tokens[pos_ctx + pos_batch];
  1702. batch.pos [pos_batch] = pos_ctx + pos_batch;
  1703. batch.n_seq_id[pos_batch] = 1;
  1704. batch.seq_id [pos_batch][0] = 0;
  1705. batch.logits [pos_batch] = true;
  1706. }
  1707. if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) {
  1708. LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
  1709. return;
  1710. }
  1711. const uint32_t n_tokens_all = balloc->get_n_tokens();
  1712. n_queued_tokens += n_tokens_all;
  1713. embd_seq.clear();
  1714. uint32_t n_outputs_all = n_tokens_all;
  1715. auto mctx = memory->init_batch(*balloc, cparams.n_ubatch, true);
  1716. if (!mctx || mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
  1717. LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__);
  1718. break;
  1719. }
  1720. // reserve output buffer
  1721. if (output_reserve(n_outputs_all) < n_outputs_all) {
  1722. LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all);
  1723. GGML_ABORT("TODO: handle this error");
1724. }
  1725. uint32_t pos_batch = 0;
  1726. do {
  1727. const auto & ubatch = mctx->get_ubatch();
  1728. n_outputs = ubatch.n_tokens;
  1729. if (!mctx->apply()) {
  1730. LLAMA_LOG_ERROR("%s: failed to update the memory context\n", __func__);
  1731. break;
  1732. }
  1733. auto * res = gf_res_prev.get();
  1734. const auto gparams = graph_params(res, ubatch, mctx.get(), LLM_GRAPH_TYPE_DEFAULT);
  1735. res->reset();
  1736. auto * gf = model.build_graph(gparams);
  1737. struct ggml_context * ctx_compute_opt;
  1738. {
  1739. const size_t size_gf = ggml_graph_size(gf);
  1740. const size_t size_meta = 4*size_gf*ggml_tensor_overhead() + 2*ggml_graph_overhead_custom(size_gf, /*grads = */ true);
  1741. struct ggml_init_params params = {
  1742. /*.mem_size =*/ size_meta,
  1743. /*.mem_buffer =*/ nullptr,
  1744. /*.no_alloc =*/ true,
  1745. };
  1746. ctx_compute_opt = ggml_init(params);
  1747. }
  1748. ggml_opt_prepare_alloc(opt_ctx, ctx_compute_opt, gf, res->get_tokens(), res->get_logits());
  1749. ggml_opt_alloc(opt_ctx, train);
  1750. res->set_inputs(&ubatch);
  1751. {
  1752. struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
  1753. GGML_ASSERT(labels->ne[1] == n_ubatch);
  1754. ggml_set_zero(labels);
  1755. const float onef = 1.0f;
  1756. for (uint32_t pos_ubatch = 0; pos_ubatch < n_ubatch; ++pos_ubatch) {
  1757. const uint32_t ilabel = pos_ctx + pos_batch + pos_ubatch;
  1758. GGML_ASSERT(labels_sparse[ilabel] < labels->ne[0]);
  1759. ggml_backend_tensor_set(labels, &onef, (pos_ubatch*labels->ne[0] + labels_sparse[ilabel])*sizeof(float), sizeof(float));
  1760. }
  1761. }
  1762. ggml_opt_eval(opt_ctx, result);
  1763. if (callback) {
  1764. callback(train, opt_ctx, dataset, result, idata_in_loop + (pos_ctx + pos_batch)/n_ubatch + 1, ndata_in_loop, t_loop_start);
  1765. }
  1766. ggml_free(ctx_compute_opt);
  1767. pos_batch += ubatch.n_tokens;
  1768. } while (mctx->next());
  1769. }
  1770. }
  1771. void llama_context::opt_epoch(
  1772. ggml_opt_dataset_t dataset,
  1773. ggml_opt_result_t result_train,
  1774. ggml_opt_result_t result_eval,
  1775. int64_t idata_split,
  1776. ggml_opt_epoch_callback callback_train,
  1777. ggml_opt_epoch_callback callback_eval) {
  1778. const uint32_t n_ctx = this->n_ctx();
  1779. const uint32_t n_batch = std::min(cparams.n_batch, n_ctx);
  1780. const uint32_t n_ubatch = std::min(cparams.n_ubatch, n_batch);
  1781. const int64_t ndata = ggml_opt_dataset_ndata(dataset);
  1782. GGML_ASSERT(idata_split >= 0);
  1783. GGML_ASSERT(idata_split <= ndata);
  1784. const uint32_t ubatch_per_ctx = n_ctx / n_ubatch;
  1785. struct llama_batch batch = llama_batch_init(n_batch, 0, 1);
  1786. std::vector<llama_token> tokens(n_ctx);
  1787. std::vector<llama_token> labels_sparse(n_ctx);
  1788. int64_t idata = 0;
  1789. int64_t t_loop_start = ggml_time_us();
  1790. int64_t ndata_in_loop = idata_split*ubatch_per_ctx;
  1791. for (; idata < idata_split; ++idata) {
  1792. constexpr bool train = true;
  1793. const int64_t idata_in_loop = idata*ubatch_per_ctx;
  1794. ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata);
  1795. opt_epoch_iter(dataset, result_train, tokens, labels_sparse, batch,
  1796. callback_train, train, idata_in_loop, ndata_in_loop, t_loop_start);
  1797. }
  1798. t_loop_start = ggml_time_us();
  1799. ndata_in_loop = (ndata - idata_split)*ubatch_per_ctx;
  1800. for (; idata < ndata; ++idata) {
  1801. constexpr bool train = false;
  1802. const int64_t idata_in_loop = (idata - idata_split)*ubatch_per_ctx;
  1803. ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata);
  1804. opt_epoch_iter(dataset, result_eval, tokens, labels_sparse, batch,
  1805. callback_eval, train, idata_in_loop, ndata_in_loop, t_loop_start);
  1806. }
  1807. llama_batch_free(batch);
  1808. }
  1809. //
  1810. // interface implementation
  1811. //
  1812. llama_context_params llama_context_default_params() {
  1813. llama_context_params result = {
  1814. /*.n_ctx =*/ 512,
  1815. /*.n_batch =*/ 2048,
  1816. /*.n_ubatch =*/ 512,
  1817. /*.n_seq_max =*/ 1,
  1818. /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
  1819. /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
  1820. /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
  1821. /*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
  1822. /*.attention_type =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED,
  1823. /*.flash_attn_type =*/ LLAMA_FLASH_ATTN_TYPE_AUTO,
  1824. /*.rope_freq_base =*/ 0.0f,
  1825. /*.rope_freq_scale =*/ 0.0f,
  1826. /*.yarn_ext_factor =*/ -1.0f,
  1827. /*.yarn_attn_factor =*/ -1.0f,
  1828. /*.yarn_beta_fast =*/ -1.0f,
  1829. /*.yarn_beta_slow =*/ -1.0f,
  1830. /*.yarn_orig_ctx =*/ 0,
  1831. /*.defrag_thold =*/ -1.0f,
  1832. /*.cb_eval =*/ nullptr,
  1833. /*.cb_eval_user_data =*/ nullptr,
  1834. /*.type_k =*/ GGML_TYPE_F16,
  1835. /*.type_v =*/ GGML_TYPE_F16,
  1836. /*.abort_callback =*/ nullptr,
  1837. /*.abort_callback_data =*/ nullptr,
  1838. /*.embeddings =*/ false,
  1839. /*.offload_kqv =*/ true,
  1840. /*.no_perf =*/ true,
  1841. /*.op_offload =*/ true,
  1842. /*.swa_full =*/ true,
  1843. /*.kv_unified =*/ false,
  1844. };
  1845. return result;
  1846. }
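// Illustrative usage sketch (assumes a previously loaded `llama_model * model`; field values
// are examples, not recommendations):
//
//     llama_context_params cparams = llama_context_default_params();
//     cparams.n_ctx     = 4096;
//     cparams.n_batch   = 512;
//     cparams.n_threads = 8;
//     llama_context * ctx = llama_init_from_model(model, cparams);
//     // ... use ctx ...
//     llama_free(ctx);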
  1847. llama_context * llama_init_from_model(
  1848. llama_model * model,
  1849. llama_context_params params) {
  1850. if (!model) {
  1851. LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__);
  1852. return nullptr;
  1853. }
  1854. if (params.n_batch == 0 && params.n_ubatch == 0) {
  1855. LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__);
  1856. return nullptr;
  1857. }
  1858. if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) {
  1859. LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__);
  1860. return nullptr;
  1861. }
  1862. if (params.flash_attn_type != LLAMA_FLASH_ATTN_TYPE_DISABLED && model->arch == LLM_ARCH_GROK) {
  1863. LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
  1864. params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
  1865. }
  1866. if (params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO && ggml_is_quantized(params.type_k)) {
  1867. const uint32_t blck_size = ggml_blck_size(params.type_k);
  1868. if (model->hparams.n_embd_head_k % blck_size != 0) {
  1869. LLAMA_LOG_ERROR("%s: K cache type %s with block size %u does not divide n_embd_head_k=%u\n",
  1870. __func__, ggml_type_name(params.type_k), blck_size, model->hparams.n_embd_head_k);
  1871. return nullptr;
  1872. }
  1873. }
  1874. if (params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO && ggml_is_quantized(params.type_v)) {
  1875. const uint32_t blck_size = ggml_blck_size(params.type_v);
  1876. if (model->hparams.n_embd_head_v % blck_size != 0) {
1877. LLAMA_LOG_ERROR("%s: V cache type %s with block size %u does not divide n_embd_head_v=%u\n",
1878. __func__, ggml_type_name(params.type_v), blck_size, model->hparams.n_embd_head_v);
  1879. return nullptr;
  1880. }
  1881. }
  1882. if (ggml_is_quantized(params.type_v) && params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_DISABLED) {
  1883. LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
  1884. return nullptr;
  1885. }
  1886. if (params.pooling_type != LLAMA_POOLING_TYPE_UNSPECIFIED &&
  1887. params.pooling_type != model->hparams.pooling_type) {
1888. // user-specified pooling type is different from the model default
  1889. LLAMA_LOG_WARN("%s: model default pooling_type is [%d], but [%d] was specified\n", __func__,
  1890. model->hparams.pooling_type, params.pooling_type);
  1891. }
  1892. try {
  1893. auto * ctx = new llama_context(*model, params);
  1894. return ctx;
  1895. } catch (const std::exception & err) {
  1896. LLAMA_LOG_ERROR("%s: failed to initialize the context: %s\n", __func__, err.what());
  1897. }
  1898. return nullptr;
  1899. }
  1900. // deprecated
  1901. llama_context * llama_new_context_with_model(
  1902. llama_model * model,
  1903. llama_context_params params) {
  1904. return llama_init_from_model(model, params);
  1905. }
  1906. void llama_free(llama_context * ctx) {
  1907. delete ctx;
  1908. }
  1909. uint32_t llama_n_ctx(const llama_context * ctx) {
  1910. return ctx->n_ctx();
  1911. }
  1912. uint32_t llama_n_batch(const llama_context * ctx) {
  1913. return ctx->n_batch();
  1914. }
  1915. uint32_t llama_n_ubatch(const llama_context * ctx) {
  1916. return ctx->n_ubatch();
  1917. }
  1918. uint32_t llama_n_seq_max(const llama_context * ctx) {
  1919. return ctx->n_seq_max();
  1920. }
  1921. const llama_model * llama_get_model(const llama_context * ctx) {
  1922. return &ctx->get_model();
  1923. }
  1924. enum llama_pooling_type llama_pooling_type(const llama_context * ctx) {
  1925. return ctx->pooling_type();
  1926. }
  1927. void llama_attach_threadpool(
  1928. llama_context * ctx,
  1929. ggml_threadpool_t threadpool,
  1930. ggml_threadpool_t threadpool_batch) {
  1931. ctx->attach_threadpool(threadpool, threadpool_batch);
  1932. }
  1933. void llama_detach_threadpool(llama_context * ctx) {
  1934. ctx->detach_threadpool();
  1935. }
  1936. void llama_set_n_threads(llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) {
  1937. ctx->set_n_threads(n_threads, n_threads_batch);
  1938. }
  1939. int32_t llama_n_threads(llama_context * ctx) {
  1940. return ctx->n_threads();
  1941. }
  1942. int32_t llama_n_threads_batch(llama_context * ctx) {
  1943. return ctx->n_threads_batch();
  1944. }
  1945. void llama_set_abort_callback(llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) {
  1946. ctx->set_abort_callback(abort_callback, abort_callback_data);
  1947. }
  1948. void llama_set_embeddings(llama_context * ctx, bool embeddings) {
  1949. ctx->set_embeddings(embeddings);
  1950. }
  1951. void llama_set_causal_attn(llama_context * ctx, bool causal_attn) {
  1952. ctx->set_causal_attn(causal_attn);
  1953. }
  1954. void llama_set_warmup(llama_context * ctx, bool warmup) {
  1955. ctx->set_warmup(warmup);
  1956. }
  1957. void llama_synchronize(llama_context * ctx) {
  1958. ctx->synchronize();
  1959. }
  1960. float * llama_get_logits(llama_context * ctx) {
  1961. ctx->synchronize();
  1962. return ctx->get_logits();
  1963. }
  1964. float * llama_get_logits_ith(llama_context * ctx, int32_t i) {
  1965. ctx->synchronize();
  1966. return ctx->get_logits_ith(i);
  1967. }
  1968. float * llama_get_embeddings(llama_context * ctx) {
  1969. ctx->synchronize();
  1970. return ctx->get_embeddings();
  1971. }
  1972. float * llama_get_embeddings_ith(llama_context * ctx, int32_t i) {
  1973. ctx->synchronize();
  1974. return ctx->get_embeddings_ith(i);
  1975. }
  1976. float * llama_get_embeddings_seq(llama_context * ctx, llama_seq_id seq_id) {
  1977. ctx->synchronize();
  1978. return ctx->get_embeddings_seq(seq_id);
  1979. }
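// Illustrative sketch: reading per-output results after a successful decode. `i` is the index
// of a batch position that requested output (the example assumes such a position exists);
// passing -1 is commonly used to refer to the last output.
//
//     const float * logits_i = llama_get_logits_ith(ctx, i);      // vocab-sized logits row
//     const float * embd_i   = llama_get_embeddings_ith(ctx, i);  // requires embeddings enabled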
  1980. // llama adapter API
  1981. int32_t llama_set_adapter_lora(
  1982. llama_context * ctx,
  1983. llama_adapter_lora * adapter,
  1984. float scale) {
  1985. ctx->set_adapter_lora(adapter, scale);
  1986. return 0;
  1987. }
  1988. int32_t llama_rm_adapter_lora(
  1989. llama_context * ctx,
  1990. llama_adapter_lora * adapter) {
  1991. bool res = ctx->rm_adapter_lora(adapter);
  1992. return res ? 0 : -1;
  1993. }
  1994. void llama_clear_adapter_lora(llama_context * ctx) {
  1995. ctx->clear_adapter_lora();
  1996. }
  1997. int32_t llama_apply_adapter_cvec(
  1998. llama_context * ctx,
  1999. const float * data,
  2000. size_t len,
  2001. int32_t n_embd,
  2002. int32_t il_start,
  2003. int32_t il_end) {
  2004. bool res = ctx->apply_adapter_cvec(data, len, n_embd, il_start, il_end);
  2005. return res ? 0 : -1;
  2006. }
  2007. //
  2008. // memory
  2009. //
  2010. llama_memory_t llama_get_memory(const struct llama_context * ctx) {
  2011. return ctx->get_memory();
  2012. }
  2013. void llama_memory_clear(llama_memory_t mem, bool data) {
  2014. if (!mem) {
  2015. return;
  2016. }
  2017. mem->clear(data);
  2018. }
  2019. bool llama_memory_seq_rm(
  2020. llama_memory_t mem,
  2021. llama_seq_id seq_id,
  2022. llama_pos p0,
  2023. llama_pos p1) {
  2024. if (!mem) {
  2025. return true;
  2026. }
  2027. return mem->seq_rm(seq_id, p0, p1);
  2028. }
  2029. void llama_memory_seq_cp(
  2030. llama_memory_t mem,
  2031. llama_seq_id seq_id_src,
  2032. llama_seq_id seq_id_dst,
  2033. llama_pos p0,
  2034. llama_pos p1) {
  2035. if (!mem) {
  2036. return;
  2037. }
  2038. mem->seq_cp(seq_id_src, seq_id_dst, p0, p1);
  2039. }
  2040. void llama_memory_seq_keep(
  2041. llama_memory_t mem,
  2042. llama_seq_id seq_id) {
  2043. if (!mem) {
  2044. return;
  2045. }
  2046. mem->seq_keep(seq_id);
  2047. }
  2048. void llama_memory_seq_add(
  2049. llama_memory_t mem,
  2050. llama_seq_id seq_id,
  2051. llama_pos p0,
  2052. llama_pos p1,
  2053. llama_pos delta) {
  2054. if (!mem) {
  2055. return;
  2056. }
  2057. mem->seq_add(seq_id, p0, p1, delta);
  2058. }
  2059. void llama_memory_seq_div(
  2060. llama_memory_t mem,
  2061. llama_seq_id seq_id,
  2062. llama_pos p0,
  2063. llama_pos p1,
  2064. int d) {
  2065. if (!mem) {
  2066. return;
  2067. }
  2068. mem->seq_div(seq_id, p0, p1, d);
  2069. }
  2070. llama_pos llama_memory_seq_pos_min(
  2071. llama_memory_t mem,
  2072. llama_seq_id seq_id) {
  2073. if (!mem) {
  2074. return -1;
  2075. }
  2076. return mem->seq_pos_min(seq_id);
  2077. }
  2078. llama_pos llama_memory_seq_pos_max(
  2079. llama_memory_t mem,
  2080. llama_seq_id seq_id) {
  2081. if (!mem) {
  2082. return -1;
  2083. }
  2084. return mem->seq_pos_max(seq_id);
  2085. }
  2086. bool llama_memory_can_shift(llama_memory_t mem) {
  2087. if (!mem) {
  2088. return false;
  2089. }
  2090. return mem->get_can_shift();
  2091. }
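// Illustrative sketch: erasing part of a sequence and shifting the remainder with the memory
// API above. Sequence id and positions are examples only; ranges are half-open [p0, p1), and
// p1 < 0 conventionally means "to the end of the sequence".
//
//     llama_memory_t mem = llama_get_memory(ctx);
//     llama_memory_seq_rm(mem, /*seq_id =*/ 0, /*p0 =*/ 10, /*p1 =*/ 20);
//     if (llama_memory_can_shift(mem)) {
//         llama_memory_seq_add(mem, 0, 20, -1, -10); // shift the tail back by 10 positions
//     }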
  2092. // llama state API
  2093. // deprecated
  2094. size_t llama_get_state_size(llama_context * ctx) {
  2095. return llama_state_get_size(ctx);
  2096. }
  2097. // deprecated
  2098. size_t llama_copy_state_data(llama_context * ctx, uint8_t * dst) {
  2099. return llama_state_get_data(ctx, dst, -1);
  2100. }
  2101. // deprecated
  2102. size_t llama_set_state_data(llama_context * ctx, const uint8_t * src) {
  2103. return llama_state_set_data(ctx, src, -1);
  2104. }
  2105. // deprecated
  2106. bool llama_load_session_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  2107. return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
  2108. }
  2109. // deprecated
  2110. bool llama_save_session_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
  2111. return llama_state_save_file(ctx, path_session, tokens, n_token_count);
  2112. }
2113. // Returns the *actual* size of the state.
2114. // Intended to be used when saving the state to a buffer.
  2115. size_t llama_state_get_size(llama_context * ctx) {
  2116. return ctx->state_get_size();
  2117. }
  2118. size_t llama_state_get_data(llama_context * ctx, uint8_t * dst, size_t size) {
  2119. ctx->synchronize();
  2120. return ctx->state_get_data(dst, size);
  2121. }
  2122. // Sets the state reading from the specified source address
  2123. size_t llama_state_set_data(llama_context * ctx, const uint8_t * src, size_t size) {
  2124. ctx->synchronize();
  2125. return ctx->state_set_data(src, size);
  2126. }
  2127. bool llama_state_load_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  2128. ctx->synchronize();
  2129. try {
  2130. return ctx->state_load_file(path_session, tokens_out, n_token_capacity, n_token_count_out);
  2131. } catch (const std::exception & err) {
  2132. LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what());
  2133. return false;
  2134. }
  2135. }
  2136. bool llama_state_save_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
  2137. ctx->synchronize();
  2138. try {
  2139. return ctx->state_save_file(path_session, tokens, n_token_count);
  2140. } catch (const std::exception & err) {
  2141. LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what());
  2142. return false;
  2143. }
  2144. }
  2145. size_t llama_state_seq_get_size(llama_context * ctx, llama_seq_id seq_id) {
  2146. return llama_state_seq_get_size_ext(ctx, seq_id, 0);
  2147. }
  2148. size_t llama_state_seq_get_data(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) {
  2149. return llama_state_seq_get_data_ext(ctx, dst, size, seq_id, 0);
  2150. }
  2151. size_t llama_state_seq_set_data(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id) {
  2152. return llama_state_seq_set_data_ext(ctx, src, size, seq_id, 0);
  2153. }
  2154. size_t llama_state_seq_get_size_ext(llama_context * ctx, llama_seq_id seq_id, llama_state_seq_flags flags) {
  2155. return ctx->state_seq_get_size(seq_id, flags);
  2156. }
  2157. size_t llama_state_seq_get_data_ext(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id, llama_state_seq_flags flags) {
  2158. ctx->synchronize();
  2159. return ctx->state_seq_get_data(seq_id, dst, size, flags);
  2160. }
  2161. size_t llama_state_seq_set_data_ext(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id, llama_state_seq_flags flags) {
  2162. ctx->synchronize();
  2163. return ctx->state_seq_set_data(seq_id, src, size, flags);
  2164. }
  2165. size_t llama_state_seq_save_file(llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
  2166. ctx->synchronize();
  2167. try {
  2168. return ctx->state_seq_save_file(seq_id, filepath, tokens, n_token_count);
  2169. } catch (const std::exception & err) {
  2170. LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what());
  2171. return 0;
  2172. }
  2173. }
  2174. size_t llama_state_seq_load_file(llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  2175. ctx->synchronize();
  2176. try {
  2177. return ctx->state_seq_load_file(dest_seq_id, filepath, tokens_out, n_token_capacity, n_token_count_out);
  2178. } catch (const std::exception & err) {
  2179. LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what());
  2180. return 0;
  2181. }
  2182. }
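// Illustrative sketch: copying a single sequence's state between two contexts that share the
// same model. `src_ctx`, `dst_ctx`, `src_seq` and `dst_seq` are assumptions of the example.
//
//     std::vector<uint8_t> buf(llama_state_seq_get_size(src_ctx, src_seq));
//     llama_state_seq_get_data(src_ctx, buf.data(), buf.size(), src_seq);
//     llama_state_seq_set_data(dst_ctx, buf.data(), buf.size(), dst_seq);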
  2183. ///
  2184. int32_t llama_encode(
  2185. llama_context * ctx,
  2186. llama_batch batch) {
  2187. const int ret = ctx->encode(batch);
  2188. if (ret != 0) {
  2189. LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret);
  2190. }
  2191. return ret;
  2192. }
  2193. int32_t llama_decode(
  2194. llama_context * ctx,
  2195. llama_batch batch) {
  2196. const int ret = ctx->decode(batch);
  2197. if (ret != 0 && ret != 1) {
  2198. LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
  2199. }
  2200. return ret;
  2201. }
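// Illustrative sketch of the decode return convention (batch construction is assumed to happen
// elsewhere): 0 means success, a positive value signals a recoverable condition (for example,
// no free slot in the memory for the batch), and a negative value is an error.
//
//     const int ret = llama_decode(ctx, batch);
//     if (ret < 0) {
//         // fatal - abort or report the error
//     } else if (ret > 0) {
//         // recoverable - e.g. retry with a smaller batch
//     } else {
//         const float * logits = llama_get_logits_ith(ctx, -1); // sample from the last output
//     }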
  2202. //
  2203. // perf
  2204. //
  2205. llama_perf_context_data llama_perf_context(const llama_context * ctx) {
  2206. llama_perf_context_data data = {};
  2207. if (ctx == nullptr) {
  2208. return data;
  2209. }
  2210. data = ctx->perf_get_data();
  2211. return data;
  2212. }
  2213. void llama_perf_context_print(const llama_context * ctx) {
  2214. const auto data = llama_perf_context(ctx);
  2215. const double t_end_ms = 1e-3 * ggml_time_us();
  2216. LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, data.t_load_ms);
  2217. LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
  2218. __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval);
  2219. LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
  2220. __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval);
  2221. LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval));
  2222. LLAMA_LOG_INFO("%s: graphs reused = %10d\n", __func__, data.n_reused);
  2223. }
  2224. void llama_perf_context_reset(llama_context * ctx) {
  2225. ctx->perf_reset();
  2226. }
  2227. void llama_memory_breakdown_print(const struct llama_context * ctx) {
  2228. const std::vector<ggml_backend_dev_t> & devices = ctx->get_model().devices;
  2229. std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> memory_breakdown = ctx->memory_breakdown();
  2230. std::vector<std::array<std::string, 9>> table_data;
  2231. table_data.reserve(devices.size());
  2232. const std::string template_header = "%s: | %s | %s %s %s %s %s %s %s |\n";
  2233. const std::string template_gpu = "%s: | %s | %s = %s + (%s = %s + %s + %s) + %s |\n";
  2234. const std::string template_other = "%s: | %s | %s %s %s = %s + %s + %s %s |\n";
  2235. table_data.push_back({template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"});
  2236. constexpr size_t MiB = 1024 * 1024;
  2237. const std::vector<std::string> desc_prefixes_strip = {"NVIDIA ", "GeForce ", "Tesla ", "AMD ", "Radeon ", "Instinct "};
  2238. // track seen buffer types to avoid double counting:
  2239. std::set<ggml_backend_buffer_type_t> seen_buffer_types;
  2240. // accumulative memory breakdown for each device and for host:
  2241. std::vector<llama_memory_breakdown_data> mb_dev(devices.size());
  2242. llama_memory_breakdown_data mb_host;
  2243. for (const auto & buft_mb : memory_breakdown) {
  2244. ggml_backend_buffer_type_t buft = buft_mb.first;
  2245. const llama_memory_breakdown_data & mb = buft_mb.second;
  2246. if (ggml_backend_buft_is_host(buft)) {
  2247. mb_host.model += mb.model;
  2248. mb_host.context += mb.context;
  2249. mb_host.compute += mb.compute;
  2250. seen_buffer_types.insert(buft);
  2251. continue;
  2252. }
  2253. ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
  2254. if (dev) {
  2255. int i_dev = -1;
  2256. for (size_t i = 0; i < devices.size(); i++) {
  2257. if (devices[i] == dev) {
  2258. i_dev = i;
  2259. break;
  2260. }
  2261. }
  2262. if (i_dev != -1) {
  2263. mb_dev[i_dev].model += mb.model;
  2264. mb_dev[i_dev].context += mb.context;
  2265. mb_dev[i_dev].compute += mb.compute;
  2266. seen_buffer_types.insert(buft);
  2267. continue;
  2268. }
  2269. }
  2270. }
  2271. // print memory breakdown for each device:
  2272. for (size_t i = 0; i < devices.size(); i++) {
  2273. ggml_backend_dev_t dev = devices[i];
  2274. llama_memory_breakdown_data mb = mb_dev[i];
  2275. const std::string name = ggml_backend_dev_name(dev);
  2276. std::string desc = ggml_backend_dev_description(dev);
  2277. for (const std::string & prefix : desc_prefixes_strip) {
  2278. if (desc.length() >= prefix.length() && desc.substr(0, prefix.length()) == prefix) {
  2279. desc = desc.substr(prefix.length());
  2280. }
  2281. }
  2282. size_t free, total;
  2283. ggml_backend_dev_memory(dev, &free, &total);
  2284. const size_t self = mb.model + mb.context + mb.compute;
  2285. const size_t unaccounted = total - self - free;
  2286. table_data.push_back({
  2287. template_gpu,
  2288. " - " + name + " (" + desc + ")",
  2289. std::to_string(total / MiB),
  2290. std::to_string(free / MiB),
  2291. std::to_string(self / MiB),
  2292. std::to_string(mb.model / MiB),
  2293. std::to_string(mb.context / MiB),
  2294. std::to_string(mb.compute / MiB),
  2295. std::to_string(unaccounted / MiB)});
  2296. }
  2297. // print memory breakdown for host:
  2298. {
  2299. const size_t self = mb_host.model + mb_host.context + mb_host.compute;
  2300. table_data.push_back({
  2301. template_other,
  2302. " - Host",
  2303. "", // total
  2304. "", // free
  2305. std::to_string(self / MiB),
  2306. std::to_string(mb_host.model / MiB),
  2307. std::to_string(mb_host.context / MiB),
  2308. std::to_string(mb_host.compute / MiB),
  2309. ""}); // unaccounted
  2310. }
  2311. // print memory breakdown for all remaining buffer types:
  2312. for (const auto & buft_mb : memory_breakdown) {
  2313. ggml_backend_buffer_type_t buft = buft_mb.first;
  2314. const llama_memory_breakdown_data & mb = buft_mb.second;
  2315. if (seen_buffer_types.count(buft) == 1) {
  2316. continue;
  2317. }
  2318. const std::string name = ggml_backend_buft_name(buft);
  2319. const size_t self = mb.model + mb.context + mb.compute;
  2320. table_data.push_back({
  2321. template_other,
  2322. " - " + name,
  2323. "", // total
  2324. "", // free
  2325. std::to_string(self / MiB),
  2326. std::to_string(mb.model / MiB),
  2327. std::to_string(mb.context / MiB),
  2328. std::to_string(mb.compute / MiB),
  2329. ""}); // unaccounted
  2330. seen_buffer_types.insert(buft);
  2331. }
  2332. for (size_t j = 1; j < table_data[0].size(); j++) {
  2333. size_t max_len = 0;
  2334. for (const auto & td : table_data) {
  2335. max_len = std::max(max_len, td[j].length());
  2336. }
  2337. for (auto & td : table_data) {
  2338. td[j].insert(j == 1 ? td[j].length() : 0, max_len - td[j].length(), ' ');
  2339. }
  2340. }
  2341. for (const auto & td : table_data) {
  2342. LLAMA_LOG_INFO(td[0].c_str(),
  2343. __func__, td[1].c_str(), td[2].c_str(), td[3].c_str(), td[4].c_str(), td[5].c_str(),
  2344. td[6].c_str(), td[7].c_str(), td[8].c_str());
  2345. }
  2346. }
  2347. //
  2348. // training
  2349. //
  2350. bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata) {
  2351. GGML_UNUSED(tensor);
  2352. GGML_UNUSED(userdata);
  2353. return true;
  2354. }
  2355. void llama_opt_init(struct llama_context * ctx, struct llama_model * model, struct llama_opt_params lopt_params) {
  2356. ctx->opt_init(model, lopt_params);
  2357. }
  2358. void llama_opt_epoch(
  2359. struct llama_context * ctx,
  2360. ggml_opt_dataset_t dataset,
  2361. ggml_opt_result_t result_train,
  2362. ggml_opt_result_t result_eval,
  2363. int64_t idata_split,
  2364. ggml_opt_epoch_callback callback_train,
  2365. ggml_opt_epoch_callback callback_eval) {
  2366. ctx->opt_epoch(
  2367. dataset,
  2368. result_train,
  2369. result_eval,
  2370. idata_split,
  2371. callback_train,
  2372. callback_eval);
  2373. }
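// Illustrative sketch of the training entry points above. The dataset/result objects come from
// the ggml-opt API; fields not shown (get_opt_pars, optimizer_type, ...) must be filled in for
// real use - the values here are placeholders only.
//
//     llama_opt_params oparams = {};
//     oparams.param_filter = llama_opt_param_filter_all;
//     llama_opt_init(ctx, model, oparams);
//     llama_opt_epoch(ctx, dataset, result_train, result_eval,
//                     /*idata_split =*/ ndata/2, callback_train, callback_eval);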