- #include "llama-context.h"
- #include "llama-impl.h"
- #include "llama-io.h"
- #include "llama-mmap.h"
- #include "llama-model.h"
- #include "llama-kv-cache.h"
- #include <cinttypes>
- #include <cstring>
- #include <limits>
- #include <stdexcept>
- //
- // llama_context
- //
- llama_context::llama_context(
- const llama_model & model,
- llama_context_params params) :
- model(model) {
- LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__);
- t_start_us = model.t_start_us;
- t_load_us = model.t_load_us;
- const auto & hparams = model.hparams;
- cparams.n_seq_max = std::max(1u, params.n_seq_max);
- if (cparams.n_seq_max > LLAMA_MAX_PARALLEL_SEQUENCES) {
- throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_PARALLEL_SEQUENCES));
- }
- cparams.n_threads = params.n_threads;
- cparams.n_threads_batch = params.n_threads_batch;
- cparams.yarn_ext_factor = params.yarn_ext_factor;
- cparams.yarn_attn_factor = params.yarn_attn_factor;
- cparams.yarn_beta_fast = params.yarn_beta_fast;
- cparams.yarn_beta_slow = params.yarn_beta_slow;
- cparams.defrag_thold = params.defrag_thold;
- cparams.embeddings = params.embeddings;
- cparams.offload_kqv = params.offload_kqv;
- cparams.flash_attn = params.flash_attn;
- cparams.no_perf = params.no_perf;
- cparams.pooling_type = params.pooling_type;
- cparams.warmup = false;
- cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
- cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
- cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
- cparams.n_ctx_orig_yarn = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
- hparams.n_ctx_orig_yarn != 0 ? hparams.n_ctx_orig_yarn :
- hparams.n_ctx_train;
- cparams.cb_eval = params.cb_eval;
- cparams.cb_eval_user_data = params.cb_eval_user_data;
- auto rope_scaling_type = params.rope_scaling_type;
- if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) {
- rope_scaling_type = hparams.rope_scaling_type_train;
- }
- if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) {
- cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
- }
- if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
- cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f;
- }
- cparams.yarn_attn_factor *= hparams.rope_attn_factor;
- if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
- if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
- cparams.pooling_type = LLAMA_POOLING_TYPE_NONE;
- } else {
- cparams.pooling_type = hparams.pooling_type;
- }
- }
- if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) {
- cparams.causal_attn = hparams.causal_attn;
- } else {
- cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL;
- }
- // with causal attention, the batch size is limited by the context size
- cparams.n_batch = cparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
- // the batch size has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask
- // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext)
- // ref: https://github.com/ggerganov/llama.cpp/pull/5021
- // TODO: this padding is not needed for the cache-less context so we should probably move it to llama_context_kv_self
- if (cparams.n_batch < GGML_KQ_MASK_PAD) {
- LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD);
- cparams.n_batch = GGML_KQ_MASK_PAD;
- }
- cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
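- // Illustrative sketch (assumed public-API values, not taken from this file):
- // with causal attention, the sizing above resolves as
- //
- //     llama_context_params cp = llama_context_default_params();
- //     cp.n_ctx    = 4096;  // total context size
- //     cp.n_batch  = 2048;  // max tokens per llama_decode() call
- //     cp.n_ubatch = 512;   // max tokens per micro-batch
- //     // n_batch  = min(n_ctx, n_batch)    = 2048
- //     // n_ubatch = min(n_batch, n_ubatch) = 512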
- cparams.op_offload = params.op_offload;
- const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max;
- LLAMA_LOG_INFO("%s: n_seq_max = %u\n", __func__, cparams.n_seq_max);
- LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
- LLAMA_LOG_INFO("%s: n_ctx_per_seq = %u\n", __func__, n_ctx_per_seq);
- LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch);
- LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch);
- LLAMA_LOG_INFO("%s: causal_attn = %d\n", __func__, cparams.causal_attn);
- LLAMA_LOG_INFO("%s: flash_attn = %d\n", __func__, cparams.flash_attn);
- LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
- LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
- if (n_ctx_per_seq < hparams.n_ctx_train) {
- LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) < n_ctx_train (%u) -- the full capacity of the model will not be utilized\n",
- __func__, n_ctx_per_seq, hparams.n_ctx_train);
- }
- if (n_ctx_per_seq > hparams.n_ctx_train) {
- LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) > n_ctx_train (%u) -- possible training context overflow\n",
- __func__, n_ctx_per_seq, hparams.n_ctx_train);
- }
- if (!params.swa_full && cparams.n_seq_max > 1) {
- LLAMA_LOG_WARN("%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded: %s\n",
- __func__, cparams.n_seq_max, "https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573");
- }
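- // Worked example (illustrative): with n_ctx = 8192 and n_seq_max = 4, each
- // sequence gets n_ctx_per_seq = 8192 / 4 = 2048 positions; for a model with
- // n_ctx_train = 4096 the first warning above fires, since 2048 < 4096 means
- // no single sequence can ever use the model's full trained context.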
- if (!hparams.vocab_only) {
- // GPU backends
- for (auto * dev : model.devices) {
- ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
- if (backend == nullptr) {
- throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
- }
- backends.emplace_back(backend);
- }
- // add ACCEL backends (such as BLAS)
- for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
- ggml_backend_dev_t dev = ggml_backend_dev_get(i);
- if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
- ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
- if (backend == nullptr) {
- throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
- }
- backends.emplace_back(backend);
- }
- }
- // add CPU backend
- backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
- if (backend_cpu == nullptr) {
- throw std::runtime_error("failed to initialize CPU backend");
- }
- backends.emplace_back(backend_cpu);
- // create a list of the set_n_threads functions in the backends
- for (auto & backend : backends) {
- ggml_backend_dev_t dev = ggml_backend_get_device(backend.get());
- ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
- if (reg) {
- auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
- if (ggml_backend_set_n_threads_fn) {
- set_n_threads_fns.emplace_back(backend.get(), ggml_backend_set_n_threads_fn);
- }
- }
- }
- llama_set_abort_callback(this, params.abort_callback, params.abort_callback_data);
- // graph outputs buffer
- {
- // resized during inference when a batch uses more outputs
- if ((uint32_t) output_reserve(params.n_seq_max) < params.n_seq_max) {
- throw std::runtime_error("failed to reserve initial output buffer");
- }
- LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__,
- ggml_backend_buffer_name (buf_output.get()),
- ggml_backend_buffer_get_size(buf_output.get()) / 1024.0 / 1024.0);
- }
- }
- // init the memory module
- if (!hparams.vocab_only) {
- llama_memory_params params_mem = {
- /*.type_k =*/ params.type_k,
- /*.type_v =*/ params.type_v,
- /*.swa_full =*/ params.swa_full,
- };
- memory.reset(model.create_memory(params_mem, cparams));
- }
- // init backends
- if (!hparams.vocab_only) {
- LLAMA_LOG_DEBUG("%s: enumerating backends\n", __func__);
- backend_buft.clear();
- backend_ptrs.clear();
- for (auto & backend : backends) {
- auto * buft = ggml_backend_get_default_buffer_type(backend.get());
- auto backend_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get()));
- if (backend_type == GGML_BACKEND_DEVICE_TYPE_CPU && !model.devices.empty()) {
- // for the CPU backend, use the host buffer of the first device for faster transfer of the intermediate state
- auto * dev = model.devices[0];
- auto * host_buft = ggml_backend_dev_host_buffer_type(dev);
- if (host_buft) {
- buft = host_buft;
- }
- }
- backend_buft.push_back(buft);
- backend_ptrs.push_back(backend.get());
- }
- LLAMA_LOG_DEBUG("%s: backend_ptrs.size() = %zu\n", __func__, backend_ptrs.size());
- const size_t max_nodes = this->graph_max_nodes();
- LLAMA_LOG_DEBUG("%s: max_nodes = %zu\n", __func__, max_nodes);
- // buffer used to store the computation graph and the tensor meta data
- buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false));
- // TODO: move these checks to ggml_backend_sched
- // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
- bool pipeline_parallel =
- model.n_devices() > 1 &&
- model.params.n_gpu_layers > (int) model.hparams.n_layer &&
- model.params.split_mode == LLAMA_SPLIT_MODE_LAYER &&
- cparams.offload_kqv &&
- !model.has_tensor_overrides();
- // pipeline parallelism requires support for async compute and events in all devices
- if (pipeline_parallel) {
- for (auto & backend : backends) {
- auto dev_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get()));
- if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU) {
- // ignore CPU backend
- continue;
- }
- auto * dev = ggml_backend_get_device(backend.get());
- ggml_backend_dev_props props;
- ggml_backend_dev_get_props(dev, &props);
- if (!props.caps.async || !props.caps.events) {
- // device does not support async compute or events
- pipeline_parallel = false;
- break;
- }
- }
- }
- sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, pipeline_parallel, cparams.op_offload));
- if (pipeline_parallel) {
- LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(sched.get()));
- }
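- // Illustrative sketch (public API, hedged): the conditions above mean pipeline
- // parallelism typically kicks in for a fully offloaded layer split across
- // multiple GPUs, e.g.:
- //
- //     llama_model_params mp = llama_model_default_params();
- //     mp.n_gpu_layers = 999;                    // offload all layers (> n_layer)
- //     mp.split_mode   = LLAMA_SPLIT_MODE_LAYER; // split the model by layer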
- }
- // reserve worst-case graph
- if (!hparams.vocab_only && memory) {
- const uint32_t n_seqs = cparams.n_seq_max;
- const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
- LLAMA_LOG_DEBUG("%s: worst-case: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs);
- int n_splits_pp = -1;
- int n_nodes_pp = -1;
- int n_splits_tg = -1;
- int n_nodes_tg = -1;
- // simulate full KV cache
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- const auto kv_state = kv_self->init_full();
- if (!kv_state) {
- throw std::runtime_error("failed to initialize KV cache");
- }
- cross.v_embd.clear();
- // reserve pp graph first so that buffers are only allocated once
- {
- auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, kv_state.get());
- if (!gf) {
- throw std::runtime_error("failed to allocate compute pp buffers");
- }
- n_splits_pp = ggml_backend_sched_get_n_splits(sched.get());
- n_nodes_pp = ggml_graph_n_nodes(gf);
- }
- // reserve with tg graph to get the number of splits and nodes
- {
- auto * gf = graph_reserve(1, 1, 1, kv_state.get());
- if (!gf) {
- throw std::runtime_error("failed to allocate compute tg buffers");
- }
- n_splits_tg = ggml_backend_sched_get_n_splits(sched.get());
- n_nodes_tg = ggml_graph_n_nodes(gf);
- }
- // reserve again with pp graph to avoid ggml-alloc reallocations during inference
- {
- auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, kv_state.get());
- if (!gf) {
- throw std::runtime_error("failed to allocate compute pp buffers");
- }
- }
- for (size_t i = 0; i < backend_ptrs.size(); ++i) {
- ggml_backend_t backend = backend_ptrs[i];
- ggml_backend_buffer_type_t buft = backend_buft[i];
- size_t size = ggml_backend_sched_get_buffer_size(sched.get(), backend);
- if (size > 1) {
- LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
- ggml_backend_buft_name(buft),
- size / 1024.0 / 1024.0);
- }
- }
- if (n_nodes_pp == n_nodes_tg) {
- LLAMA_LOG_INFO("%s: graph nodes = %d\n", __func__, n_nodes_pp);
- } else {
- LLAMA_LOG_INFO("%s: graph nodes = %d (with bs=%d), %d (with bs=1)\n", __func__, n_nodes_pp, n_tokens, n_nodes_tg);
- }
- if (n_splits_pp == n_splits_tg) {
- LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits_pp);
- } else {
- LLAMA_LOG_INFO("%s: graph splits = %d (with bs=%d), %d (with bs=1)\n", __func__, n_splits_pp, n_tokens, n_splits_tg);
- }
- }
- }
- llama_context::~llama_context() {
- ggml_opt_free(opt_ctx);
- }
- void llama_context::synchronize() {
- ggml_backend_sched_synchronize(sched.get());
- // FIXME: if multiple single tokens are evaluated without a synchronization,
- // the stats will be added to the prompt evaluation stats
- // this should only happen when using batch size 1 to evaluate a batch
- // add the evaluation to the stats
- if (n_queued_tokens == 1) {
- if (!cparams.no_perf) {
- t_eval_us += ggml_time_us() - t_compute_start_us;
- }
- n_eval++;
- } else if (n_queued_tokens > 1) {
- if (!cparams.no_perf) {
- t_p_eval_us += ggml_time_us() - t_compute_start_us;
- }
- n_p_eval += n_queued_tokens;
- }
- // get a more accurate load time, upon first eval
- if (n_queued_tokens > 0 && !has_evaluated_once) {
- t_load_us = ggml_time_us() - t_start_us;
- has_evaluated_once = true;
- }
- n_queued_tokens = 0;
- t_compute_start_us = 0;
- }
- const llama_model & llama_context::get_model() const {
- return model;
- }
- const llama_cparams & llama_context::get_cparams() const {
- return cparams;
- }
- ggml_backend_sched_t llama_context::get_sched() const {
- return sched.get();
- }
- ggml_context * llama_context::get_ctx_compute() const {
- return ctx_compute.get();
- }
- uint32_t llama_context::n_ctx() const {
- return cparams.n_ctx;
- }
- uint32_t llama_context::n_ctx_per_seq() const {
- return cparams.n_ctx / cparams.n_seq_max;
- }
- uint32_t llama_context::n_batch() const {
- return cparams.n_batch;
- }
- uint32_t llama_context::n_ubatch() const {
- return cparams.n_ubatch;
- }
- uint32_t llama_context::n_seq_max() const {
- return cparams.n_seq_max;
- }
- uint32_t llama_context::n_threads() const {
- return cparams.n_threads;
- }
- uint32_t llama_context::n_threads_batch() const {
- return cparams.n_threads_batch;
- }
- llama_kv_cache * llama_context::get_kv_self() {
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- return kv_self;
- }
- const llama_kv_cache * llama_context::get_kv_self() const {
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- return kv_self;
- }
- bool llama_context::kv_self_update() {
- if (!memory) {
- return false;
- }
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- if (!kv_self->update(*this)) {
- // no updates have been performed
- return false;
- }
- // if the KV cache did any computation, we have to reserve a new worst-case graph
- const auto kv_state = kv_self->init_full();
- if (!kv_state) {
- throw std::runtime_error("failed to initialize KV cache");
- }
- const uint32_t n_seqs = cparams.n_seq_max;
- const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
- auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, kv_state.get());
- if (!gf) {
- LLAMA_LOG_ERROR("%s: failed to reserve graph after the KV cache update\n", __func__);
- }
- return true;
- }
- enum llama_pooling_type llama_context::pooling_type() const {
- return cparams.pooling_type;
- }
- float * llama_context::get_logits() {
- return logits;
- }
- float * llama_context::get_logits_ith(int32_t i) {
- int32_t j = -1;
- try {
- if (logits == nullptr) {
- throw std::runtime_error("no logits");
- }
- if (i < 0) {
- j = n_outputs + i;
- if (j < 0) {
- throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
- }
- } else if ((size_t) i >= output_ids.size()) {
- throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
- } else {
- j = output_ids[i];
- }
- if (j < 0) {
- throw std::runtime_error(format("batch.logits[%d] != true", i));
- }
- if (j >= n_outputs) {
- // This should not happen
- throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, n_outputs));
- }
- return logits + j*model.vocab.n_tokens();
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
- #ifndef NDEBUG
- GGML_ABORT("fatal error");
- #else
- return nullptr;
- #endif
- }
- }
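- // Usage sketch (public API, illustrative): llama_get_logits_ith() forwards to
- // the accessor above; negative indices count back from the end of the outputs:
- //
- //     float * logits = llama_get_logits_ith(ctx, -1); // logits of the last output
- //     if (logits == nullptr) {
- //         // index out of range, or the token was not marked for output
- //     }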
- float * llama_context::get_embeddings() {
- return embd;
- }
- float * llama_context::get_embeddings_ith(int32_t i) {
- int32_t j = -1;
- try {
- if (embd == nullptr) {
- throw std::runtime_error("no embeddings");
- }
- if (i < 0) {
- j = n_outputs + i;
- if (j < 0) {
- throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
- }
- } else if ((size_t) i >= output_ids.size()) {
- throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
- } else {
- j = output_ids[i];
- }
- if (j < 0) {
- throw std::runtime_error(format("batch.logits[%d] != true", i));
- }
- if (j >= n_outputs) {
- // This should not happen
- throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, n_outputs));
- }
- return embd + j*model.hparams.n_embd;
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
- #ifndef NDEBUG
- GGML_ABORT("fatal error");
- #else
- return nullptr;
- #endif
- }
- }
- float * llama_context::get_embeddings_seq(llama_seq_id seq_id) {
- auto it = embd_seq.find(seq_id);
- if (it == embd_seq.end()) {
- return nullptr;
- }
- return it->second.data();
- }
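- // Usage sketch (public API, illustrative): sequence embeddings are populated
- // only when pooling is enabled, e.g.:
- //
- //     llama_context_params cp = llama_context_default_params();
- //     cp.embeddings   = true;
- //     cp.pooling_type = LLAMA_POOLING_TYPE_MEAN;
- //     // ... create the context, decode a batch ...
- //     const float * e = llama_get_embeddings_seq(ctx, 0); // n_embd floats, or NULL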
- void llama_context::attach_threadpool(
- ggml_threadpool_t threadpool,
- ggml_threadpool_t threadpool_batch) {
- LLAMA_LOG_DEBUG("%s: call\n", __func__);
- this->threadpool = threadpool;
- this->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool;
- }
- void llama_context::detach_threadpool() {
- LLAMA_LOG_DEBUG("%s: call\n", __func__);
- this->threadpool = nullptr;
- this->threadpool_batch = nullptr;
- }
- void llama_context::set_n_threads(int32_t n_threads, int32_t n_threads_batch) {
- LLAMA_LOG_DEBUG("%s: n_threads = %d, n_threads_batch = %d\n", __func__, n_threads, n_threads_batch);
- cparams.n_threads = n_threads;
- cparams.n_threads_batch = n_threads_batch;
- }
- void llama_context::set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data) {
- LLAMA_LOG_DEBUG("%s: call\n", __func__);
- this->abort_callback = abort_callback;
- this->abort_callback_data = abort_callback_data;
- for (auto & backend : backends) {
- auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get()));
- auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback");
- if (set_abort_callback_fn) {
- set_abort_callback_fn(backend.get(), this->abort_callback, this->abort_callback_data);
- }
- }
- }
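- // Usage sketch (public API, illustrative): the callback is polled during graph
- // computation; returning true aborts the current decode:
- //
- //     static bool my_abort(void * data) {
- //         return ((std::atomic<bool> *) data)->load(); // set from another thread
- //     }
- //     llama_set_abort_callback(ctx, my_abort, &stop_flag);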
- void llama_context::set_embeddings(bool value) {
- LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);
- cparams.embeddings = value;
- }
- void llama_context::set_causal_attn(bool value) {
- LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);
- cparams.causal_attn = value;
- }
- void llama_context::set_warmup(bool value) {
- LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);
- cparams.warmup = value;
- }
- void llama_context::set_adapter_lora(
- llama_adapter_lora * adapter,
- float scale) {
- LLAMA_LOG_DEBUG("%s: adapter = %p, scale = %f\n", __func__, (void *) adapter, scale);
- loras[adapter] = scale;
- }
- bool llama_context::rm_adapter_lora(
- llama_adapter_lora * adapter) {
- LLAMA_LOG_DEBUG("%s: adapter = %p\n", __func__, (void *) adapter);
- auto pos = loras.find(adapter);
- if (pos != loras.end()) {
- loras.erase(pos);
- return true;
- }
- return false;
- }
- void llama_context::clear_adapter_lora() {
- LLAMA_LOG_DEBUG("%s: call\n", __func__);
- loras.clear();
- }
- bool llama_context::apply_adapter_cvec(
- const float * data,
- size_t len,
- int32_t n_embd,
- int32_t il_start,
- int32_t il_end) {
- LLAMA_LOG_DEBUG("%s: il_start = %d, il_end = %d\n", __func__, il_start, il_end);
- return cvec.apply(model, data, len, n_embd, il_start, il_end);
- }
- llm_graph_result_ptr llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_state_i * mstate, ggml_status & ret) {
- if (mstate && !mstate->apply()) {
- LLAMA_LOG_ERROR("%s: failed to apply memory state\n", __func__);
- ret = GGML_STATUS_FAILED;
- return nullptr;
- }
- auto * gf = graph_init();
- if (!gf) {
- LLAMA_LOG_ERROR("%s: failed to initialize graph\n", __func__);
- ret = GGML_STATUS_FAILED;
- return nullptr;
- }
- auto res = graph_build(ctx_compute.get(), gf, ubatch, gtype, mstate);
- if (!res) {
- LLAMA_LOG_ERROR("%s: failed to build graph\n", __func__);
- ret = GGML_STATUS_FAILED;
- return nullptr;
- }
- // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
- if (!ggml_backend_sched_alloc_graph(sched.get(), gf)) {
- LLAMA_LOG_ERROR("%s: failed to allocate graph\n", __func__);
- ret = GGML_STATUS_ALLOC_FAILED;
- return nullptr;
- }
- res->set_inputs(&ubatch);
- const auto status = graph_compute(gf, ubatch.n_tokens > 1);
- if (status != GGML_STATUS_SUCCESS) {
- LLAMA_LOG_ERROR("%s: failed to compute graph, compute status: %d\n", __func__, status);
- ret = status;
- return nullptr;
- }
- ret = GGML_STATUS_SUCCESS;
- return res;
- }
- int llama_context::encode(llama_batch & inp_batch) {
- if (inp_batch.n_tokens == 0) {
- LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
- return -1;
- }
- // temporary allocate memory for the input batch if needed
- // note: during encode, we always pass the full sequence starting from pos = 0
- llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : 0);
- const llama_batch & batch = batch_allocr.batch;
- const int32_t n_tokens = batch.n_tokens;
- const auto & hparams = model.hparams;
- GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
- // TODO: move the validation to the llama_batch_allocr
- if (batch.token) {
- for (int32_t i = 0; i < n_tokens; ++i) {
- if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) {
- LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
- return -1;
- }
- if (batch.seq_id && (batch.seq_id[i][0] < 0 || batch.seq_id[i][0] >= LLAMA_MAX_PARALLEL_SEQUENCES)) {
- LLAMA_LOG_ERROR("%s: invalid seq_id[%d] = %d > %d\n", __func__, i, batch.seq_id[i][0], LLAMA_MAX_PARALLEL_SEQUENCES);
- throw -1;
- }
- }
- }
- // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
- GGML_ASSERT(cparams.n_ubatch >= (uint32_t) n_tokens && "encoder requires n_ubatch >= n_tokens");
- if (t_compute_start_us == 0) {
- t_compute_start_us = ggml_time_us();
- }
- embd_seq.clear();
- n_queued_tokens += n_tokens;
- const int64_t n_embd = hparams.n_embd;
- llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true, /* logits_all */ true);
- const llama_ubatch ubatch = sbatch.split_simple(n_tokens);
- // reserve output buffer
- if (output_reserve(n_tokens) < n_tokens) {
- LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens);
- return -2;
- }
- for (int32_t i = 0; i < n_tokens; ++i) {
- output_ids[i] = i;
- }
- n_outputs = n_tokens;
- ggml_backend_sched_reset(sched.get());
- ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data);
- const auto causal_attn_org = cparams.causal_attn;
- // always use non-causal attention for encoder graphs
- // TODO: this is a tmp solution until we have a proper way to support enc-dec models
- // ref: https://github.com/ggml-org/llama.cpp/pull/12181#issuecomment-2730451223
- cparams.causal_attn = false;
- ggml_status status;
- const auto res = process_ubatch(ubatch, LLM_GRAPH_TYPE_ENCODER, nullptr, status);
- cparams.causal_attn = causal_attn_org;
- if (!res) {
- switch (status) {
- case GGML_STATUS_ABORTED: return 2;
- case GGML_STATUS_ALLOC_FAILED: return -2;
- case GGML_STATUS_FAILED: return -3;
- case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen");
- }
- }
- auto * t_embd = res->get_embd_pooled() ? res->get_embd_pooled() : res->get_embd();
- // extract embeddings
- if (t_embd) {
- ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd);
- GGML_ASSERT(backend_embd != nullptr);
- switch (cparams.pooling_type) {
- case LLAMA_POOLING_TYPE_NONE:
- {
- // extract token embeddings
- GGML_ASSERT(embd != nullptr);
- GGML_ASSERT(n_tokens*n_embd <= (int64_t) embd_size);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd, 0, n_tokens*n_embd*sizeof(float));
- } break;
- case LLAMA_POOLING_TYPE_MEAN:
- case LLAMA_POOLING_TYPE_CLS:
- case LLAMA_POOLING_TYPE_LAST:
- {
- // extract sequence embeddings
- auto & embd_seq_out = embd_seq;
- embd_seq_out.clear();
- GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits
- for (int32_t i = 0; i < n_tokens; i++) {
- const llama_seq_id seq_id = ubatch.seq_id[i][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
- embd_seq_out[seq_id].resize(n_embd);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_RANK:
- {
- // extract the rerank score - a single float per sequence
- auto & embd_seq_out = embd_seq;
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
- embd_seq_out[seq_id].resize(1);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_UNSPECIFIED:
- {
- GGML_ABORT("unknown pooling type");
- }
- }
- }
- // Reset state for the next token before backend sync, to allow the CPU activities in the reset to
- // overlap with device computation.
- ggml_backend_sched_reset(sched.get());
- // TODO: hacky solution
- if (model.arch == LLM_ARCH_T5 && t_embd) {
- //cross.t_embd = t_embd;
- synchronize();
- cross.n_embd = t_embd->ne[0];
- cross.n_enc = t_embd->ne[1];
- cross.v_embd.resize(cross.n_embd*cross.n_enc);
- memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd));
- // remember the sequence ids used during the encoding - needed for cross attention later
- cross.seq_ids_enc.resize(n_tokens);
- for (int32_t i = 0; i < n_tokens; i++) {
- cross.seq_ids_enc[i].clear();
- for (int s = 0; s < ubatch.n_seq_id[i]; s++) {
- llama_seq_id seq_id = ubatch.seq_id[i][s];
- cross.seq_ids_enc[i].insert(seq_id);
- }
- }
- }
- return 0;
- }
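- // Usage sketch (public API, illustrative; llama_batch_get_one's signature has
- // varied between versions): encoding the full input of an encoder model:
- //
- //     llama_batch batch = llama_batch_get_one(tokens.data(), (int32_t) tokens.size());
- //     const int ret = llama_encode(ctx, batch);
- //     // ret: 0 = success, 2 = aborted, negative = error (see the codes above)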
- int llama_context::decode(llama_batch & inp_batch) {
- if (!memory) {
- LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__);
- return encode(inp_batch);
- }
- if (inp_batch.n_tokens == 0) {
- LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
- return -1;
- }
- if (!inp_batch.pos) {
- if (inp_batch.seq_id) {
- LLAMA_LOG_ERROR("%s: pos == NULL, but seq_id != NULL\n", __func__);
- return -1;
- }
- }
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- // temporary allocate memory for the input batch if needed
- llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : kv_self->seq_pos_max(0) + 1);
- const llama_batch & batch = batch_allocr.batch;
- const auto & vocab = model.vocab;
- const auto & hparams = model.hparams;
- const int32_t n_vocab = vocab.n_tokens();
- const int64_t n_tokens_all = batch.n_tokens;
- const int64_t n_embd = hparams.n_embd;
- GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
- // TODO: move the validation to the llama_batch_allocr
- if (batch.token) {
- for (int64_t i = 0; i < n_tokens_all; ++i) {
- if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) {
- LLAMA_LOG_ERROR("%s: invalid token[%" PRId64 "] = %d\n", __func__, i, batch.token[i]);
- return -1;
- }
- if (batch.seq_id && (batch.seq_id[i][0] < 0 || batch.seq_id[i][0] >= LLAMA_MAX_PARALLEL_SEQUENCES)) {
- LLAMA_LOG_ERROR("%s: invalid seq_id[%" PRId64 "] = %d >= %d\n", __func__, i, batch.seq_id[i][0], LLAMA_MAX_PARALLEL_SEQUENCES);
- return -1;
- }
- }
- }
- GGML_ASSERT(n_tokens_all <= cparams.n_batch);
- GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");
- if (t_compute_start_us == 0) {
- t_compute_start_us = ggml_time_us();
- }
- n_queued_tokens += n_tokens_all;
- // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens
- const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE;
- embd_seq.clear();
- int64_t n_outputs_all = 0;
- // count outputs
- if (batch.logits && !embd_pooled) {
- for (uint32_t i = 0; i < n_tokens_all; ++i) {
- n_outputs_all += batch.logits[i] != 0;
- }
- } else if (embd_pooled) {
- n_outputs_all = n_tokens_all;
- } else {
- // keep last output only
- n_outputs_all = 1;
- }
- // handle any pending defrags/shifts
- kv_self_update();
- llama_memory_state_ptr kv_state;
- bool did_defrag = false;
- while (true) {
- kv_state = kv_self->init_batch(batch, cparams.n_ubatch, embd_pooled, /* logits_all */ n_outputs_all == n_tokens_all);
- if (!kv_state) {
- return -2;
- }
- switch (kv_state->get_status()) {
- case LLAMA_MEMORY_STATUS_SUCCESS:
- {
- } break;
- case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
- {
- if (!did_defrag) {
- did_defrag = true;
- kv_self->defrag_sched(-1.0f);
- if (kv_self_update()) {
- LLAMA_LOG_DEBUG("%s: failed to init batch of size %d, retrying after defrag\n", __func__, batch.n_tokens);
- continue;
- }
- }
- LLAMA_LOG_WARN("%s: failed to find KV cache slot for batch of size %d\n", __func__, batch.n_tokens);
- return 1;
- }
- case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
- {
- return -2;
- }
- }
- break;
- }
- // reserve output buffer
- if (output_reserve(n_outputs_all) < n_outputs_all) {
- LLAMA_LOG_ERROR("%s: could not reserve space for batch with %" PRId64 " outputs\n", __func__, n_outputs_all);
- return -2;
- }
- int64_t n_outputs_prev = 0;
- do {
- const auto & ubatch = kv_state->get_ubatch();
- // count the outputs in this u_batch
- {
- int32_t n_outputs_new = 0;
- if (n_outputs_all == n_tokens_all) {
- n_outputs_new = ubatch.n_tokens;
- } else {
- GGML_ASSERT(ubatch.output);
- for (uint32_t i = 0; i < ubatch.n_tokens; i++) {
- n_outputs_new += (int32_t) (ubatch.output[i] != 0);
- }
- }
- // needs to happen before the graph is built
- n_outputs = n_outputs_new;
- }
- ggml_backend_sched_reset(sched.get());
- ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data);
- ggml_status status;
- const auto res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, kv_state.get(), status);
- if (!res) {
- // the last ubatch failed or was aborted -> remove all positions of that ubatch from the KV cache
- llama_pos pos_min[LLAMA_MAX_PARALLEL_SEQUENCES];
- std::fill(pos_min, pos_min + LLAMA_MAX_PARALLEL_SEQUENCES, std::numeric_limits<llama_pos>::max());
- for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
- const auto & seq_id = ubatch.seq_id[i][0];
- pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]);
- }
- for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) {
- if (pos_min[s] == std::numeric_limits<llama_pos>::max()) {
- continue;
- }
- LLAMA_LOG_WARN("%s: removing KV cache entries for seq_id = %d, pos = [%d, +inf)\n", __func__, s, pos_min[s]);
- llama_kv_self_seq_rm(this, s, pos_min[s], -1);
- }
- switch (status) {
- case GGML_STATUS_ABORTED: return 2;
- case GGML_STATUS_ALLOC_FAILED: return -2;
- case GGML_STATUS_FAILED: return -3;
- case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen");
- }
- }
- // plot the computation graph in dot format (for debugging purposes)
- //if (n_past%100 == 0) {
- // ggml_graph_dump_dot(gf, NULL, "llama.dot");
- //}
- auto * t_logits = cparams.embeddings ? nullptr : res->get_logits();
- auto * t_embd = cparams.embeddings ? res->get_embd() : nullptr;
- if (t_embd && res->get_embd_pooled()) {
- t_embd = res->get_embd_pooled();
- }
- // extract logits
- if (t_logits && n_outputs > 0) {
- ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits);
- GGML_ASSERT(backend_res != nullptr);
- GGML_ASSERT(logits != nullptr);
- float * logits_out = logits + n_outputs_prev*n_vocab;
- if (n_outputs) {
- GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all);
- GGML_ASSERT((n_outputs_prev + n_outputs)*n_vocab <= (int64_t) logits_size);
- ggml_backend_tensor_get_async(backend_res, t_logits, logits_out, 0, n_outputs*n_vocab*sizeof(float));
- }
- }
- // extract embeddings
- if (t_embd && n_outputs > 0) {
- ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd);
- GGML_ASSERT(backend_embd != nullptr);
- switch (cparams.pooling_type) {
- case LLAMA_POOLING_TYPE_NONE:
- {
- // extract token embeddings
- GGML_ASSERT(embd != nullptr);
- float * embd_out = embd + n_outputs_prev*n_embd;
- if (n_outputs) {
- GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all);
- GGML_ASSERT((n_outputs_prev + n_outputs)*n_embd <= (int64_t) embd_size);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_out, 0, n_outputs*n_embd*sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_MEAN:
- case LLAMA_POOLING_TYPE_CLS:
- case LLAMA_POOLING_TYPE_LAST:
- {
- // extract sequence embeddings (cleared before processing each batch)
- auto & embd_seq_out = embd_seq;
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
- embd_seq_out[seq_id].resize(n_embd);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_RANK:
- {
- // extract the rerank score - a single float per sequence
- auto & embd_seq_out = embd_seq;
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
- embd_seq_out[seq_id].resize(1);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_UNSPECIFIED:
- {
- GGML_ABORT("unknown pooling type");
- }
- }
- }
- n_outputs_prev += n_outputs;
- } while (kv_state->next());
- // set to total number of outputs in the batch, for use in llama_get_logits_ith
- n_outputs = n_outputs_all;
- // set output mappings
- {
- bool sorted_output = true;
- auto & out_ids = kv_state->out_ids();
- GGML_ASSERT(out_ids.size() == (size_t) n_outputs_all);
- for (int64_t i = 0; i < n_outputs_all; ++i) {
- int64_t out_id = out_ids[i];
- output_ids[out_id] = i;
- if (out_id != i) {
- sorted_output = false;
- }
- }
- // make the outputs have the same order they had in the user-provided batch
- // note: this is mostly relevant for recurrent models atm
- if (!sorted_output) {
- const uint32_t n_vocab = model.vocab.n_tokens();
- const uint32_t n_embd = model.hparams.n_embd;
- GGML_ASSERT((size_t) n_outputs == out_ids.size());
- // TODO: is there something more efficient which also minimizes swaps?
- // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort)
- for (int32_t i = 0; i < n_outputs - 1; ++i) {
- int32_t j_min = i;
- for (int32_t j = i + 1; j < n_outputs; ++j) {
- if (out_ids[j] < out_ids[j_min]) {
- j_min = j;
- }
- }
- if (j_min == i) { continue; }
- std::swap(out_ids[i], out_ids[j_min]);
- if (logits_size > 0) {
- for (uint32_t k = 0; k < n_vocab; k++) {
- std::swap(logits[i*n_vocab + k], logits[j_min*n_vocab + k]);
- }
- }
- if (embd_size > 0) {
- for (uint32_t k = 0; k < n_embd; k++) {
- std::swap(embd[i*n_embd + k], embd[j_min*n_embd + k]);
- }
- }
- }
- std::fill(output_ids.begin(), output_ids.end(), -1);
- for (int32_t i = 0; i < n_outputs; ++i) {
- output_ids[out_ids[i]] = i;
- }
- }
- }
- // wait for the computation to finish (automatically done when obtaining the model output)
- //synchronize();
- // decide if we need to defrag the kv cache
- if (cparams.defrag_thold > 0.0f) {
- kv_self->defrag_sched(cparams.defrag_thold);
- }
- // Reset state for the next token before backend sync, to allow the CPU activities in the reset to
- // overlap with device computation.
- ggml_backend_sched_reset(sched.get());
- return 0;
- }
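- // Usage sketch (public API, illustrative): interpreting the return codes above
- // from the caller's side:
- //
- //     const int ret = llama_decode(ctx, batch);
- //     if (ret == 1) {
- //         // no KV cache slot found: free sequences or shrink the batch, then retry
- //     } else if (ret < 0) {
- //         // -1 invalid input, -2 allocation/cache failure, -3 compute failure
- //     }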
- //
- // output
- //
- int32_t llama_context::output_reserve(int32_t n_outputs) {
- const auto & hparams = model.hparams;
- const auto & vocab = model.vocab;
- const int64_t n_outputs_max = std::max<int64_t>(n_outputs, n_seq_max());
- const auto n_batch = cparams.n_batch;
- const auto n_vocab = vocab.n_tokens();
- const auto n_embd = hparams.n_embd;
- // TODO: use a per-batch flag for logits presence instead
- bool has_logits = !cparams.embeddings;
- bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);
- // TODO: hacky enc-dec support
- if (model.arch == LLM_ARCH_T5) {
- has_logits = true;
- has_embd = true;
- }
- logits_size = has_logits ? n_vocab*n_outputs_max : 0;
- embd_size = has_embd ? n_embd*n_outputs_max : 0;
- if (output_ids.empty()) {
- // init, never resized afterwards
- output_ids.resize(n_batch);
- }
- const size_t prev_size = buf_output ? ggml_backend_buffer_get_size(buf_output.get()) : 0;
- const size_t new_size = (logits_size + embd_size) * sizeof(float);
- // alloc only when more than the current capacity is required
- // TODO: also consider shrinking the buffer
- if (!buf_output || prev_size < new_size) {
- if (buf_output) {
- #ifndef NDEBUG
- // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark)
- LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
- #endif
- buf_output = nullptr;
- logits = nullptr;
- embd = nullptr;
- }
- auto * buft = ggml_backend_cpu_buffer_type();
- // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory
- auto * output_dev = model.dev_output();
- auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr;
- if (output_dev_host_buft) {
- buft = output_dev_host_buft;
- }
- buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size));
- if (buf_output == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0));
- return 0;
- }
- }
- float * output_base = (float *) ggml_backend_buffer_get_base(buf_output.get());
- logits = has_logits ? output_base : nullptr;
- embd = has_embd ? output_base + logits_size : nullptr;
- // set all ids as invalid (negative)
- std::fill(output_ids.begin(), output_ids.end(), -1);
- this->n_outputs = 0;
- this->n_outputs_max = n_outputs_max;
- return n_outputs_max;
- }
- //
- // graph
- //
- int32_t llama_context::graph_max_nodes() const {
- return std::max<int32_t>(65536, 5*model.n_tensors());
- }
- ggml_cgraph * llama_context::graph_init() {
- ggml_init_params params = {
- /*.mem_size =*/ buf_compute_meta.size(),
- /*.mem_buffer =*/ buf_compute_meta.data(),
- /*.no_alloc =*/ true,
- };
- ctx_compute.reset(ggml_init(params));
- return ggml_new_graph_custom(ctx_compute.get(), graph_max_nodes(), false);
- }
- ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_state_i * mstate) {
- LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs);
- if (n_tokens % n_seqs != 0) {
- n_tokens = (n_tokens / n_seqs) * n_seqs;
- n_outputs = std::min(n_outputs, n_tokens);
- LLAMA_LOG_DEBUG("%s: making n_tokens a multiple of n_seqs - n_tokens = %u, n_seqs = %u, n_outputs = %u\n", __func__, n_tokens, n_seqs, n_outputs);
- }
- // store the n_outputs as it is, and restore it afterwards
- // TODO: not sure if needed, might simplify in the future by removing this
- const auto save_n_outputs = this->n_outputs;
- this->n_outputs = n_outputs;
- llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
- llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
- auto * gf = graph_init();
- auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate);
- this->n_outputs = save_n_outputs;
- if (!res) {
- LLAMA_LOG_ERROR("%s: failed to build worst-case graph\n", __func__);
- return nullptr;
- }
- ggml_backend_sched_reset(sched.get());
- // initialize scheduler with the specified graph
- if (!ggml_backend_sched_reserve(sched.get(), gf)) {
- LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
- return nullptr;
- }
- return gf;
- }
- llm_graph_result_ptr llama_context::graph_build(
- ggml_context * ctx,
- ggml_cgraph * gf,
- const llama_ubatch & ubatch,
- llm_graph_type gtype,
- const llama_memory_state_i * mstate) {
- return model.build_graph(
- {
- /*.ctx =*/ ctx,
- /*.arch =*/ model.arch,
- /*.hparams =*/ model.hparams,
- /*.cparams =*/ cparams,
- /*.ubatch =*/ ubatch,
- /*.sched =*/ sched.get(),
- /*.backend_cpu =*/ backend_cpu,
- /*.cvec =*/ &cvec,
- /*.loras =*/ &loras,
- /*.mstate =*/ mstate,
- /*.cross =*/ &cross,
- /*.n_outputs =*/ n_outputs,
- /*.cb =*/ graph_get_cb(),
- }, gf, gtype);
- }
- ggml_status llama_context::graph_compute(
- ggml_cgraph * gf,
- bool batched) {
- int n_threads = batched ? cparams.n_threads_batch : cparams.n_threads;
- ggml_threadpool_t tp = batched ? threadpool_batch : threadpool;
- if (backend_cpu != nullptr) {
- auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend_cpu));
- auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool");
- set_threadpool_fn(backend_cpu, tp);
- }
- // set the number of threads for all the backends
- for (const auto & set_n_threads_fn : set_n_threads_fns) {
- set_n_threads_fn.second(set_n_threads_fn.first, n_threads);
- }
- auto status = ggml_backend_sched_graph_compute_async(sched.get(), gf);
- if (status != GGML_STATUS_SUCCESS) {
- LLAMA_LOG_ERROR("%s: ggml_backend_sched_graph_compute_async failed with error %d\n", __func__, status);
- }
- // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(sched));
- return status;
- }
- llm_graph_cb llama_context::graph_get_cb() const {
- return [&](const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il) {
- if (il >= 0) {
- ggml_format_name(cur, "%s-%d", name, il);
- } else {
- ggml_set_name(cur, name);
- }
- if (!cparams.offload_kqv) {
- if (strcmp(name, "kqv_merged_cont") == 0) {
- // all nodes between the KV store and the attention output are run on the CPU
- ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend_cpu);
- }
- }
- // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
- // FIXME: fix in ggml_backend_sched
- const bool full_offload = model.params.n_gpu_layers > (int) model.hparams.n_layer;
- if (ubatch.n_tokens < 32 || full_offload) {
- if (il != -1 && strcmp(name, "norm") == 0) {
- const auto & dev_layer = model.dev_layer(il);
- for (const auto & backend : backends) {
- if (ggml_backend_get_device(backend.get()) == dev_layer) {
- if (ggml_backend_supports_op(backend.get(), cur)) {
- ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend.get());
- }
- }
- }
- }
- }
- };
- }
- //
- // state save/load
- //
- class llama_io_write_dummy : public llama_io_write_i {
- public:
- llama_io_write_dummy() = default;
- void write(const void * /* src */, size_t size) override {
- size_written += size;
- }
- void write_tensor(const ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override {
- size_written += size;
- }
- size_t n_bytes() override {
- return size_written;
- }
- private:
- size_t size_written = 0;
- };
- class llama_io_write_buffer : public llama_io_write_i {
- public:
- llama_io_write_buffer(
- uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
- void write(const void * src, size_t size) override {
- if (size > buf_size) {
- throw std::runtime_error("unexpectedly reached end of buffer");
- }
- memcpy(ptr, src, size);
- ptr += size;
- size_written += size;
- buf_size -= size;
- }
- void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override {
- if (size > buf_size) {
- throw std::runtime_error("unexpectedly reached end of buffer");
- }
- ggml_backend_tensor_get(tensor, ptr, offset, size);
- ptr += size;
- size_written += size;
- buf_size -= size;
- }
- size_t n_bytes() override {
- return size_written;
- }
- private:
- uint8_t * ptr;
- size_t buf_size = 0;
- size_t size_written = 0;
- };
- class llama_io_read_buffer : public llama_io_read_i {
- public:
- llama_io_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
- const uint8_t * read(size_t size) override {
- const uint8_t * base_ptr = ptr;
- if (size > buf_size) {
- throw std::runtime_error("unexpectedly reached end of buffer");
- }
- ptr += size;
- size_read += size;
- buf_size -= size;
- return base_ptr;
- }
- void read_to(void * dst, size_t size) override {
- memcpy(dst, read(size), size);
- }
- size_t n_bytes() override {
- return size_read;
- }
- private:
- const uint8_t * ptr;
- size_t buf_size = 0;
- size_t size_read = 0;
- };
- class llama_io_write_file : public llama_io_write_i {
- public:
- llama_io_write_file(llama_file * f) : file(f) {}
- void write(const void * src, size_t size) override {
- file->write_raw(src, size);
- size_written += size;
- }
- void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override {
- temp_buffer.resize(size);
- ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
- write(temp_buffer.data(), temp_buffer.size());
- }
- size_t n_bytes() override {
- return size_written;
- }
- private:
- llama_file * file;
- size_t size_written = 0;
- std::vector<uint8_t> temp_buffer;
- };
- class llama_io_read_file : public llama_io_read_i {
- public:
- llama_io_read_file(llama_file * f) : file(f) {}
- void read_to(void * dst, size_t size) override {
- file->read_raw(dst, size);
- size_read += size;
- }
- const uint8_t * read(size_t size) override {
- temp_buffer.resize(size);
- read_to(temp_buffer.data(), size);
- return temp_buffer.data();
- }
- size_t n_bytes() override {
- return size_read;
- }
- private:
- llama_file * file;
- size_t size_read = 0;
- std::vector<uint8_t> temp_buffer;
- };
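- // Usage sketch (public API, illustrative): the writers/readers above back the
- // two-phase "measure, then copy" pattern:
- //
- //     const size_t n = llama_state_get_size(ctx);        // llama_io_write_dummy
- //     std::vector<uint8_t> buf(n);
- //     llama_state_get_data(ctx, buf.data(), buf.size()); // llama_io_write_buffer
- //     // ... later ...
- //     llama_state_set_data(ctx, buf.data(), buf.size()); // llama_io_read_buffer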
- size_t llama_context::state_get_size() {
- llama_io_write_dummy io;
- try {
- return state_write_data(io);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
- return 0;
- }
- }
- size_t llama_context::state_get_data(uint8_t * dst, size_t size) {
- llama_io_write_buffer io(dst, size);
- try {
- return state_write_data(io);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
- return 0;
- }
- }
- size_t llama_context::state_set_data(const uint8_t * src, size_t size) {
- llama_io_read_buffer io(src, size);
- try {
- return state_read_data(io);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
- return 0;
- }
- }
- size_t llama_context::state_seq_get_size(llama_seq_id seq_id) {
- llama_io_write_dummy io;
- try {
- return state_seq_write_data(io, seq_id);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
- return 0;
- }
- }
- size_t llama_context::state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size) {
- llama_io_write_buffer io(dst, size);
- try {
- return state_seq_write_data(io, seq_id);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
- return 0;
- }
- }
- size_t llama_context::state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size) {
- llama_io_read_buffer io(src, size);
- try {
- return state_seq_read_data(io, seq_id);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
- return 0;
- }
- }
- bool llama_context::state_load_file(const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- llama_file file(filepath, "rb");
- // sanity checks
- {
- const uint32_t magic = file.read_u32();
- const uint32_t version = file.read_u32();
- if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
- LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
- return false;
- }
- }
- // load the prompt
- {
- const uint32_t n_token_count = file.read_u32();
- if (n_token_count > n_token_capacity) {
- LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
- return false;
- }
- file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
- *n_token_count_out = n_token_count;
- }
- // restore the context state
- {
- const size_t n_state_size_cur = file.size() - file.tell();
- llama_io_read_file io(&file);
- const size_t n_read = state_read_data(io);
- if (n_read != n_state_size_cur) {
- LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read);
- return false;
- }
- }
- return true;
- }
- bool llama_context::state_save_file(const char * filepath, const llama_token * tokens, size_t n_token_count) {
- llama_file file(filepath, "wb");
- file.write_u32(LLAMA_SESSION_MAGIC);
- file.write_u32(LLAMA_SESSION_VERSION);
- // save the prompt
- file.write_u32((uint32_t) n_token_count);
- file.write_raw(tokens, sizeof(llama_token) * n_token_count);
- // save the context state using stream saving
- llama_io_write_file io(&file);
- state_write_data(io);
- return true;
- }
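- // On-disk session file layout produced above (and consumed by state_load_file):
- //   [u32 magic][u32 version][u32 n_token_count][llama_token x n_token_count][state blob]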
- size_t llama_context::state_seq_load_file(llama_seq_id seq_id, const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- llama_file file(filepath, "rb");
- // version checks
- {
- const uint32_t magic = file.read_u32();
- const uint32_t version = file.read_u32();
- if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) {
- LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version);
- return 0;
- }
- }
- // load the prompt
- {
- const uint32_t n_token_count = file.read_u32();
- if (n_token_count > n_token_capacity) {
- LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
- return 0;
- }
- file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
- *n_token_count_out = n_token_count;
- }
- // restore the context state
- {
- const size_t state_size = file.size() - file.tell();
- llama_io_read_file io(&file);
- const size_t nread = state_seq_read_data(io, seq_id);
- if (!nread) {
- LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__);
- return 0;
- }
- GGML_ASSERT(nread <= state_size);
- GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell());
- }
- return file.tell();
- }
- size_t llama_context::state_seq_save_file(llama_seq_id seq_id, const char * filepath, const llama_token * tokens, size_t n_token_count) {
- llama_file file(filepath, "wb");
- file.write_u32(LLAMA_STATE_SEQ_MAGIC);
- file.write_u32(LLAMA_STATE_SEQ_VERSION);
- // save the prompt
- file.write_u32((uint32_t) n_token_count);
- file.write_raw(tokens, sizeof(llama_token) * n_token_count);
- // save the context state using stream saving
- llama_io_write_file io(&file);
- state_seq_write_data(io, seq_id);
- const size_t res = file.tell();
- GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + io.n_bytes());
- return res;
- }
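- // Sequence state files share the same layout, with LLAMA_STATE_SEQ_MAGIC/VERSION
- // and a per-sequence state blob; the assert above checks the byte accounting.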
- size_t llama_context::state_write_data(llama_io_write_i & io) {
- LLAMA_LOG_DEBUG("%s: writing state\n", __func__);
- // write model info
- {
- LLAMA_LOG_DEBUG("%s: - writing model info\n", __func__);
- const std::string arch_str = llm_arch_name(model.arch);
- io.write_string(arch_str);
- // TODO: add more model-specific info which should prevent loading the session file if not identical
- }
- // write output ids
- {
- LLAMA_LOG_DEBUG("%s: - writing output ids\n", __func__);
- const auto n_outputs = this->n_outputs;
- const auto & output_ids = this->output_ids;
- std::vector<int32_t> w_output_pos;
- GGML_ASSERT(n_outputs <= n_outputs_max);
- w_output_pos.resize(n_outputs);
- // build a more compact representation of the output ids
- for (size_t i = 0; i < n_batch(); ++i) {
- // map an output id to a position in the batch
- int32_t pos = output_ids[i];
- if (pos >= 0) {
- GGML_ASSERT(pos < n_outputs);
- w_output_pos[pos] = i;
- }
- }
- io.write(&n_outputs, sizeof(n_outputs));
- if (n_outputs) {
- io.write(w_output_pos.data(), n_outputs * sizeof(int32_t));
- }
- }
- // write logits
- {
- LLAMA_LOG_DEBUG("%s: - writing logits\n", __func__);
- const uint64_t logits_size = std::min((uint64_t) this->logits_size, (uint64_t) n_outputs * model.vocab.n_tokens());
- io.write(&logits_size, sizeof(logits_size));
- if (logits_size) {
- io.write(logits, logits_size * sizeof(float));
- }
- }
- // write embeddings
- {
- LLAMA_LOG_DEBUG("%s: - writing embeddings\n", __func__);
- const uint64_t embd_size = std::min((uint64_t) this->embd_size, (uint64_t) n_outputs * model.hparams.n_embd);
- io.write(&embd_size, sizeof(embd_size));
- if (embd_size) {
- io.write(embd, embd_size * sizeof(float));
- }
- }
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- if (kv_self != nullptr) {
- LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
- kv_self->state_write(io);
- }
- return io.n_bytes();
- }
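- // Sections are serialized in a fixed order: model arch, output ids, logits,
- // embeddings, then the KV cache. state_read_data() below must consume them in
- // exactly this order.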
- size_t llama_context::state_read_data(llama_io_read_i & io) {
- LLAMA_LOG_DEBUG("%s: reading state\n", __func__);
- // read model info
- {
- LLAMA_LOG_DEBUG("%s: - reading model info\n", __func__);
- const std::string cur_arch_str = llm_arch_name(model.arch);
- std::string arch_str;
- io.read_string(arch_str);
- if (cur_arch_str != arch_str) {
- throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str()));
- }
- // TODO: add more info which needs to be identical but which is not verified otherwise
- }
- // read output ids
- {
- LLAMA_LOG_DEBUG("%s: - reading output ids\n", __func__);
- auto n_outputs = this->n_outputs;
- io.read_to(&n_outputs, sizeof(n_outputs));
- if (n_outputs > output_reserve(n_outputs)) {
- throw std::runtime_error("could not reserve outputs");
- }
- std::vector<int32_t> output_pos;
- if (n_outputs) {
- output_pos.resize(n_outputs);
- io.read_to(output_pos.data(), n_outputs * sizeof(int32_t));
- for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) {
- int32_t id = output_pos[i];
- if ((uint32_t) id >= n_batch()) {
- throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, n_batch()));
- }
- this->output_ids[id] = i;
- }
- this->n_outputs = n_outputs;
- }
- }
- // read logits
- {
- LLAMA_LOG_DEBUG("%s: - reading logits\n", __func__);
- uint64_t logits_size;
- io.read_to(&logits_size, sizeof(logits_size));
- if (this->logits_size < logits_size) {
- throw std::runtime_error("logits buffer too small");
- }
- if (logits_size) {
- io.read_to(this->logits, logits_size * sizeof(float));
- }
- }
- // read embeddings
- {
- LLAMA_LOG_DEBUG("%s: - reading embeddings\n", __func__);
- uint64_t embd_size;
- io.read_to(&embd_size, sizeof(embd_size));
- if (this->embd_size < embd_size) {
- throw std::runtime_error("embeddings buffer too small");
- }
- if (embd_size) {
- io.read_to(this->embd, embd_size * sizeof(float));
- }
- }
- if (memory) {
- LLAMA_LOG_DEBUG("%s: - reading KV self\n", __func__);
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- kv_self->state_read(io);
- }
- return io.n_bytes();
- }
- size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id) {
- GGML_UNUSED(seq_id);
- if (memory) {
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- kv_self->state_write(io, seq_id);
- }
- return io.n_bytes();
- }
- size_t llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id seq_id) {
- GGML_UNUSED(seq_id);
- if (memory) {
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- kv_self->state_read(io, seq_id);
- }
- return io.n_bytes();
- }
- //
- // perf
- //
- llama_perf_context_data llama_context::perf_get_data() const {
- llama_perf_context_data data = {};
- data.t_start_ms = 1e-3 * t_start_us;
- data.t_load_ms = 1e-3 * t_load_us;
- data.t_p_eval_ms = 1e-3 * t_p_eval_us;
- data.t_eval_ms = 1e-3 * t_eval_us;
- data.n_p_eval = std::max(1, n_p_eval);
- data.n_eval = std::max(1, n_eval);
- return data;
- }
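- // e.g. generation throughput can be derived from this struct as
- //   1e3 * data.n_eval / data.t_eval_ms   // tokens per second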
- void llama_context::perf_reset() {
- t_start_us = ggml_time_us();
- t_eval_us = n_eval = 0;
- t_p_eval_us = n_p_eval = 0;
- }
- //
- // training
- //
- static void llama_set_param(struct ggml_tensor * tensor, llama_opt_param_filter param_filter, void * userdata) {
- if (!tensor || tensor->type != GGML_TYPE_F32) {
- return;
- }
- if (!param_filter(tensor, userdata)) {
- return;
- }
- if (strcmp(tensor->name, "token_embd.weight") == 0) {
- return; // FIXME
- }
- if (strcmp(tensor->name, "rope_freqs.weight") == 0) {
- return; // FIXME
- }
- ggml_set_param(tensor);
- }
- void llama_context::opt_init(struct llama_model * model, struct llama_opt_params lopt_params) {
- GGML_ASSERT(!opt_ctx);
- model->hparams.n_ctx_train = lopt_params.n_ctx_train > 0 ? lopt_params.n_ctx_train : n_ctx();
- const uint32_t n_batch = std::min(this->n_batch(), model->hparams.n_ctx_train);
- const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch);
- GGML_ASSERT(model->hparams.n_ctx_train % n_batch == 0);
- GGML_ASSERT(n_batch % n_ubatch == 0);
- ggml_opt_params opt_params = ggml_opt_default_params(sched.get(), GGML_OPT_LOSS_TYPE_CROSS_ENTROPY);
- opt_params.opt_period = n_batch / n_ubatch;
- opt_params.get_opt_pars = lopt_params.get_opt_pars;
- opt_params.get_opt_pars_ud = lopt_params.get_opt_pars_ud;
- opt_ctx = ggml_opt_init(opt_params);
- llama_opt_param_filter param_filter = lopt_params.param_filter;
- void * param_filter_ud = lopt_params.param_filter_ud;
- //llama_set_param(model->tok_embd, param_filter, param_filter_ud); // FIXME
- llama_set_param(model->type_embd, param_filter, param_filter_ud);
- llama_set_param(model->pos_embd, param_filter, param_filter_ud);
- llama_set_param(model->tok_norm, param_filter, param_filter_ud);
- llama_set_param(model->tok_norm_b, param_filter, param_filter_ud);
- llama_set_param(model->output_norm, param_filter, param_filter_ud);
- llama_set_param(model->output_norm_b, param_filter, param_filter_ud);
- llama_set_param(model->output, param_filter, param_filter_ud);
- llama_set_param(model->output_b, param_filter, param_filter_ud);
- llama_set_param(model->output_norm_enc, param_filter, param_filter_ud);
- llama_set_param(model->cls, param_filter, param_filter_ud);
- llama_set_param(model->cls_b, param_filter, param_filter_ud);
- llama_set_param(model->cls_out, param_filter, param_filter_ud);
- llama_set_param(model->cls_out_b, param_filter, param_filter_ud);
- for (struct llama_layer & layer : model->layers) {
- for (size_t i = 0; i < sizeof(layer)/sizeof(struct ggml_tensor *); ++i) {
- llama_set_param(reinterpret_cast<struct ggml_tensor **>(&layer)[i], param_filter, param_filter_ud);
- }
- }
- }
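- // Illustrative setup - only the fields consumed above are shown; the default
- // optimizer-parameter getter from ggml-opt is an assumption here:
- //   llama_opt_params p {};
- //   p.n_ctx_train  = 0;                           // 0 -> fall back to n_ctx()
- //   p.param_filter = llama_opt_param_filter_all;  // train every f32 tensor
- //   p.get_opt_pars = ggml_opt_get_default_optimizer_params;
- //   llama_opt_init(ctx, model, p);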
- void llama_context::opt_epoch_iter(
- ggml_opt_dataset_t dataset,
- ggml_opt_result_t result,
- const std::vector<llama_token> & tokens,
- const std::vector<llama_token> & labels_sparse,
- llama_batch & batch,
- ggml_opt_epoch_callback callback,
- bool train,
- int64_t idata_in_loop,
- int64_t ndata_in_loop,
- int64_t t_loop_start) {
- GGML_ASSERT(opt_ctx);
- const uint32_t n_ctx = llama_model_n_ctx_train(&model);
- const uint32_t n_batch = std::min(this->n_batch(), n_ctx);
- const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch);
- llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
- kv_self->clear();
- for (uint32_t pos_ctx = 0; pos_ctx < n_ctx; pos_ctx += n_batch) {
- batch.n_tokens = n_batch;
- for (uint32_t pos_batch = 0; pos_batch < n_batch; ++pos_batch) {
- batch.token [pos_batch] = tokens[pos_ctx + pos_batch];
- batch.pos [pos_batch] = pos_ctx + pos_batch;
- batch.n_seq_id[pos_batch] = 1;
- batch.seq_id [pos_batch][0] = 0;
- batch.logits [pos_batch] = true;
- }
- const auto n_tokens_all = batch.n_tokens;
- n_queued_tokens += n_tokens_all;
- // embd_pooled indicates pooled embedding, in which case batch.logits is ignored and all tokens are output
- const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE;
- embd_seq.clear();
- int64_t n_outputs_all = n_tokens_all;
- auto kv_state = kv_self->init_batch(batch, cparams.n_ubatch, embd_pooled, /* logits_all */ true);
- if (!kv_state || kv_state->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
- LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__);
- break;
- }
- // reserve output buffer
- if (output_reserve(n_outputs_all) < n_outputs_all) {
- LLAMA_LOG_ERROR("%s: could not reserve space for batch with %" PRId64 " outputs\n", __func__, n_outputs_all);
- GGML_ABORT("TODO: handle this error");
- }
- uint32_t pos_batch = 0;
- do {
- const auto & ubatch = kv_state->get_ubatch();
- n_outputs = ubatch.n_tokens;
- if (!kv_state->apply()) {
- LLAMA_LOG_ERROR("%s: failed to update the memory state\n", __func__);
- break;
- }
- auto * gf = graph_init();
- auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, kv_state.get());
- struct ggml_context * ctx_compute_opt;
- {
- const size_t size_gf = ggml_graph_size(gf);
- const size_t size_meta = 4*size_gf*ggml_tensor_overhead() + 2*ggml_graph_overhead_custom(size_gf, /*grads = */ true);
- struct ggml_init_params params = {
- /*.mem_size =*/ size_meta,
- /*.mem_buffer =*/ nullptr,
- /*.no_alloc =*/ true,
- };
- ctx_compute_opt = ggml_init(params);
- }
- ggml_opt_prepare_alloc(opt_ctx, ctx_compute_opt, gf, res->get_tokens(), res->get_logits());
- ggml_opt_alloc(opt_ctx, train);
- res->set_inputs(&ubatch);
- {
- struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
- GGML_ASSERT(labels->ne[1] == n_ubatch);
- ggml_set_zero(labels);
- const float onef = 1.0f;
- for (uint32_t pos_ubatch = 0; pos_ubatch < n_ubatch; ++pos_ubatch) {
- const uint32_t ilabel = pos_ctx + pos_batch + pos_ubatch;
- GGML_ASSERT(labels_sparse[ilabel] < labels->ne[0]);
- ggml_backend_tensor_set(labels, &onef, (pos_ubatch*labels->ne[0] + labels_sparse[ilabel])*sizeof(float), sizeof(float));
- }
- }
- ggml_opt_eval(opt_ctx, result);
- if (callback) {
- callback(train, opt_ctx, dataset, result, idata_in_loop + (pos_ctx + pos_batch)/n_ubatch + 1, ndata_in_loop, t_loop_start);
- }
- ggml_free(ctx_compute_opt);
- pos_batch += ubatch.n_tokens;
- } while (kv_state->next());
- }
- }
- void llama_context::opt_epoch(
- ggml_opt_dataset_t dataset,
- ggml_opt_result_t result_train,
- ggml_opt_result_t result_eval,
- int64_t idata_split,
- ggml_opt_epoch_callback callback_train,
- ggml_opt_epoch_callback callback_eval) {
- const uint32_t n_ctx = this->n_ctx();
- const uint32_t n_batch = std::min(cparams.n_batch, n_ctx);
- const uint32_t n_ubatch = std::min(cparams.n_ubatch, n_batch);
- const int64_t ndata = ggml_opt_dataset_ndata(dataset);
- GGML_ASSERT(idata_split >= 0);
- GGML_ASSERT(idata_split <= ndata);
- const uint32_t ubatch_per_ctx = n_ctx / n_ubatch;
- struct llama_batch batch = llama_batch_init(n_batch, 0, 1);
- std::vector<llama_token> tokens(n_ctx);
- std::vector<llama_token> labels_sparse(n_ctx);
- int64_t idata = 0;
- int64_t t_loop_start = ggml_time_us();
- int64_t ndata_in_loop = idata_split*ubatch_per_ctx;
- for (; idata < idata_split; ++idata) {
- constexpr bool train = true;
- const int64_t idata_in_loop = idata*ubatch_per_ctx;
- ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata);
- opt_epoch_iter(dataset, result_train, tokens, labels_sparse, batch,
- callback_train, train, idata_in_loop, ndata_in_loop, t_loop_start);
- }
- t_loop_start = ggml_time_us();
- ndata_in_loop = (ndata - idata_split)*ubatch_per_ctx;
- for (; idata < ndata; ++idata) {
- constexpr bool train = false;
- const int64_t idata_in_loop = (idata - idata_split)*ubatch_per_ctx;
- ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata);
- opt_epoch_iter(dataset, result_eval, tokens, labels_sparse, batch,
- callback_eval, train, idata_in_loop, ndata_in_loop, t_loop_start);
- }
- llama_batch_free(batch);
- }
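- // Typical driver (sketch): split the dataset, then alternate train/eval epochs.
- // The progress-bar callback from ggml-opt is an assumption here:
- //   const int64_t ndata  = ggml_opt_dataset_ndata(dataset);
- //   const int64_t nsplit = ndata * 9 / 10;   // e.g. 90/10 train/eval split
- //   llama_opt_epoch(ctx, dataset, result_train, result_eval, nsplit,
- //                   ggml_opt_epoch_callback_progress_bar,
- //                   ggml_opt_epoch_callback_progress_bar);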
- //
- // interface implementation
- //
- llama_context_params llama_context_default_params() {
- llama_context_params result = {
- /*.n_ctx =*/ 512,
- /*.n_batch =*/ 2048,
- /*.n_ubatch =*/ 512,
- /*.n_seq_max =*/ 1,
- /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
- /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
- /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
- /*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
- /*.attention_type =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED,
- /*.rope_freq_base =*/ 0.0f,
- /*.rope_freq_scale =*/ 0.0f,
- /*.yarn_ext_factor =*/ -1.0f,
- /*.yarn_attn_factor =*/ 1.0f,
- /*.yarn_beta_fast =*/ 32.0f,
- /*.yarn_beta_slow =*/ 1.0f,
- /*.yarn_orig_ctx =*/ 0,
- /*.defrag_thold =*/ -1.0f,
- /*.cb_eval =*/ nullptr,
- /*.cb_eval_user_data =*/ nullptr,
- /*.type_k =*/ GGML_TYPE_F16,
- /*.type_v =*/ GGML_TYPE_F16,
- /*.abort_callback =*/ nullptr,
- /*.abort_callback_data =*/ nullptr,
- /*.embeddings =*/ false,
- /*.offload_kqv =*/ true,
- /*.flash_attn =*/ false,
- /*.no_perf =*/ true,
- /*.op_offload =*/ true,
- /*.swa_full =*/ true,
- };
- return result;
- }
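- // Typical use: start from the defaults and override selectively, e.g.
- //   llama_context_params cparams = llama_context_default_params();
- //   cparams.n_ctx = 4096;
- //   llama_context * ctx = llama_init_from_model(model, cparams);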
- llama_context * llama_init_from_model(
- llama_model * model,
- llama_context_params params) {
- if (!model) {
- LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__);
- return nullptr;
- }
- if (params.n_batch == 0 && params.n_ubatch == 0) {
- LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__);
- return nullptr;
- }
- if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) {
- LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__);
- return nullptr;
- }
- if (params.flash_attn && model->arch == LLM_ARCH_GROK) {
- LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
- params.flash_attn = false;
- }
- if (ggml_is_quantized(params.type_v) && !params.flash_attn) {
- LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
- return nullptr;
- }
- try {
- auto * ctx = new llama_context(*model, params);
- return ctx;
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: failed to initialize the context: %s\n", __func__, err.what());
- }
- return nullptr;
- }
- // deprecated
- llama_context * llama_new_context_with_model(
- llama_model * model,
- llama_context_params params) {
- return llama_init_from_model(model, params);
- }
- void llama_free(llama_context * ctx) {
- delete ctx;
- }
- uint32_t llama_n_ctx(const llama_context * ctx) {
- return ctx->n_ctx();
- }
- uint32_t llama_n_batch(const llama_context * ctx) {
- return ctx->n_batch();
- }
- uint32_t llama_n_ubatch(const llama_context * ctx) {
- return ctx->n_ubatch();
- }
- uint32_t llama_n_seq_max(const llama_context * ctx) {
- return ctx->n_seq_max();
- }
- const llama_model * llama_get_model(const llama_context * ctx) {
- return &ctx->get_model();
- }
- llama_kv_cache * llama_get_kv_self(llama_context * ctx) {
- return ctx->get_kv_self();
- }
- void llama_kv_self_update(llama_context * ctx) {
- ctx->kv_self_update();
- }
- enum llama_pooling_type llama_pooling_type(const llama_context * ctx) {
- return ctx->pooling_type();
- }
- void llama_attach_threadpool(
- llama_context * ctx,
- ggml_threadpool_t threadpool,
- ggml_threadpool_t threadpool_batch) {
- ctx->attach_threadpool(threadpool, threadpool_batch);
- }
- void llama_detach_threadpool(llama_context * ctx) {
- ctx->detach_threadpool();
- }
- void llama_set_n_threads(llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) {
- ctx->set_n_threads(n_threads, n_threads_batch);
- }
- int32_t llama_n_threads(llama_context * ctx) {
- return ctx->n_threads();
- }
- int32_t llama_n_threads_batch(llama_context * ctx) {
- return ctx->n_threads_batch();
- }
- void llama_set_abort_callback(llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) {
- ctx->set_abort_callback(abort_callback, abort_callback_data);
- }
- void llama_set_embeddings(llama_context * ctx, bool embeddings) {
- ctx->set_embeddings(embeddings);
- }
- void llama_set_causal_attn(llama_context * ctx, bool causal_attn) {
- ctx->set_causal_attn(causal_attn);
- }
- void llama_set_warmup(llama_context * ctx, bool warmup) {
- ctx->set_warmup(warmup);
- }
- void llama_synchronize(llama_context * ctx) {
- ctx->synchronize();
- }
- float * llama_get_logits(llama_context * ctx) {
- ctx->synchronize();
- return ctx->get_logits();
- }
- float * llama_get_logits_ith(llama_context * ctx, int32_t i) {
- ctx->synchronize();
- return ctx->get_logits_ith(i);
- }
- float * llama_get_embeddings(llama_context * ctx) {
- ctx->synchronize();
- return ctx->get_embeddings();
- }
- float * llama_get_embeddings_ith(llama_context * ctx, int32_t i) {
- ctx->synchronize();
- return ctx->get_embeddings_ith(i);
- }
- float * llama_get_embeddings_seq(llama_context * ctx, llama_seq_id seq_id) {
- ctx->synchronize();
- return ctx->get_embeddings_seq(seq_id);
- }
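- // All getters above synchronize first, so results are safe to read right after
- // a decode call, e.g. (assumes batch.logits was set for the last token):
- //   llama_decode(ctx, batch);
- //   const float * logits = llama_get_logits_ith(ctx, batch.n_tokens - 1);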
- // llama adapter API
- int32_t llama_set_adapter_lora(
- llama_context * ctx,
- llama_adapter_lora * adapter,
- float scale) {
- ctx->set_adapter_lora(adapter, scale);
- return 0;
- }
- int32_t llama_rm_adapter_lora(
- llama_context * ctx,
- llama_adapter_lora * adapter) {
- bool res = ctx->rm_adapter_lora(adapter);
- return res ? 0 : -1;
- }
- void llama_clear_adapter_lora(llama_context * ctx) {
- ctx->clear_adapter_lora();
- }
- int32_t llama_apply_adapter_cvec(
- llama_context * ctx,
- const float * data,
- size_t len,
- int32_t n_embd,
- int32_t il_start,
- int32_t il_end) {
- bool res = ctx->apply_adapter_cvec(data, len, n_embd, il_start, il_end);
- return res ? 0 : -1;
- }
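- // Illustrative adapter usage with the wrappers above:
- //   llama_set_adapter_lora(ctx, adapter, 1.0f);  // attach (or rescale) a LoRA
- //   llama_rm_adapter_lora(ctx, adapter);         // 0 on success, -1 if absent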
- //
- // kv cache
- //
- // deprecated
- int32_t llama_kv_self_n_tokens(const llama_context * ctx) {
- const auto * kv = ctx->get_kv_self();
- if (!kv) {
- return 0;
- }
- int32_t res = 0;
- for (uint32_t s = 0; s < ctx->get_cparams().n_seq_max; s++) {
- const llama_pos p0 = kv->seq_pos_min(s);
- const llama_pos p1 = kv->seq_pos_max(s);
- if (p0 >= 0) {
- res += (p1 - p0) + 1;
- }
- }
- return res;
- }
- // deprecated
- // note: this is the same as above - will be removed anyway, so it's ok
- int32_t llama_kv_self_used_cells(const llama_context * ctx) {
- const auto * kv = ctx->get_kv_self();
- if (!kv) {
- return 0;
- }
- int32_t res = 0;
- for (uint32_t s = 0; s < ctx->get_cparams().n_seq_max; s++) {
- const llama_pos p0 = kv->seq_pos_min(s);
- const llama_pos p1 = kv->seq_pos_max(s);
- if (p0 >= 0) {
- res += (p1 - p0) + 1;
- }
- }
- return res;
- }
- void llama_kv_self_clear(llama_context * ctx) {
- auto * kv = ctx->get_kv_self();
- if (!kv) {
- return;
- }
- kv->clear();
- }
- bool llama_kv_self_seq_rm(
- llama_context * ctx,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1) {
- auto * kv = ctx->get_kv_self();
- if (!kv) {
- return true;
- }
- return kv->seq_rm(seq_id, p0, p1);
- }
- void llama_kv_self_seq_cp(
- llama_context * ctx,
- llama_seq_id seq_id_src,
- llama_seq_id seq_id_dst,
- llama_pos p0,
- llama_pos p1) {
- auto * kv = ctx->get_kv_self();
- if (!kv) {
- return;
- }
- kv->seq_cp(seq_id_src, seq_id_dst, p0, p1);
- }
- void llama_kv_self_seq_keep(llama_context * ctx, llama_seq_id seq_id) {
- auto * kv = ctx->get_kv_self();
- if (!kv) {
- return;
- }
- kv->seq_keep(seq_id);
- }
- void llama_kv_self_seq_add(
- llama_context * ctx,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1,
- llama_pos delta) {
- auto * kv = ctx->get_kv_self();
- if (!kv) {
- return;
- }
- kv->seq_add(seq_id, p0, p1, delta);
- }
- void llama_kv_self_seq_div(
- llama_context * ctx,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1,
- int d) {
- auto * kv = ctx->get_kv_self();
- if (!kv) {
- return;
- }
- kv->seq_div(seq_id, p0, p1, d);
- }
- llama_pos llama_kv_self_seq_pos_min(llama_context * ctx, llama_seq_id seq_id) {
- const auto * kv = ctx->get_kv_self();
- if (!kv) {
- return -1;
- }
- return kv->seq_pos_min(seq_id);
- }
- llama_pos llama_kv_self_seq_pos_max(llama_context * ctx, llama_seq_id seq_id) {
- const auto * kv = ctx->get_kv_self();
- if (!kv) {
- return -1;
- }
- return kv->seq_pos_max(seq_id);
- }
- void llama_kv_self_defrag(llama_context * ctx) {
- auto * kv = ctx->get_kv_self();
- if (!kv) {
- return;
- }
- // force defrag
- kv->defrag_sched(-1.0f);
- }
- bool llama_kv_self_can_shift(const llama_context * ctx) {
- const auto * kv = ctx->get_kv_self();
- if (!kv) {
- return false;
- }
- return kv->get_can_shift();
- }
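- // Illustrative cache manipulation via the wrappers above (negative positions
- // are treated as open-ended ranges by the underlying cache):
- //   llama_kv_self_seq_rm(ctx, 0, 32, -1);      // drop seq 0 from pos 32 onwards
- //   llama_kv_self_seq_cp(ctx, 0, 1, -1, -1);   // copy all of seq 0 into seq 1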
- // llama state API
- // deprecated
- size_t llama_get_state_size(llama_context * ctx) {
- return llama_state_get_size(ctx);
- }
- // deprecated
- size_t llama_copy_state_data(llama_context * ctx, uint8_t * dst) {
- return llama_state_get_data(ctx, dst, -1);
- }
- // deprecated
- size_t llama_set_state_data(llama_context * ctx, const uint8_t * src) {
- return llama_state_set_data(ctx, src, -1);
- }
- // deprecated
- bool llama_load_session_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
- }
- // deprecated
- bool llama_save_session_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
- return llama_state_save_file(ctx, path_session, tokens, n_token_count);
- }
- // Returns the *actual* size of the state.
- // Intended to be used when saving the state to a buffer.
- size_t llama_state_get_size(llama_context * ctx) {
- return ctx->state_get_size();
- }
- size_t llama_state_get_data(llama_context * ctx, uint8_t * dst, size_t size) {
- ctx->synchronize();
- return ctx->state_get_data(dst, size);
- }
- // Sets the state reading from the specified source address
- size_t llama_state_set_data(llama_context * ctx, const uint8_t * src, size_t size) {
- ctx->synchronize();
- return ctx->state_set_data(src, size);
- }
- bool llama_state_load_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- ctx->synchronize();
- try {
- return ctx->state_load_file(path_session, tokens_out, n_token_capacity, n_token_count_out);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what());
- return false;
- }
- }
- bool llama_state_save_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
- ctx->synchronize();
- try {
- return ctx->state_save_file(path_session, tokens, n_token_count);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what());
- return false;
- }
- }
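- // Session round trip (sketch; "session.bin" is a placeholder path):
- //   llama_state_save_file(ctx, "session.bin", tokens.data(), tokens.size());
- //   size_t n_loaded = 0;
- //   llama_state_load_file(ctx, "session.bin", tokens.data(), tokens.size(), &n_loaded);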
- size_t llama_state_seq_get_size(llama_context * ctx, llama_seq_id seq_id) {
- return ctx->state_seq_get_size(seq_id);
- }
- size_t llama_state_seq_get_data(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) {
- ctx->synchronize();
- return ctx->state_seq_get_data(seq_id, dst, size);
- }
- size_t llama_state_seq_set_data(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id) {
- ctx->synchronize();
- return ctx->state_seq_set_data(seq_id, src, size);
- }
- size_t llama_state_seq_save_file(llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
- ctx->synchronize();
- try {
- return ctx->state_seq_save_file(seq_id, filepath, tokens, n_token_count);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what());
- return 0;
- }
- }
- size_t llama_state_seq_load_file(llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- ctx->synchronize();
- try {
- return ctx->state_seq_load_file(dest_seq_id, filepath, tokens_out, n_token_capacity, n_token_count_out);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what());
- return 0;
- }
- }
- //
- // encode / decode
- //
- int32_t llama_encode(
- llama_context * ctx,
- llama_batch batch) {
- const int ret = ctx->encode(batch);
- if (ret != 0) {
- LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret);
- }
- return ret;
- }
- int32_t llama_decode(
- llama_context * ctx,
- llama_batch batch) {
- const int ret = ctx->decode(batch);
- if (ret != 0 && ret != 1) {
- LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
- }
- return ret;
- }
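- // Per the check above, ret == 1 is a non-fatal condition (no KV slot found for
- // the batch), e.g.:
- //   const int32_t ret = llama_decode(ctx, batch);
- //   if (ret == 1)      { /* retry with a smaller batch or free cache space */ }
- //   else if (ret != 0) { /* fatal error */ }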
- //
- // perf
- //
- llama_perf_context_data llama_perf_context(const llama_context * ctx) {
- llama_perf_context_data data = {};
- if (ctx == nullptr) {
- return data;
- }
- data = ctx->perf_get_data();
- return data;
- }
- void llama_perf_context_print(const llama_context * ctx) {
- const auto data = llama_perf_context(ctx);
- const double t_end_ms = 1e-3 * ggml_time_us();
- LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, data.t_load_ms);
- LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
- __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval);
- LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
- __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval);
- LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval));
- }
- void llama_perf_context_reset(llama_context * ctx) {
- ctx->perf_reset();
- }
- //
- // training
- //
- bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata) {
- GGML_UNUSED(tensor);
- GGML_UNUSED(userdata);
- return true;
- }
- void llama_opt_init(struct llama_context * ctx, struct llama_model * model, struct llama_opt_params lopt_params) {
- ctx->opt_init(model, lopt_params);
- }
- void llama_opt_epoch(
- struct llama_context * ctx,
- ggml_opt_dataset_t dataset,
- ggml_opt_result_t result_train,
- ggml_opt_result_t result_eval,
- int64_t idata_split,
- ggml_opt_epoch_callback callback_train,
- ggml_opt_epoch_callback callback_eval) {
- ctx->opt_epoch(
- dataset,
- result_train,
- result_eval,
- idata_split,
- callback_train,
- callback_eval);
- }
|