llama.cpp
  1. // Defines fileno on msys:
  2. #ifndef _GNU_SOURCE
  3. #define _GNU_SOURCE
  4. #include <cstddef>
  5. #include <cstdint>
  6. #include <cstdio>
  7. #endif
  8. #include "llama-util.h"
  9. #include "llama.h"
  10. #include "ggml.h"
  11. #ifdef GGML_USE_CUBLAS
  12. #include "ggml-cuda.h"
  13. #elif defined(GGML_USE_CLBLAST)
  14. #include "ggml-opencl.h"
  15. #endif
  16. #ifdef GGML_USE_METAL
  17. #include "ggml-metal.h"
  18. #endif
  19. #include <array>
  20. #include <ctime>
  21. #include <cinttypes>
  22. #include <fstream>
  23. #include <random>
  24. #include <map>
  25. #include <unordered_map>
  26. #include <queue>
  27. #include <cassert>
  28. #include <cstring>
  29. #include <climits>
  30. #include <memory>
  31. #include <algorithm>
  32. #include <initializer_list>
  33. #include <thread>
  34. #include <atomic>
  35. #include <mutex>
  36. #include <sstream>
  37. #include <numeric>
  38. #define LLAMA_USE_SCRATCH
  39. #define LLAMA_MAX_SCRATCH_BUFFERS 16
  40. // available llama models
  41. enum e_model {
  42. MODEL_UNKNOWN,
  43. MODEL_3B,
  44. MODEL_7B,
  45. MODEL_13B,
  46. MODEL_30B,
  47. MODEL_65B,
  48. };
  49. static const size_t MB = 1024*1024;
  50. // computed for n_ctx == 2048
  51. // TODO: dynamically determine these sizes
  52. // needs modifications in ggml
  53. typedef void (*offload_func_t)(struct ggml_tensor * tensor);
  54. void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
  55. (void) tensor;
  56. }
  57. static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0()
  58. {
  59. static std::map<e_model, size_t> k_sizes = {
  60. { MODEL_3B, 256ull * MB },
  61. { MODEL_7B, 512ull * MB },
  62. { MODEL_13B, 512ull * MB },
  63. { MODEL_30B, 512ull * MB },
  64. { MODEL_65B, 1024ull * MB },
  65. };
  66. return k_sizes;
  67. }
  68. static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1()
  69. {
  70. static std::map<e_model, size_t> k_sizes = {
  71. { MODEL_3B, 256ull * MB },
  72. { MODEL_7B, 512ull * MB },
  73. { MODEL_13B, 512ull * MB },
  74. { MODEL_30B, 512ull * MB },
  75. { MODEL_65B, 1024ull * MB },
  76. };
  77. return k_sizes;
  78. }
  79. // 2*n_embd*n_ctx*n_layer*sizeof(float16)
  80. static const std::map<e_model, size_t> & MEM_REQ_KV_SELF()
  81. {
  82. static std::map<e_model, size_t> k_sizes = {
  83. { MODEL_3B, 682ull * MB },
  84. { MODEL_7B, 1026ull * MB },
  85. { MODEL_13B, 1608ull * MB },
  86. { MODEL_30B, 3124ull * MB },
  87. { MODEL_65B, 5120ull * MB },
  88. };
  89. return k_sizes;
  90. }
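// Worked example of the formula above, under the n_ctx == 2048 assumption noted earlier and
// the default 7B hparams further down (n_embd = 4096, n_layer = 32, fp16 cache entries):
//   2 * 4096 * 2048 * 32 * sizeof(ggml_fp16_t) = 2 * 4096 * 2048 * 32 * 2 bytes
//                                              = 1'073'741'824 bytes = 1024 MB
// which lines up with the 1026ull * MB entry for MODEL_7B once the ~2 MB of ggml context
// overhead added by kv_cache_init() is included.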
  91. // this is mostly needed for temporary mul_mat buffers to dequantize the data
  92. // not actually needed if BLAS is disabled
  93. static const std::map<e_model, size_t> & MEM_REQ_EVAL()
  94. {
  95. static std::map<e_model, size_t> k_sizes = {
  96. { MODEL_3B, 512ull * MB },
  97. { MODEL_7B, 768ull * MB },
  98. { MODEL_13B, 1024ull * MB },
  99. { MODEL_30B, 1280ull * MB },
  100. { MODEL_65B, 1536ull * MB },
  101. };
  102. return k_sizes;
  103. }
  104. // default hparams (LLaMA 7B)
  105. struct llama_hparams {
  106. uint32_t n_vocab = 32000;
  107. uint32_t n_ctx = 512; // this is provided as user input?
  108. uint32_t n_embd = 4096;
  109. uint32_t n_mult = 256;
  110. uint32_t n_head = 32;
  111. uint32_t n_layer = 32;
  112. uint32_t n_rot = 64;
  113. enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;
  114. bool operator!=(const llama_hparams & other) const {
  115. return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams)));
  116. }
  117. };
  118. struct llama_layer {
  119. // normalization
  120. struct ggml_tensor * attention_norm;
  121. // attention
  122. struct ggml_tensor * wq;
  123. struct ggml_tensor * wk;
  124. struct ggml_tensor * wv;
  125. struct ggml_tensor * wo;
  126. // normalization
  127. struct ggml_tensor * ffn_norm;
  128. // ff
  129. struct ggml_tensor * w1;
  130. struct ggml_tensor * w2;
  131. struct ggml_tensor * w3;
  132. };
  133. struct llama_kv_cache {
  134. struct ggml_tensor * k;
  135. struct ggml_tensor * v;
  136. struct ggml_context * ctx = NULL;
  137. llama_ctx_buffer buf;
  138. int n; // number of tokens currently in the cache
  139. ~llama_kv_cache() {
  140. if (ctx) {
  141. ggml_free(ctx);
  142. }
  143. #ifdef GGML_USE_CUBLAS
  144. ggml_cuda_free_data(k);
  145. ggml_cuda_free_data(v);
  146. #endif // GGML_USE_CUBLAS
  147. }
  148. };
  149. struct llama_model {
  150. e_model type = MODEL_UNKNOWN;
  151. llama_hparams hparams;
  152. struct ggml_tensor * tok_embeddings;
  153. struct ggml_tensor * norm;
  154. struct ggml_tensor * output;
  155. std::vector<llama_layer> layers;
  156. int n_gpu_layers;
  157. // context
  158. struct ggml_context * ctx = NULL;
  159. // key + value cache for the self attention
  160. // TODO: move to llama_state
  161. struct llama_kv_cache kv_self;
  162. // the model memory buffer
  163. llama_ctx_buffer buf;
  164. // model memory mapped file
  165. std::unique_ptr<llama_mmap> mapping;
  166. // objects representing data potentially being locked in memory
  167. llama_mlock mlock_buf;
  168. llama_mlock mlock_mmap;
  169. // for quantize-stats only
  170. std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
  171. ~llama_model() {
  172. if (ctx) {
  173. ggml_free(ctx);
  174. }
  175. #ifdef GGML_USE_CUBLAS
  176. for (size_t i = 0; i < tensors_by_name.size(); ++i) {
  177. ggml_cuda_free_data(tensors_by_name[i].second);
  178. }
  179. ggml_cuda_free_scratch();
  180. #elif defined(GGML_USE_CLBLAST)
  181. for (size_t i = 0; i < tensors_by_name.size(); ++i) {
  182. ggml_cl_free_data(tensors_by_name[i].second);
  183. }
  184. #endif
  185. }
  186. };
  187. struct llama_vocab {
  188. using id = int32_t;
  189. using token = std::string;
  190. struct token_score {
  191. token tok;
  192. float score;
  193. };
  194. std::unordered_map<token, id> token_to_id;
  195. std::vector<token_score> id_to_token;
  196. };
  197. struct llama_context {
  198. std::mt19937 rng;
  199. int64_t t_load_us = 0;
  200. int64_t t_start_us = 0;
  201. bool has_evaluated_once = false;
  202. int64_t t_sample_us = 0;
  203. int64_t t_eval_us = 0;
  204. int64_t t_p_eval_us = 0;
  205. int32_t n_sample = 0; // number of tokens sampled
  206. int32_t n_eval = 0; // number of eval calls
  207. int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
  208. llama_model model;
  209. llama_vocab vocab;
  210. size_t mem_per_token = 0;
  211. // decode output (2-dimensional array: [n_tokens][n_vocab])
  212. std::vector<float> logits;
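// (row-major: the logit of vocabulary id `tok` for the i-th evaluated token is
// logits[i*n_vocab + tok]; unless logits_all is set, only the last token's row is stored)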
  213. bool logits_all = false;
  214. // input embedding (1-dimensional array: [n_embd])
  215. std::vector<float> embedding;
  216. // memory buffers used to evaluate the model
  217. // TODO: move in llama_state
  218. llama_ctx_buffer buf_compute;
  219. llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
  220. #ifdef GGML_USE_METAL
  221. ggml_metal_context * ctx_metal = NULL;
  222. #endif
  223. int buf_last = 0;
  224. size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
  225. void use_buf(struct ggml_context * ctx, int i) {
  226. #if defined(LLAMA_USE_SCRATCH)
  227. size_t last_size = 0;
  228. if (i == -1) {
  229. last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, });
  230. } else {
  231. auto & buf = buf_scratch[i];
  232. last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, });
  233. }
  234. if (buf_last >= 0) {
  235. buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size);
  236. }
  237. buf_last = i;
  238. #else
  239. (void) i;
  240. (void) ctx;
  241. #endif
  242. }
  243. size_t get_buf_max_mem(int i) const {
  244. #if defined(LLAMA_USE_SCRATCH)
  245. return buf_max_size[i];
  246. #else
  247. (void) i;
  248. return 0;
  249. #endif
  250. }
  251. };
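// Illustrative sketch (not compiled) of how the scratch-buffer plumbing above is driven while
// building the eval graph: intermediate allocations are routed into alternating scratch
// buffers and -1 restores ordinary allocation. The "build ..." steps are placeholders.
#if 0
static void example_scratch_usage(llama_context & lctx, struct ggml_context * ctx0) {
    lctx.use_buf(ctx0, 0);   // subsequent ggml tensors are allocated inside buf_scratch[0]
    // ... build the attention part of a layer ...
    lctx.use_buf(ctx0, 1);   // switch: new tensors now land in buf_scratch[1]
    // ... build the feed-forward part ...
    lctx.use_buf(ctx0, -1);  // back to normal allocation (e.g. for the logits tensor)
    // lctx.get_buf_max_mem(0) / get_buf_max_mem(1) later report each buffer's high-water mark
}
#endif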
  252. template <typename T>
  253. static T checked_mul(T a, T b) {
  254. T ret = a * b;
  255. if (a != 0 && ret / a != b) {
  256. throw std::runtime_error(format("overflow multiplying %llu * %llu",
  257. (unsigned long long) a, (unsigned long long) b));
  258. }
  259. return ret;
  260. }
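// (the `ret / a != b` test above catches wrap-around: e.g. with a 32-bit size_t,
//  65'536 * 65'537 wraps to 65'536, and 65'536 / 65'536 == 1 != 65'537, so it throws)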
  261. static size_t checked_div(size_t a, size_t b) {
  262. if (b == 0 || a % b != 0) {
  263. throw std::runtime_error(format("error dividing %zu / %zu", a, b));
  264. }
  265. return a / b;
  266. }
  267. static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
  268. char buf[256];
  269. snprintf(buf, sizeof(buf), "%5u", ne.at(0));
  270. for (size_t i = 1; i < ne.size(); i++) {
  271. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
  272. }
  273. return buf;
  274. }
  275. static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) {
  276. size_t size = ggml_type_size(type);
  277. for (uint32_t dim : ne) {
  278. size = checked_mul<size_t>(size, dim);
  279. }
  280. return size / ggml_blck_size(type);
  281. }
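// Worked example for llama_calc_tensor_size() (a sketch; assumes the usual GGML Q4_0 layout of
// 32-weight blocks stored in 18 bytes, i.e. type size 18 and block size 32):
//   ne = {4096, 4096}, type = GGML_TYPE_Q4_0
//   size = 18 * 4096 * 4096 / 32 = 9'437'184 bytes (exactly 9 MB)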
  282. struct llama_load_tensor_shard {
  283. std::vector<uint32_t> ne;
  284. size_t size;
  285. enum ggml_type type;
  286. size_t file_idx;
  287. size_t file_off;
  288. void calc_size() {
  289. size = llama_calc_tensor_size(ne, type);
  290. }
  291. };
  292. enum llama_split_type {
  293. SPLIT_NONE,
  294. SPLIT_BY_COLUMNS,
  295. SPLIT_BY_ROWS
  296. };
  297. struct llama_load_tensor {
  298. std::vector<llama_load_tensor_shard> shards;
  299. std::string name;
  300. enum ggml_type type = GGML_TYPE_F32;
  301. llama_split_type split_type = SPLIT_NONE;
  302. std::vector<uint32_t> ne;
  303. size_t size;
  304. struct ggml_tensor * ggml_tensor = NULL;
  305. uint8_t * data;
  306. llama_load_tensor(const std::string & name) : name(name) {}
  307. void calc_all() {
  308. calc_type();
  309. calc_split_type();
  310. calc_ne();
  311. calc_size();
  312. }
  313. void calc_type() {
  314. const auto & first_shard = shards.at(0);
  315. for (const auto & shard : shards) {
  316. if (shard.type != first_shard.type) {
  317. throw std::runtime_error(format("inconsistent tensor shard type in '%s'", name.c_str()));
  318. }
  319. }
  320. type = first_shard.type;
  321. }
  322. void calc_split_type() {
  323. if (shards.at(0).ne.size() == 1 || // 1D tensors are just duplicated in every file
  324. shards.size() == 1) { // only one file?
  325. split_type = SPLIT_NONE;
  326. } else if (name.find("tok_embeddings.") == 0 ||
  327. name.find(".attention.wo.weight") != std::string::npos ||
  328. name.find(".feed_forward.w2.weight") != std::string::npos) {
  329. split_type = SPLIT_BY_COLUMNS;
  330. } else {
  331. split_type = SPLIT_BY_ROWS;
  332. }
  333. }
  334. void calc_ne() {
  335. const auto & first_shard = shards.at(0);
  336. for (const auto & shard : shards) {
  337. if (shard.ne != first_shard.ne) {
  338. throw std::runtime_error(format("inconsistent tensor shard shape in '%s': first was %s, other was %s",
  339. name.c_str(), llama_format_tensor_shape(first_shard.ne).c_str(), llama_format_tensor_shape(shard.ne).c_str()));
  340. }
  341. }
  342. ne = first_shard.ne;
  343. LLAMA_ASSERT(shards.size() <= UINT32_MAX);
  344. uint32_t n_shards = (uint32_t) shards.size();
  345. switch (split_type) {
  346. case SPLIT_NONE:
  347. ne = first_shard.ne;
  348. break;
  349. case SPLIT_BY_COLUMNS:
  350. ne = {checked_mul<uint32_t>(first_shard.ne[0], n_shards),
  351. first_shard.ne[1]};
  352. break;
  353. case SPLIT_BY_ROWS:
  354. ne = {first_shard.ne[0],
  355. checked_mul<uint32_t>(first_shard.ne[1], n_shards)};
  356. break;
  357. }
  358. }
  359. void calc_size() {
  360. size = llama_calc_tensor_size(ne, type);
  361. }
  362. };
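// Example of how calc_ne() combines shards (illustrative numbers): with two files each holding
// a shard of per-file shape {4096, 11008},
//   SPLIT_BY_COLUMNS -> ne = {8192, 11008}  (first dimension scaled by n_shards)
//   SPLIT_BY_ROWS    -> ne = {4096, 22016}  (second dimension scaled by n_shards)
// 1-D tensors and single-file models keep the per-shard shape unchanged (SPLIT_NONE).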
  363. struct llama_load_tensors_map {
  364. // tensors are kept in a separate vector to preserve file order
  365. std::vector<llama_load_tensor> tensors;
  366. std::unordered_map<std::string, size_t> name_to_idx;
  367. };
  368. enum llama_file_version {
  369. LLAMA_FILE_VERSION_GGML,
  370. LLAMA_FILE_VERSION_GGMF_V1, // added version field and scores in vocab
  371. LLAMA_FILE_VERSION_GGJT_V1, // added padding
  372. LLAMA_FILE_VERSION_GGJT_V2, // changed quantization format
  373. LLAMA_FILE_VERSION_GGJT_V3, // changed Q4 and Q8 quantization format
  374. };
  375. struct llama_file_loader {
  376. llama_file file;
  377. llama_file_version file_version;
  378. llama_hparams hparams;
  379. llama_vocab vocab;
  380. llama_file_loader(const char * fname, size_t file_idx, llama_load_tensors_map & tensors_map)
  381. : file(fname, "rb") {
  382. fprintf(stderr, "llama.cpp: loading model from %s\n", fname);
  383. read_magic();
  384. read_hparams();
  385. read_vocab();
  386. read_tensor_metadata(file_idx, tensors_map);
  387. }
  388. void read_magic() {
  389. uint32_t magic = file.read_u32();
  390. if (magic == LLAMA_FILE_MAGIC_GGML) {
  391. file_version = LLAMA_FILE_VERSION_GGML;
  392. return;
  393. }
  394. uint32_t version = file.read_u32();
  395. switch (magic) {
  396. case LLAMA_FILE_MAGIC_GGMF:
  397. switch (version) {
  398. case 1: file_version = LLAMA_FILE_VERSION_GGMF_V1; return;
  399. }
  400. break;
  401. case LLAMA_FILE_MAGIC_GGJT:
  402. switch (version) {
  403. case 1: file_version = LLAMA_FILE_VERSION_GGJT_V1; return;
  404. case 2: file_version = LLAMA_FILE_VERSION_GGJT_V2; return;
  405. case 3: file_version = LLAMA_FILE_VERSION_GGJT_V3; return;
  406. }
  407. }
  408. throw std::runtime_error(format("unknown (magic, version) combination: %08x, %08x; is this really a GGML file?",
  409. magic, version));
  410. }
  411. void read_hparams() {
  412. hparams.n_vocab = file.read_u32();
  413. hparams.n_embd = file.read_u32();
  414. hparams.n_mult = file.read_u32();
  415. hparams.n_head = file.read_u32();
  416. hparams.n_layer = file.read_u32();
  417. hparams.n_rot = file.read_u32();
  418. hparams.ftype = (enum llama_ftype) file.read_u32();
  419. }
  420. void read_vocab() {
  421. vocab.id_to_token.resize(hparams.n_vocab);
  422. for (uint32_t i = 0; i < hparams.n_vocab; i++) {
  423. uint32_t len = file.read_u32();
  424. std::string word = file.read_string(len);
  425. float score = 0.0f;
  426. if (file_version >= LLAMA_FILE_VERSION_GGMF_V1) {
  427. file.read_raw(&score, sizeof(score));
  428. }
  429. vocab.token_to_id[word] = i;
  430. auto & tok_score = vocab.id_to_token[i];
  431. tok_score.tok = std::move(word);
  432. tok_score.score = score;
  433. }
  434. }
  435. void read_tensor_metadata(size_t file_idx, llama_load_tensors_map & tensors_map) {
  436. while (file.tell() < file.size) {
  437. llama_load_tensor_shard shard;
  438. uint32_t n_dims = file.read_u32();
  439. uint32_t name_len = file.read_u32();
  440. shard.type = (enum ggml_type) file.read_u32();
  441. shard.ne.resize(n_dims);
  442. file.read_raw(shard.ne.data(), sizeof(shard.ne[0]) * n_dims);
  443. std::string name = file.read_string(name_len);
  444. if (n_dims < 1 || n_dims > 2) {
  445. throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims));
  446. }
  447. switch (shard.type) {
  448. case GGML_TYPE_F32:
  449. case GGML_TYPE_F16:
  450. case GGML_TYPE_Q4_0:
  451. case GGML_TYPE_Q4_1:
  452. case GGML_TYPE_Q5_0:
  453. case GGML_TYPE_Q5_1:
  454. case GGML_TYPE_Q8_0:
  455. case GGML_TYPE_Q2_K:
  456. case GGML_TYPE_Q3_K:
  457. case GGML_TYPE_Q4_K:
  458. case GGML_TYPE_Q5_K:
  459. case GGML_TYPE_Q6_K:
  460. break;
  461. default: {
  462. throw std::runtime_error(format("unrecognized tensor type %u\n", shard.type));
  463. }
  464. }
  465. if (file_version >= LLAMA_FILE_VERSION_GGJT_V1) {
  466. // skip to the next multiple of 32 bytes
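// (`-tell() & 31` is the distance to the next 32-byte boundary: e.g. at offset 100,
//  -100 & 31 == 28 and 100 + 28 == 128; at an already-aligned offset it yields 0)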
  467. file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
  468. }
  469. shard.file_idx = file_idx;
  470. shard.file_off = file.tell();
  471. shard.calc_size();
  472. file.seek(shard.size, SEEK_CUR);
  473. auto it = tensors_map.name_to_idx.find(name);
  474. size_t idx;
  475. if (it != tensors_map.name_to_idx.end()) {
  476. idx = it->second;
  477. } else {
  478. tensors_map.tensors.emplace_back(name);
  479. idx = tensors_map.tensors.size() - 1;
  480. tensors_map.name_to_idx.emplace(name, idx);
  481. }
  482. tensors_map.tensors.at(idx).shards.push_back(shard);
  483. }
  484. }
  485. };
  486. struct llama_file_saver {
  487. llama_file file;
  488. llama_file_loader * any_file_loader;
  489. llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
  490. : file(fname, "wb"), any_file_loader(any_file_loader) {
  491. fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
  492. write_magic();
  493. write_hparams(new_ftype);
  494. write_vocab();
  495. }
  496. void write_magic() {
  497. file.write_u32(LLAMA_FILE_MAGIC); // magic
  498. file.write_u32(LLAMA_FILE_VERSION); // version
  499. }
  500. void write_hparams(enum llama_ftype new_ftype) {
  501. const llama_hparams & hparams = any_file_loader->hparams;
  502. file.write_u32(hparams.n_vocab);
  503. file.write_u32(hparams.n_embd);
  504. file.write_u32(hparams.n_mult);
  505. file.write_u32(hparams.n_head);
  506. file.write_u32(hparams.n_layer);
  507. file.write_u32(hparams.n_rot);
  508. file.write_u32(new_ftype);
  509. }
  510. void write_vocab() {
  511. if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
  512. fprintf(stderr, "llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
  513. }
  514. uint32_t n_vocab = any_file_loader->hparams.n_vocab;
  515. for (uint32_t i = 0; i < n_vocab; i++) {
  516. const auto & token_score = any_file_loader->vocab.id_to_token.at(i);
  517. file.write_u32((uint32_t) token_score.tok.size());
  518. file.write_raw(token_score.tok.data(), token_score.tok.size());
  519. file.write_raw(&token_score.score, sizeof(token_score.score));
  520. }
  521. }
  522. void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) {
  523. switch (new_type) {
  524. case GGML_TYPE_F32:
  525. case GGML_TYPE_F16:
  526. case GGML_TYPE_Q4_0:
  527. case GGML_TYPE_Q4_1:
  528. case GGML_TYPE_Q5_0:
  529. case GGML_TYPE_Q5_1:
  530. case GGML_TYPE_Q8_0:
  531. case GGML_TYPE_Q2_K:
  532. case GGML_TYPE_Q3_K:
  533. case GGML_TYPE_Q4_K:
  534. case GGML_TYPE_Q5_K:
  535. case GGML_TYPE_Q6_K:
  536. break;
  537. default: LLAMA_ASSERT(false);
  538. }
  539. file.write_u32((uint32_t) tensor.ne.size());
  540. file.write_u32((uint32_t) tensor.name.size());
  541. file.write_u32(new_type);
  542. file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size());
  543. file.write_raw(tensor.name.data(), tensor.name.size());
  544. file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
  545. LLAMA_ASSERT(new_size == llama_calc_tensor_size(tensor.ne, new_type));
  546. file.write_raw(new_data, new_size);
  547. }
  548. };
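// For reference, the per-tensor record that read_tensor_metadata() parses and write_tensor()
// emits (as implied by the two functions above; the 32-byte padding applies to GGJT files):
//   uint32 n_dims | uint32 name_len | uint32 type | uint32 ne[n_dims] | name bytes
//   | padding up to the next 32-byte file offset | tensor data (llama_calc_tensor_size bytes)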
  549. struct llama_model_loader {
  550. std::vector<std::unique_ptr<llama_file_loader>> file_loaders;
  551. llama_load_tensors_map tensors_map;
  552. bool use_mmap;
  553. size_t num_ggml_tensors_created = 0;
  554. struct ggml_context * ggml_ctx = NULL;
  555. std::unique_ptr<llama_mmap> mapping;
  556. llama_model_loader(const std::string & fname_base, bool use_mmap, bool vocab_only) {
  557. auto * first_file = new llama_file_loader(fname_base.c_str(), 0, tensors_map);
  558. file_loaders.emplace_back(first_file);
  559. uint32_t n_parts = vocab_only ? 1 : guess_n_parts();
  560. for (uint32_t i = 1; i < n_parts; i++) {
  561. std::string fname = fname_base + "." + std::to_string(i);
  562. auto * ith_file = new llama_file_loader(fname.c_str(), i, tensors_map);
  563. file_loaders.emplace_back(ith_file);
  564. if (ith_file->hparams != first_file->hparams) {
  565. throw std::runtime_error(format("llama.cpp: hparams inconsistent between files"));
  566. }
  567. }
  568. if (!llama_mmap::SUPPORTED) {
  569. use_mmap = false;
  570. }
  571. if (use_mmap && alignment_prevents_mmap()) {
  572. fprintf(stderr, "llama.cpp: can't use mmap because tensors are not aligned; convert to new format to avoid this\n");
  573. use_mmap = false;
  574. }
  575. this->use_mmap = use_mmap;
  576. for (llama_load_tensor & lt : tensors_map.tensors) {
  577. lt.calc_all();
  578. }
  579. }
  580. bool alignment_prevents_mmap() {
  581. for (const llama_load_tensor & lt : tensors_map.tensors) {
  582. for (const llama_load_tensor_shard & shard : lt.shards) {
  583. if (shard.file_off & 3) {
  584. return true;
  585. }
  586. }
  587. }
  588. return false;
  589. }
  590. uint32_t guess_n_parts() const {
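// tok_embeddings.weight is split by columns across parts, so each part stores
// n_embd / n_parts values along ne[0]; dividing the full n_embd by the first shard's ne[0]
// therefore recovers the part count (e.g. 4096 / 2048 -> 2 parts for a two-file 7B model).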
  591. auto it = tensors_map.name_to_idx.find("tok_embeddings.weight");
  592. if (it == tensors_map.name_to_idx.end()) {
  593. throw std::runtime_error(std::string("missing tok_embeddings.weight"));
  594. }
  595. const llama_load_tensor & lt = tensors_map.tensors.at(it->second);
  596. return file_loaders.at(0)->hparams.n_embd / lt.shards.at(0).ne.at(0);
  597. }
  598. void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const {
  599. *ctx_size_p = *mmapped_size_p = 0;
  600. for (const llama_load_tensor & lt : tensors_map.tensors) {
  601. *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
  602. *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size;
  603. }
  604. }
  605. struct ggml_tensor * get_tensor(const std::string & name, const std::vector<uint32_t> & ne, ggml_backend backend) {
  606. auto it = tensors_map.name_to_idx.find(name);
  607. if (it == tensors_map.name_to_idx.end()) {
  608. throw std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str()));
  609. }
  610. llama_load_tensor & lt = tensors_map.tensors.at(it->second);
  611. if (lt.ne != ne) {
  612. throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s",
  613. name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str()));
  614. }
  615. return get_tensor_for(lt, backend);
  616. }
  617. struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) {
  618. struct ggml_tensor * tensor;
  619. if (backend != GGML_BACKEND_CPU) {
  620. ggml_set_no_alloc(ggml_ctx, true);
  621. }
  622. if (lt.ne.size() == 2) {
  623. tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1));
  624. } else {
  625. LLAMA_ASSERT(lt.ne.size() == 1);
  626. tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0));
  627. }
  628. ggml_set_name(tensor, lt.name.c_str());
  629. LLAMA_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor
  630. if (backend != GGML_BACKEND_CPU) {
  631. ggml_set_no_alloc(ggml_ctx, use_mmap);
  632. }
  633. tensor->backend = backend;
  634. lt.ggml_tensor = tensor;
  635. num_ggml_tensors_created++;
  636. return tensor;
  637. }
  638. void done_getting_tensors() const {
  639. if (num_ggml_tensors_created != tensors_map.tensors.size()) {
  640. throw std::runtime_error(std::string("llama.cpp: file contained more tensors than expected"));
  641. }
  642. }
  643. void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
  644. size_t data_size = 0;
  645. size_t prefetch_size = 0;
  646. size_t lock_size = 0;
  647. for (const llama_load_tensor & lt : tensors_map.tensors) {
  648. data_size += lt.size;
  649. if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
  650. prefetch_size += lt.size;
  651. }
  652. }
  653. if (use_mmap) {
  654. mapping.reset(new llama_mmap(&file_loaders.at(0)->file, prefetch_size));
  655. if (lmlock) {
  656. lmlock->init(mapping->addr);
  657. }
  658. }
  659. size_t done_size = 0;
  660. for (llama_load_tensor & lt : tensors_map.tensors) {
  661. if (progress_callback) {
  662. progress_callback((float) done_size / data_size, progress_callback_user_data);
  663. }
  664. LLAMA_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already
  665. lt.data = (uint8_t *) lt.ggml_tensor->data;
  666. // allocate temp buffer if not using mmap
  667. if (!use_mmap && lt.data == NULL) {
  668. GGML_ASSERT(lt.ggml_tensor->backend != GGML_BACKEND_CPU);
  669. lt.data = (uint8_t*)malloc(ggml_nbytes(lt.ggml_tensor));
  670. }
  671. load_data_for(lt);
  672. switch(lt.ggml_tensor->backend) {
  673. case GGML_BACKEND_CPU:
  674. lt.ggml_tensor->data = lt.data;
  675. if (use_mmap && lmlock) {
  676. lock_size += lt.size;
  677. lmlock->grow_to(lock_size);
  678. }
  679. break;
  680. #if defined(GGML_USE_CUBLAS)
  681. case GGML_BACKEND_GPU:
  682. case GGML_BACKEND_GPU_SPLIT:
  683. ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor);
  684. if (!use_mmap) {
  685. free(lt.data);
  686. }
  687. break;
  688. #elif defined(GGML_USE_CLBLAST)
  689. case GGML_BACKEND_GPU:
  690. ggml_cl_transform_tensor(lt.data, lt.ggml_tensor);
  691. if (!use_mmap) {
  692. free(lt.data);
  693. }
  694. break;
  695. #endif
  696. default:
  697. continue;
  698. }
  699. done_size += lt.size;
  700. }
  701. }
  702. void load_data_for(llama_load_tensor & lt) {
  703. if (use_mmap) {
  704. LLAMA_ASSERT(lt.shards.size() == 1);
  705. lt.data = (uint8_t *) mapping->addr + lt.shards.at(0).file_off;
  706. } else if (lt.split_type == SPLIT_NONE) {
  707. llama_file & file = file_loaders.at(lt.shards.at(0).file_idx)->file;
  708. file.seek(lt.shards.at(0).file_off, SEEK_SET);
  709. file.read_raw(lt.data, lt.size);
  710. } else if (lt.split_type == SPLIT_BY_ROWS) {
  711. size_t offset = 0;
  712. for (llama_load_tensor_shard & shard : lt.shards) {
  713. llama_file & file = file_loaders.at(shard.file_idx)->file;
  714. file.seek(shard.file_off, SEEK_SET);
  715. file.read_raw(lt.data + offset, shard.size);
  716. offset += shard.size;
  717. }
  718. LLAMA_ASSERT(offset == lt.size);
  719. } else if (lt.split_type == SPLIT_BY_COLUMNS) {
  720. // Let's load the data into temporary buffers to ensure the OS performs large loads.
  721. std::vector<llama_buffer> tmp_bufs(lt.shards.size());
  722. for (size_t i = 0; i < lt.shards.size(); i++) {
  723. llama_load_tensor_shard & shard = lt.shards.at(i);
  724. llama_file & file = file_loaders.at(shard.file_idx)->file;
  725. file.seek(shard.file_off, SEEK_SET);
  726. tmp_bufs.at(i).resize(shard.size);
  727. file.read_raw(tmp_bufs.at(i).addr, shard.size);
  728. }
  729. // Then reshape.
  730. size_t num_rows = lt.ne.at(1);
  731. size_t per_shard_row_size = lt.shards.at(0).size / num_rows;
  732. size_t out_offset = 0;
  733. for (size_t row = 0; row < num_rows; row++) {
  734. for (llama_buffer & tmp_buf : tmp_bufs) {
  735. memcpy(lt.data + out_offset,
  736. tmp_buf.addr + row * per_shard_row_size,
  737. per_shard_row_size);
  738. out_offset += per_shard_row_size;
  739. }
  740. }
  741. LLAMA_ASSERT(out_offset == lt.size);
  742. }
  743. if (0) {
  744. print_checksum(lt);
  745. }
  746. }
  747. static void print_checksum(llama_load_tensor & lt) {
  748. uint32_t sum = 0;
  749. for (size_t i = 0; i < lt.size; i++) {
  750. uint8_t byte = lt.data[i];
  751. sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
  752. }
  753. fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
  754. llama_format_tensor_shape(lt.ne).c_str(), lt.size);
  755. }
  756. };
  757. //
  758. // kv cache
  759. //
  760. static bool kv_cache_init(
  761. const struct llama_hparams & hparams,
  762. struct llama_kv_cache & cache,
  763. ggml_type wtype,
  764. int n_ctx,
  765. int n_gpu_layers) {
  766. const int n_embd = hparams.n_embd;
  767. const int n_layer = hparams.n_layer;
  768. const int64_t n_mem = n_layer*n_ctx;
  769. const int64_t n_elements = n_embd*n_mem;
  770. cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
  771. struct ggml_init_params params;
  772. params.mem_size = cache.buf.size;
  773. params.mem_buffer = cache.buf.addr;
  774. params.no_alloc = false;
  775. cache.ctx = ggml_init(params);
  776. if (!cache.ctx) {
  777. fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
  778. return false;
  779. }
  780. cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
  781. cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
  782. ggml_set_name(cache.k, "cache_k");
  783. ggml_set_name(cache.v, "cache_v");
  784. #ifdef GGML_USE_CUBLAS
  785. if (n_gpu_layers > n_layer + 1) {
  786. ggml_cuda_assign_buffers_no_scratch(cache.v);
  787. }
  788. if (n_gpu_layers > n_layer + 2) {
  789. ggml_cuda_assign_buffers_no_scratch(cache.k);
  790. }
  791. #endif // GGML_USE_CUBLAS
  792. return true;
  793. }
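// Worked example of the buffer sizing above (a sketch; default 7B hparams, wtype = F16,
// n_ctx = 512):
//   n_mem      = n_layer * n_ctx = 32 * 512      = 16'384 positions
//   n_elements = n_embd * n_mem  = 4096 * 16'384 = 67'108'864 per tensor
//   buf size   = 2 * 67'108'864 * 2 bytes + 2 MB = 256 MB + 2 MB
// (one fp16 value per element for cache_k and one for cache_v, plus slack for the ggml context)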
  794. struct llama_context_params llama_context_default_params() {
  795. struct llama_context_params result = {
  796. /*.n_ctx =*/ 512,
  797. /*.n_batch =*/ 512,
  798. /*.gpu_layers =*/ 0,
  799. /*.main_gpu =*/ 0,
  800. /*.tensor_split =*/ {0},
  801. /*.low_vram =*/ false,
  802. /*.seed =*/ -1,
  803. /*.f16_kv =*/ true,
  804. /*.logits_all =*/ false,
  805. /*.vocab_only =*/ false,
  806. /*.use_mmap =*/ true,
  807. /*.use_mlock =*/ false,
  808. /*.embedding =*/ false,
  809. /*.progress_callback =*/ nullptr,
  810. /*.progress_callback_user_data =*/ nullptr,
  811. };
  812. return result;
  813. }
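// Illustrative caller-side sketch (not compiled): the usual pattern is to start from these
// defaults and override a few fields before creating a context. Field names and
// llama_init_from_file() follow the llama.h of this vintage; the model path is a placeholder.
#if 0
static void example_context_params_usage() {
    struct llama_context_params params = llama_context_default_params();
    params.n_ctx        = 2048; // larger context window than the 512 default
    params.n_gpu_layers = 32;   // offload the repeating layers of a 7B model (CUDA/OpenCL builds)
    params.use_mlock    = true; // try to keep the weights resident in RAM
    struct llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", params);
    (void) ctx; // error handling omitted
}
#endif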
  814. struct llama_model_quantize_params llama_model_quantize_default_params() {
  815. struct llama_model_quantize_params result = {
  816. /*.nthread =*/ 0,
  817. /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
  818. /*.allow_requantize =*/ false,
  819. /*.quantize_output_tensor =*/ true,
  820. };
  821. return result;
  822. }
  823. bool llama_mmap_supported() {
  824. return llama_mmap::SUPPORTED;
  825. }
  826. bool llama_mlock_supported() {
  827. return llama_mlock::SUPPORTED;
  828. }
  829. void llama_init_backend() {
  830. ggml_time_init();
  831. // needed to initialize f16 tables
  832. {
  833. struct ggml_init_params params = { 0, NULL, false };
  834. struct ggml_context * ctx = ggml_init(params);
  835. ggml_free(ctx);
  836. }
  837. }
  838. int64_t llama_time_us() {
  839. return ggml_time_us();
  840. }
  841. //
  842. // model loading
  843. //
  844. static const char *llama_file_version_name(llama_file_version version) {
  845. switch (version) {
  846. case LLAMA_FILE_VERSION_GGML: return "'ggml' (old version with low tokenizer quality and no mmap support)";
  847. case LLAMA_FILE_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)";
  848. case LLAMA_FILE_VERSION_GGJT_V1: return "ggjt v1 (pre #1405)";
  849. case LLAMA_FILE_VERSION_GGJT_V2: return "ggjt v2 (pre #1508)";
  850. case LLAMA_FILE_VERSION_GGJT_V3: return "ggjt v3 (latest)";
  851. }
  852. return "unknown";
  853. }
  854. static const char *llama_ftype_name(enum llama_ftype ftype) {
  855. switch (ftype) {
  856. case LLAMA_FTYPE_ALL_F32: return "all F32";
  857. case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16";
  858. case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
  859. case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
  860. case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
  861. return "mostly Q4_1, some F16";
  862. case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0";
  863. case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1";
  864. case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0";
  865. // K-quants
  866. case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K";
  867. case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small";
  868. case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium";
  869. case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large";
  870. case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small";
  871. case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium";
  872. case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small";
  873. case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium";
  874. case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K";
  875. default: return "unknown, may not work";
  876. }
  877. }
  878. static const char *llama_model_type_name(e_model type) {
  879. switch (type) {
  880. case MODEL_3B: return "3B";
  881. case MODEL_7B: return "7B";
  882. case MODEL_13B: return "13B";
  883. case MODEL_30B: return "30B";
  884. case MODEL_65B: return "65B";
  885. default: LLAMA_ASSERT(false);
  886. }
  887. }
  888. static void llama_model_load_internal(
  889. const std::string & fname,
  890. llama_context & lctx,
  891. int n_ctx,
  892. int n_batch,
  893. int n_gpu_layers,
  894. int main_gpu,
  895. const float * tensor_split,
  896. bool low_vram,
  897. ggml_type memory_type,
  898. bool use_mmap,
  899. bool use_mlock,
  900. bool vocab_only,
  901. llama_progress_callback progress_callback,
  902. void * progress_callback_user_data) {
  903. lctx.t_start_us = ggml_time_us();
  904. std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap, vocab_only));
  905. lctx.vocab = std::move(ml->file_loaders.at(0)->vocab);
  906. auto & model = lctx.model;
  907. model.hparams = ml->file_loaders.at(0)->hparams;
  908. model.n_gpu_layers = n_gpu_layers;
  909. llama_file_version file_version = ml->file_loaders.at(0)->file_version;
  910. auto & hparams = model.hparams;
  911. {
  912. switch (hparams.n_layer) {
  913. case 26: model.type = e_model::MODEL_3B; break;
  914. case 32: model.type = e_model::MODEL_7B; break;
  915. case 40: model.type = e_model::MODEL_13B; break;
  916. case 60: model.type = e_model::MODEL_30B; break;
  917. case 80: model.type = e_model::MODEL_65B; break;
  918. default:
  919. {
  920. if (hparams.n_layer < 32) {
  921. model.type = e_model::MODEL_7B;
  922. }
  923. } break;
  924. }
  925. hparams.n_ctx = n_ctx;
  926. }
  927. const uint32_t n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
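// e.g. for 7B (n_embd = 4096, n_mult = 256): 2*(4*4096)/3 = 10922, rounded up to the next
// multiple of 256 gives n_ff = 11008, the familiar LLaMA-7B feed-forward width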
  928. {
  929. fprintf(stderr, "%s: format = %s\n", __func__, llama_file_version_name(file_version));
  930. fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab);
  931. fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx);
  932. fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd);
  933. fprintf(stderr, "%s: n_mult = %u\n", __func__, hparams.n_mult);
  934. fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head);
  935. fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer);
  936. fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot);
  937. fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
  938. fprintf(stderr, "%s: n_ff = %u\n", __func__, n_ff);
  939. fprintf(stderr, "%s: n_parts = %zu\n", __func__, ml->file_loaders.size());
  940. fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type));
  941. }
  942. if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
  943. if (hparams.ftype != LLAMA_FTYPE_ALL_F32 &&
  944. hparams.ftype != LLAMA_FTYPE_MOSTLY_F16 &&
  945. hparams.ftype != LLAMA_FTYPE_MOSTLY_Q8_0) {
  946. throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1405)"));
  947. }
  948. }
  949. if (file_version < LLAMA_FILE_VERSION_GGJT_V3) {
  950. if (hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
  951. hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 ||
  952. hparams.ftype == LLAMA_FTYPE_MOSTLY_Q8_0) {
  953. throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1508)"));
  954. }
  955. }
  956. if (vocab_only) {
  957. return;
  958. }
  959. auto & ctx = model.ctx;
  960. size_t ctx_size;
  961. size_t mmapped_size;
  962. ml->calc_sizes(&ctx_size, &mmapped_size);
  963. fprintf(stderr, "%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
  964. // create the ggml context
  965. {
  966. lctx.model.buf.resize(ctx_size);
  967. if (use_mlock) {
  968. lctx.model.mlock_buf.init(lctx.model.buf.addr);
  969. lctx.model.mlock_buf.grow_to(lctx.model.buf.size);
  970. }
  971. struct ggml_init_params params = {
  972. /*.mem_size =*/ lctx.model.buf.size,
  973. /*.mem_buffer =*/ lctx.model.buf.addr,
  974. /*.no_alloc =*/ ml->use_mmap,
  975. };
  976. model.ctx = ggml_init(params);
  977. if (!model.ctx) {
  978. throw std::runtime_error(format("ggml_init() failed"));
  979. }
  980. }
  981. (void) main_gpu;
  982. #if defined(GGML_USE_CUBLAS)
  983. fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
  984. ggml_cuda_set_main_device(main_gpu);
  985. #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
  986. #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
  987. #elif defined(GGML_USE_CLBLAST)
  988. fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
  989. #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
  990. #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
  991. #else
  992. #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
  993. #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU
  994. #endif
  995. // prepare memory for the weights
  996. size_t vram_weights = 0;
  997. size_t vram_scratch = 0;
  998. {
  999. const uint32_t n_embd = hparams.n_embd;
  1000. const uint32_t n_layer = hparams.n_layer;
  1001. const uint32_t n_vocab = hparams.n_vocab;
  1002. ml->ggml_ctx = ctx;
  1003. model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab}, GGML_BACKEND_CPU);
  1004. // "output" tensor
  1005. {
  1006. ggml_backend backend_norm;
  1007. ggml_backend backend_output;
  1008. if (n_gpu_layers > int(n_layer)) { // NOLINT
  1009. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  1010. // on Windows however this is detrimental unless everything is on the GPU
  1011. #ifndef _WIN32
  1012. backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
  1013. #else
  1014. backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
  1015. #endif // _WIN32
  1016. backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
  1017. } else {
  1018. backend_norm = GGML_BACKEND_CPU;
  1019. backend_output = GGML_BACKEND_CPU;
  1020. }
  1021. model.norm = ml->get_tensor("norm.weight", {n_embd}, backend_norm);
  1022. model.output = ml->get_tensor("output.weight", {n_embd, n_vocab}, backend_output);
  1023. if (backend_norm == GGML_BACKEND_GPU) {
  1024. vram_weights += ggml_nbytes(model.norm);
  1025. }
  1026. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  1027. vram_weights += ggml_nbytes(model.output);
  1028. }
  1029. }
  1030. const int i_gpu_start = n_layer - n_gpu_layers;
  1031. model.layers.resize(n_layer);
  1032. for (uint32_t i = 0; i < n_layer; ++i) {
  1033. const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
  1034. const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
  1035. auto & layer = model.layers[i];
  1036. std::string layers_i = "layers." + std::to_string(i);
  1037. layer.attention_norm = ml->get_tensor(layers_i + ".attention_norm.weight", {n_embd}, backend);
  1038. layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd}, backend_split);
  1039. layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd}, backend_split);
  1040. layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd}, backend_split);
  1041. layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd}, backend_split);
  1042. layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend);
  1043. layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd, n_ff}, backend_split);
  1044. layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend_split);
  1045. layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend_split);
  1046. if (backend == GGML_BACKEND_GPU) {
  1047. vram_weights +=
  1048. ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
  1049. ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
  1050. ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
  1051. }
  1052. }
  1053. }
  1054. ml->done_getting_tensors();
  1055. // print memory requirements
  1056. {
  1057. const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;
  1058. // this is the total memory required to run the inference
  1059. const size_t mem_required =
  1060. ctx_size +
  1061. mmapped_size - vram_weights + // weights in VRAM not in memory
  1062. MEM_REQ_SCRATCH0().at(model.type) +
  1063. MEM_REQ_SCRATCH1().at(model.type) +
  1064. MEM_REQ_EVAL().at (model.type);
  1065. // this is the memory required by one llama_state
  1066. const size_t mem_required_state =
  1067. scale*MEM_REQ_KV_SELF().at(model.type);
  1068. fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
  1069. mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
  1070. (void) vram_scratch;
  1071. (void) n_batch;
  1072. #ifdef GGML_USE_CUBLAS
  1073. if (low_vram) {
  1074. fprintf(stderr, "%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
  1075. ggml_cuda_set_scratch_size(0); // disable scratch
  1076. } else {
  1077. vram_scratch = n_batch * MB;
  1078. ggml_cuda_set_scratch_size(vram_scratch);
  1079. if (n_gpu_layers > 0) {
  1080. fprintf(stderr, "%s: allocating batch_size x 1 MB = %zu MB VRAM for the scratch buffer\n",
  1081. __func__, vram_scratch / MB);
  1082. }
  1083. }
  1084. #endif // GGML_USE_CUBLAS
  1085. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
  1086. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  1087. fprintf(stderr, "%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  1088. if (n_gpu_layers > (int) hparams.n_layer) {
  1089. fprintf(stderr, "%s: offloading non-repeating layers to GPU\n", __func__);
  1090. }
  1091. size_t vram_kv_cache = 0;
  1092. if (n_gpu_layers > (int) hparams.n_layer + 1) {
  1093. if (low_vram) {
  1094. fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
  1095. } else {
  1096. fprintf(stderr, "%s: offloading v cache to GPU\n", __func__);
  1097. vram_kv_cache += MEM_REQ_KV_SELF().at(model.type) / 2;
  1098. }
  1099. }
  1100. if (n_gpu_layers > (int) hparams.n_layer + 2) {
  1101. if (low_vram) {
  1102. fprintf(stderr, "%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
  1103. } else {
  1104. fprintf(stderr, "%s: offloading k cache to GPU\n", __func__);
  1105. vram_kv_cache += MEM_REQ_KV_SELF().at(model.type) / 2;
  1106. }
  1107. }
  1108. const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
  1109. fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n",
  1110. __func__, std::min(n_gpu_layers, max_offloadable_layers), hparams.n_layer + 3);
  1111. fprintf(stderr, "%s: total VRAM used: %zu MB\n",
  1112. __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
  1113. #else
  1114. (void) n_gpu_layers;
  1115. #endif
  1116. }
  1117. // populate `tensors_by_name`
  1118. for (llama_load_tensor & lt : ml->tensors_map.tensors) {
  1119. model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor);
  1120. }
  1121. (void) tensor_split;
  1122. #if defined(GGML_USE_CUBLAS)
  1123. {
  1124. ggml_cuda_set_tensor_split(tensor_split);
  1125. }
  1126. #endif
  1127. ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &lctx.model.mlock_mmap : NULL);
  1128. if (progress_callback) {
  1129. progress_callback(1.0f, progress_callback_user_data);
  1130. }
  1131. model.mapping = std::move(ml->mapping);
  1132. // loading time will be recalculated after the first eval, so
  1133. // we take page faults deferred by mmap() into consideration
  1134. lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
  1135. }
  1136. static bool llama_model_load(
  1137. const std::string & fname,
  1138. llama_context & lctx,
  1139. int n_ctx,
  1140. int n_batch,
  1141. int n_gpu_layers,
  1142. int main_gpu,
  1143. float * tensor_split,
  1144. bool low_vram,
  1145. ggml_type memory_type,
  1146. bool use_mmap,
  1147. bool use_mlock,
  1148. bool vocab_only,
  1149. llama_progress_callback progress_callback,
  1150. void *progress_callback_user_data) {
  1151. try {
  1152. llama_model_load_internal(fname, lctx, n_ctx, n_batch, n_gpu_layers, main_gpu, tensor_split, low_vram, memory_type,
  1153. use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
  1154. return true;
  1155. } catch (const std::exception & err) {
  1156. fprintf(stderr, "error loading model: %s\n", err.what());
  1157. return false;
  1158. }
  1159. }
  1160. // evaluate the transformer
  1161. //
  1162. // - lctx: llama context
  1163. // - tokens: new batch of tokens to process
  1164. // - n_past: the context size so far
  1165. // - n_threads: number of threads to use
  1166. // - cgraph_fname: filename of the exported computation graph
  1167. //
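// Illustrative call (a sketch; `vocab` and `n_threads` are assumed to be in scope, they are not
// defined by this function):
//
//   std::vector<llama_vocab::id> prompt = llama_tokenize(vocab, "Hello", /*bos*/ true);
//   llama_eval_internal(lctx, prompt.data(), (int) prompt.size(), /*n_past*/ 0, n_threads, /*cgraph_fname*/ nullptr);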
  1168. static bool llama_eval_internal(
  1169. llama_context & lctx,
  1170. const llama_token * tokens,
  1171. const int n_tokens,
  1172. const int n_past,
  1173. const int n_threads,
  1174. const char * cgraph_fname) {
  1175. // enforce that the first token is BOS
  1176. if (n_past == 0 && tokens[0] != llama_token_bos()) {
  1177. fprintf(stderr, "%s: first token must be BOS\n", __func__);
  1178. return false;
  1179. }
  1180. const int64_t t_start_us = ggml_time_us();
  1181. const int N = n_tokens;
  1182. const auto & model = lctx.model;
  1183. const auto & hparams = model.hparams;
  1184. const auto & kv_self = model.kv_self;
  1185. LLAMA_ASSERT(!!kv_self.ctx);
  1186. const int n_embd = hparams.n_embd;
  1187. const int n_layer = hparams.n_layer;
  1188. const int n_ctx = hparams.n_ctx;
  1189. const int n_head = hparams.n_head;
  1190. const int n_vocab = hparams.n_vocab;
  1191. const int n_rot = hparams.n_embd/hparams.n_head;
  1192. const int n_gpu_layers = model.n_gpu_layers;
  1193. auto & mem_per_token = lctx.mem_per_token;
  1194. auto & buf_compute = lctx.buf_compute;
  1195. struct ggml_init_params params = {
  1196. /*.mem_size =*/ buf_compute.size,
  1197. /*.mem_buffer =*/ buf_compute.addr,
  1198. /*.no_alloc =*/ false,
  1199. };
  1200. struct ggml_context * ctx0 = ggml_init(params);
  1201. // for big prompts, if BLAS is enabled, it is better to use only one thread
1202. // otherwise, the threads spin waiting for the BLAS calls and degrade performance
  1203. ggml_cgraph gf = {};
  1204. gf.n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
  1205. struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
  1206. ggml_set_name(embd, "embd");
  1207. memcpy(embd->data, tokens, N*ggml_element_size(embd));
  1208. struct ggml_tensor * cur;
  1209. struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
  1210. const int i_gpu_start = n_layer - n_gpu_layers;
  1211. (void) i_gpu_start;
  1212. // offload functions set the tensor output backend to GPU
  1213. // tensors are GPU-accelerated if any input or the output has been offloaded
  1214. //
  1215. // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
  1216. // in that case ggml_cuda_assign_buffers has no effect
  1217. offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
  1218. offload_func_t offload_func_kq = llama_nop;
  1219. offload_func_t offload_func_v = llama_nop;
  1220. #ifdef GGML_USE_CUBLAS
  1221. if (n_gpu_layers > n_layer) {
  1222. offload_func_nr = ggml_cuda_assign_buffers;
  1223. }
  1224. if (n_gpu_layers > n_layer + 1) {
  1225. offload_func_v = ggml_cuda_assign_buffers;
  1226. }
  1227. if (n_gpu_layers > n_layer + 2) {
  1228. offload_func_kq = ggml_cuda_assign_buffers;
  1229. }
  1230. #endif // GGML_USE_CUBLAS
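// Offload thresholds implied above: n_gpu_layers > n_layer also covers the non-repeating output
// tensors, > n_layer + 1 additionally routes the V-side tensors to the GPU, and > n_layer + 2 the
// K/Q-side tensors, mirroring the KV-cache offload decisions made at load time.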
  1231. for (int il = 0; il < n_layer; ++il) {
  1232. offload_func_t offload_func = llama_nop;
  1233. #ifdef GGML_USE_CUBLAS
  1234. if (il >= i_gpu_start) {
  1235. offload_func = ggml_cuda_assign_buffers;
  1236. }
  1237. #endif // GGML_USE_CUBLAS
  1238. struct ggml_tensor * inpSA = inpL;
  1239. lctx.use_buf(ctx0, 0);
  1240. // norm
  1241. {
  1242. cur = ggml_rms_norm(ctx0, inpL);
  1243. offload_func(cur);
  1244. ggml_set_name(cur, "rms_norm_0");
  1245. // cur = cur*attention_norm(broadcasted)
  1246. cur = ggml_mul(ctx0, cur, model.layers[il].attention_norm);
  1247. offload_func(cur);
  1248. ggml_set_name(cur, "attention_norm_0");
  1249. }
  1250. // self-attention
  1251. {
  1252. // compute Q and K and RoPE them
  1253. struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  1254. offload_func_kq(tmpk);
  1255. ggml_set_name(tmpk, "tmpk");
  1256. struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  1257. offload_func_kq(tmpq);
  1258. ggml_set_name(tmpq, "tmpq");
  1259. struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd/n_head, n_head, N), n_past, n_rot, 0);
  1260. offload_func_kq(Kcur);
  1261. ggml_set_name(Kcur, "Kcur");
  1262. struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd/n_head, n_head, N), n_past, n_rot, 0);
  1263. offload_func_kq(Qcur);
  1264. ggml_set_name(Qcur, "Qcur");
  1265. // store key and value to memory
  1266. {
  1267. // compute the transposed [N, n_embd] V matrix
  1268. struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  1269. offload_func_v(tmpv);
  1270. ggml_set_name(tmpv, "tmpv");
  1271. struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd, N));
  1272. offload_func_v(Vcur);
  1273. ggml_set_name(Vcur, "Vcur");
  1274. struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
  1275. offload_func_kq(k);
  1276. ggml_set_name(k, "k");
  1277. struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
  1278. ( n_ctx)*ggml_element_size(kv_self.v),
  1279. (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
  1280. offload_func_v(v);
  1281. ggml_set_name(v, "v");
  1282. // important: storing RoPE-ed version of K in the KV cache!
  1283. ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
  1284. ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
  1285. }
  1286. struct ggml_tensor * Q =
  1287. ggml_permute(ctx0,
  1288. Qcur,
  1289. 0, 2, 1, 3);
  1290. offload_func_kq(Q);
  1291. ggml_set_name(Q, "Q");
  1292. struct ggml_tensor * K =
  1293. ggml_permute(ctx0,
  1294. ggml_reshape_3d(ctx0,
  1295. ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kv_self.k)*n_embd),
  1296. n_embd/n_head, n_head, n_past + N),
  1297. 0, 2, 1, 3);
  1298. offload_func_kq(K);
  1299. ggml_set_name(K, "K");
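// The ops below assemble standard scaled dot-product attention out of ggml primitives:
// score(i, j) = q_i . k_j / sqrt(n_embd/n_head), causally masked and softmaxed, then applied to
// the cached V rows to produce the per-head outputs (KQV).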
  1300. // K * Q
  1301. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  1302. offload_func_kq(KQ);
  1303. ggml_set_name(KQ, "KQ");
  1304. // KQ_scaled = KQ / sqrt(n_embd/n_head)
  1305. struct ggml_tensor * KQ_scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head));
  1306. ggml_set_name(KQ_scale, "1/sqrt(n_embd/n_head)");
  1307. // KQ_scaled shape [n_past + N, N, n_head, 1]
  1308. struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
  1309. offload_func_kq(KQ_scaled);
  1310. ggml_set_name(KQ_scaled, "KQ_scaled");
  1311. // KQ_masked = mask_past(KQ_scaled)
  1312. struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
  1313. offload_func_kq(KQ_masked);
  1314. ggml_set_name(KQ_masked, "KQ_masked");
  1315. // KQ = soft_max(KQ_masked)
  1316. struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
  1317. offload_func_v(KQ_soft_max);
  1318. ggml_set_name(KQ_soft_max, "KQ_soft_max");
  1319. // split cached V into n_head heads
  1320. struct ggml_tensor * V =
  1321. ggml_view_3d(ctx0, kv_self.v,
  1322. n_past + N, n_embd/n_head, n_head,
  1323. n_ctx*ggml_element_size(kv_self.v),
  1324. n_ctx*ggml_element_size(kv_self.v)*n_embd/n_head,
  1325. il*n_ctx*ggml_element_size(kv_self.v)*n_embd);
  1326. offload_func_v(V);
  1327. ggml_set_name(V, "V");
  1328. #if 1
  1329. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
  1330. offload_func_v(KQV);
  1331. ggml_set_name(KQV, "KQV");
  1332. #else
  1333. // make V contiguous in memory to speed up the matmul, however we waste time on the copy
  1334. // on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation
  1335. // is there a better way?
  1336. struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd/n_head, n_head));
  1337. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
  1338. #endif
  1339. // KQV_merged = KQV.permute(0, 2, 1, 3)
  1340. struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  1341. offload_func_v(KQV_merged);
  1342. ggml_set_name(KQV_merged, "KQV_merged");
  1343. // cur = KQV_merged.contiguous().view(n_embd, N)
  1344. cur = ggml_cpy(ctx0,
  1345. KQV_merged,
  1346. ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
  1347. offload_func_v(cur);
  1348. ggml_set_name(cur, "KQV_merged_contiguous");
  1349. // projection (no bias)
  1350. cur = ggml_mul_mat(ctx0,
  1351. model.layers[il].wo,
  1352. cur);
  1353. offload_func(cur);
  1354. ggml_set_name(cur, "result_wo");
  1355. }
  1356. lctx.use_buf(ctx0, 1);
  1357. struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
  1358. offload_func(inpFF);
  1359. ggml_set_name(inpFF, "inpFF");
  1360. // feed-forward network
  1361. {
  1362. // norm
  1363. {
  1364. cur = ggml_rms_norm(ctx0, inpFF);
  1365. offload_func(cur);
  1366. ggml_set_name(cur, "rms_norm_1");
  1367. // cur = cur*ffn_norm(broadcasted)
  1368. cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
  1369. offload_func(cur);
  1370. ggml_set_name(cur, "ffn_norm");
  1371. }
  1372. struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
  1373. model.layers[il].w3,
  1374. cur);
  1375. offload_func(tmp);
  1376. ggml_set_name(tmp, "result_w3");
  1377. cur = ggml_mul_mat(ctx0,
  1378. model.layers[il].w1,
  1379. cur);
  1380. offload_func(cur);
1381. ggml_set_name(cur, "result_w1");
  1382. // SILU activation
  1383. cur = ggml_silu(ctx0, cur);
  1384. offload_func(cur);
  1385. ggml_set_name(cur, "silu");
  1386. cur = ggml_mul(ctx0, cur, tmp);
  1387. offload_func(cur);
  1388. ggml_set_name(cur, "silu_x_result_w3");
  1389. cur = ggml_mul_mat(ctx0,
  1390. model.layers[il].w2,
  1391. cur);
  1392. offload_func(cur);
  1393. ggml_set_name(cur, "result_w2");
  1394. }
  1395. cur = ggml_add(ctx0, cur, inpFF);
  1396. offload_func(cur);
  1397. ggml_set_name(cur, "inpFF_+_result_w2");
  1398. // input for next layer
  1399. inpL = cur;
  1400. }
  1401. lctx.use_buf(ctx0, 0);
  1402. // used at the end to optionally extract the embeddings
  1403. struct ggml_tensor * embeddings = NULL;
  1404. // norm
  1405. {
  1406. cur = ggml_rms_norm(ctx0, inpL);
  1407. offload_func_nr(cur);
  1408. ggml_set_name(cur, "rms_norm_inpL");
  1409. cur = ggml_rms_norm(ctx0, cur);
  1410. offload_func_nr(cur);
  1411. ggml_set_name(cur, "rms_norm_after");
  1412. // cur = cur*norm(broadcasted)
  1413. cur = ggml_mul(ctx0, cur, model.norm);
  1414. offload_func_nr(cur);
  1415. ggml_set_name(cur, "result_norm");
  1416. embeddings = cur;
  1417. }
  1418. // lm_head
  1419. cur = ggml_mul_mat(ctx0, model.output, cur);
  1420. ggml_set_name(cur, "result_output");
  1421. lctx.use_buf(ctx0, -1);
  1422. // logits -> probs
  1423. //cur = ggml_soft_max_inplace(ctx0, cur);
  1424. // run the computation
  1425. ggml_build_forward_expand(&gf, cur);
  1426. #ifdef GGML_USE_METAL
  1427. if (lctx.ctx_metal && N == 1) {
  1428. ggml_metal_graph_compute(lctx.ctx_metal, &gf);
  1429. ggml_metal_get_tensor (lctx.ctx_metal, cur);
  1430. } else {
  1431. // IMPORTANT:
1432. // Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fall back to vanilla
  1433. // ggml_graph_compute(). It uses Apple's Accelerate CBLAS API which takes advantage of the ANE or the AMX
  1434. // coprocessor.
  1435. //
  1436. // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
  1437. // But for now, we have focused only on Matrix x Vector Metal multiplication.
  1438. //
  1439. // TODO: avoid these syncs via shared memory (ref #1696)
  1440. //
  1441. if (lctx.ctx_metal) {
  1442. // We need to sync the GPU KV cache with the CPU KV cache
  1443. ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
  1444. ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
  1445. }
  1446. ggml_graph_compute(ctx0, &gf);
  1447. }
  1448. #else
  1449. ggml_graph_compute(ctx0, &gf);
  1450. #endif
  1451. if (cgraph_fname) {
  1452. ggml_graph_export(&gf, cgraph_fname);
  1453. }
  1454. #ifdef GGML_PERF
  1455. // print timing information per ggml operation (for debugging purposes)
  1456. // requires GGML_PERF to be defined
  1457. ggml_graph_print(&gf);
  1458. #endif
  1459. // plot the computation graph in dot format (for debugging purposes)
  1460. //if (n_past%100 == 0) {
  1461. // ggml_graph_dump_dot(&gf, NULL, "llama.dot");
  1462. //}
  1463. //embd_w.resize(n_vocab*N);
  1464. //memcpy(embd_w.data(), ggml_get_data(cur), sizeof(float)*n_vocab*N);
  1465. // update kv token count
  1466. lctx.model.kv_self.n = n_past + N;
  1467. // extract logits
  1468. {
  1469. auto & logits_out = lctx.logits;
  1470. if (lctx.logits_all) {
  1471. logits_out.resize(n_vocab * N);
  1472. memcpy(logits_out.data(), (float *) ggml_get_data(cur), sizeof(float)*n_vocab*N);
  1473. } else {
  1474. // return result for just the last token
  1475. logits_out.resize(n_vocab);
  1476. memcpy(logits_out.data(), (float *) ggml_get_data(cur) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
  1477. }
  1478. }
  1479. // extract embeddings
  1480. if (!lctx.embedding.empty()) {
  1481. auto & embedding_out = lctx.embedding;
  1482. embedding_out.resize(n_embd);
  1483. memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
  1484. }
  1485. if (mem_per_token == 0) {
  1486. mem_per_token = ggml_used_mem(ctx0)/N;
  1487. }
  1488. #if 0
  1489. printf("\n%s: used_mem = %.3f MB, scratch -- %.3f MB %.3f MB\n", __func__,
  1490. ggml_used_mem(ctx0)/1024.0/1024.0,
  1491. lctx.get_buf_max_mem(0)/1024.0/1024.0,
  1492. lctx.get_buf_max_mem(1)/1024.0/1024.0);
  1493. #endif
  1494. ggml_free(ctx0);
  1495. // measure the performance only for the single-token evals
  1496. if (N == 1) {
  1497. lctx.t_eval_us += ggml_time_us() - t_start_us;
  1498. lctx.n_eval++;
  1499. }
  1500. else if (N > 1) {
  1501. lctx.t_p_eval_us += ggml_time_us() - t_start_us;
  1502. lctx.n_p_eval += N;
  1503. }
  1504. return true;
  1505. }
  1506. //
  1507. // tokenizer
  1508. //
  1509. static size_t utf8_len(char src) {
  1510. const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
  1511. uint8_t highbits = static_cast<uint8_t>(src) >> 4;
  1512. return lookup[highbits];
  1513. }
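// Example: the byte 0xE2 (first byte of many 3-byte UTF-8 sequences) has high nibble 0xE = 14,
// so lookup[14] yields 3; plain ASCII bytes have a high nibble of 0..7 and map to 1.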
  1514. struct llama_sp_symbol {
  1515. using index = int;
  1516. index prev;
  1517. index next;
  1518. const char * text;
  1519. size_t n;
  1520. };
  1521. static_assert(std::is_trivially_copyable<llama_sp_symbol>::value, "llama_sp_symbol is not trivially copyable");
  1522. struct llama_sp_bigram {
  1523. struct comparator {
  1524. bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) {
  1525. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  1526. }
  1527. };
  1528. using queue_storage = std::vector<llama_sp_bigram>;
  1529. using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
  1530. llama_sp_symbol::index left;
  1531. llama_sp_symbol::index right;
  1532. float score;
  1533. size_t size;
  1534. };
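// With this comparator, std::priority_queue keeps the bigram with the highest merge score at
// top(); on equal scores the bigram that starts further to the left wins.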
  1535. // original implementation:
  1536. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  1537. struct llama_tokenizer {
  1538. llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}
  1539. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  1540. // split string into utf8 chars
  1541. int index = 0;
  1542. size_t offs = 0;
  1543. while (offs < text.size()) {
  1544. llama_sp_symbol sym;
  1545. size_t char_len = std::min(text.size() - offs, utf8_len(text[offs]));
  1546. sym.text = text.c_str() + offs;
  1547. sym.n = char_len;
  1548. offs += char_len;
  1549. sym.prev = index - 1;
  1550. sym.next = offs == text.size() ? -1 : index + 1;
  1551. index++;
  1552. symbols_.emplace_back(sym);
  1553. }
  1554. // seed the work queue with all possible 2-character tokens.
  1555. for (size_t i = 1; i < symbols_.size(); ++i) {
  1556. try_add_bigram(i - 1, i);
  1557. }
1558. // keep substituting the highest-scoring pairs for as long as we can.
  1559. while (!work_queue_.empty()) {
  1560. auto bigram = work_queue_.top();
  1561. work_queue_.pop();
  1562. auto & left_sym = symbols_[bigram.left];
  1563. auto & right_sym = symbols_[bigram.right];
  1564. // if one of the symbols already got merged, skip it.
  1565. if (left_sym.n == 0 || right_sym.n == 0 ||
  1566. left_sym.n + right_sym.n != bigram.size) {
  1567. continue;
  1568. }
  1569. // merge the right sym into the left one
  1570. left_sym.n += right_sym.n;
  1571. right_sym.n = 0;
  1572. //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  1573. // remove the right sym from the chain
  1574. left_sym.next = right_sym.next;
  1575. if (right_sym.next >= 0) {
  1576. symbols_[right_sym.next].prev = bigram.left;
  1577. }
  1578. // find more substitutions
  1579. try_add_bigram(left_sym.prev, bigram.left);
  1580. try_add_bigram(bigram.left, left_sym.next);
  1581. }
  1582. for (int i = 0; i != -1; i = symbols_[i].next) {
  1583. auto & symbol = symbols_[i];
  1584. auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));
  1585. if (token == vocab_.token_to_id.end()) {
  1586. // output any symbols that did not form tokens as bytes.
  1587. for (int j = 0; j < (int) symbol.n; ++j) {
  1588. llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
  1589. output.push_back(token_id);
  1590. }
  1591. } else {
  1592. output.push_back((*token).second);
  1593. }
  1594. }
  1595. }
  1596. private:
  1597. void try_add_bigram(int left, int right) {
  1598. if (left == -1 || right == -1) {
  1599. return;
  1600. }
  1601. const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
  1602. auto token = vocab_.token_to_id.find(text);
  1603. if (token == vocab_.token_to_id.end()) {
  1604. return;
  1605. }
  1606. if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
  1607. return;
  1608. }
  1609. const auto &tok_score = vocab_.id_to_token[(*token).second];
  1610. llama_sp_bigram bigram;
  1611. bigram.left = left;
  1612. bigram.right = right;
  1613. bigram.score = tok_score.score;
  1614. bigram.size = text.size();
  1615. work_queue_.push(bigram);
  1616. }
  1617. const llama_vocab & vocab_;
  1618. std::vector<llama_sp_symbol> symbols_;
  1619. llama_sp_bigram::queue work_queue_;
  1620. };
  1621. static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) {
  1622. llama_tokenizer tokenizer(vocab);
  1623. std::vector<llama_vocab::id> output;
  1624. if (text.empty()) {
  1625. return output;
  1626. }
  1627. if (bos) {
  1628. output.push_back(llama_token_bos());
  1629. }
  1630. tokenizer.tokenize(text, output);
  1631. return output;
  1632. }
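// Illustrative use (a sketch; `vocab` stands for a loaded llama_vocab, it is not defined here):
//
//   std::vector<llama_vocab::id> ids = llama_tokenize(vocab, "Hello world", /*bos*/ true);
//   // ids.front() == llama_token_bos(), followed by the sentencepiece pieces of "Hello world"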
  1633. //
  1634. // sampling
  1635. //
  1636. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  1637. assert(candidates->size > 0);
  1638. const int64_t t_start_sample_us = ggml_time_us();
  1639. // Sort the logits in descending order
  1640. if (!candidates->sorted) {
  1641. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  1642. return a.logit > b.logit;
  1643. });
  1644. candidates->sorted = true;
  1645. }
  1646. float max_l = candidates->data[0].logit;
  1647. float cum_sum = 0.0f;
  1648. for (size_t i = 0; i < candidates->size; ++i) {
  1649. float p = expf(candidates->data[i].logit - max_l);
  1650. candidates->data[i].p = p;
  1651. cum_sum += p;
  1652. }
  1653. for (size_t i = 0; i < candidates->size; ++i) {
  1654. candidates->data[i].p /= cum_sum;
  1655. }
  1656. if (ctx) {
  1657. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1658. }
  1659. }
  1660. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep) {
  1661. const int64_t t_start_sample_us = ggml_time_us();
  1662. k = std::max(k, (int) min_keep);
  1663. k = std::min(k, (int) candidates->size);
  1664. // Sort scores in descending order
  1665. if (!candidates->sorted) {
  1666. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  1667. return a.logit > b.logit;
  1668. };
  1669. if (k == (int) candidates->size) {
  1670. std::sort(candidates->data, candidates->data + candidates->size, comp);
  1671. } else {
  1672. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  1673. }
  1674. candidates->sorted = true;
  1675. }
  1676. candidates->size = k;
  1677. if (ctx) {
  1678. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1679. }
  1680. }
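// Nucleus (top-p) sampling: candidates are accumulated in probability order and the tail is dropped
// starting at the first token whose inclusion pushes the running sum past p. Worked example: with
// probabilities {0.5, 0.3, 0.15, 0.05} and p = 0.8 the sum passes 0.8 only at the third token, so
// the first two are kept (assuming min_keep <= 2).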
  1681. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  1682. if (p >= 1.0f) {
  1683. return;
  1684. }
  1685. const int64_t t_start_sample_us = ggml_time_us();
  1686. llama_sample_softmax(ctx, candidates);
  1687. // Compute the cumulative probabilities
  1688. float cum_sum = 0.0f;
  1689. size_t last_idx = candidates->size;
  1690. for (size_t i = 0; i < candidates->size; ++i) {
  1691. cum_sum += candidates->data[i].p;
1692. // Stop once the running sum exceeds p, provided we have already kept at least min_keep tokens
  1693. if (cum_sum > p && i >= min_keep) {
  1694. last_idx = i;
  1695. break;
  1696. }
  1697. }
  1698. // Resize the output vector to keep only the top-p tokens
  1699. candidates->size = last_idx;
  1700. if (ctx) {
  1701. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1702. }
  1703. }
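// Tail-free sampling (TFS): rather than a fixed probability mass, it measures the curvature of the
// sorted probability curve via the normalized absolute second differences computed below, and drops
// the tail once the accumulated curvature exceeds z.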
  1704. void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
  1705. if (z >= 1.0f || candidates->size <= 2) {
  1706. return;
  1707. }
  1708. const int64_t t_start_sample_us = ggml_time_us();
  1709. llama_sample_softmax(nullptr, candidates);
  1710. // Compute the first and second derivatives
  1711. std::vector<float> first_derivatives(candidates->size - 1);
  1712. std::vector<float> second_derivatives(candidates->size - 2);
  1713. for (size_t i = 0; i < first_derivatives.size(); ++i) {
  1714. first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
  1715. }
  1716. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  1717. second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
  1718. }
  1719. // Calculate absolute value of second derivatives
  1720. for (size_t i = 0; i < second_derivatives.size(); ++i) {
1721. second_derivatives[i] = fabsf(second_derivatives[i]);
  1722. }
  1723. // Normalize the second derivatives
  1724. float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
  1725. for (float & value : second_derivatives) {
  1726. value /= second_derivatives_sum;
  1727. }
  1728. float cum_sum = 0.0f;
  1729. size_t last_idx = candidates->size;
  1730. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  1731. cum_sum += second_derivatives[i];
1732. // Stop once the accumulated curvature exceeds z, provided at least min_keep tokens are kept
  1733. if (cum_sum > z && i >= min_keep) {
  1734. last_idx = i;
  1735. break;
  1736. }
  1737. }
  1738. // Resize the output vector to keep only the tokens above the tail location
  1739. candidates->size = last_idx;
  1740. if (ctx) {
  1741. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1742. }
  1743. }
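// Locally typical sampling: tokens are ranked by how close their surprisal -log(p) is to the
// distribution's entropy and kept, in that order, until their cumulative probability exceeds p.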
  1744. void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  1745. // Reference implementation:
  1746. // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
  1747. if (p >= 1.0f) {
  1748. return;
  1749. }
  1750. const int64_t t_start_sample_us = ggml_time_us();
  1751. // Compute the softmax of logits and calculate entropy
  1752. llama_sample_softmax(nullptr, candidates);
  1753. float entropy = 0.0f;
  1754. for (size_t i = 0; i < candidates->size; ++i) {
  1755. entropy += -candidates->data[i].p * logf(candidates->data[i].p);
  1756. }
  1757. // Compute the absolute difference between negative log probability and entropy for each candidate
  1758. std::vector<float> shifted_scores;
  1759. for (size_t i = 0; i < candidates->size; ++i) {
  1760. float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
  1761. shifted_scores.push_back(shifted_score);
  1762. }
  1763. // Sort tokens based on the shifted_scores and their corresponding indices
  1764. std::vector<size_t> indices(candidates->size);
  1765. std::iota(indices.begin(), indices.end(), 0);
  1766. std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
  1767. return shifted_scores[a] < shifted_scores[b];
  1768. });
  1769. // Compute the cumulative probabilities
  1770. float cum_sum = 0.0f;
  1771. size_t last_idx = indices.size();
  1772. for (size_t i = 0; i < indices.size(); ++i) {
  1773. size_t idx = indices[i];
  1774. cum_sum += candidates->data[idx].p;
1775. // Stop once the running sum exceeds p, provided at least min_keep tokens are kept
  1776. if (cum_sum > p && i >= min_keep - 1) {
  1777. last_idx = i + 1;
  1778. break;
  1779. }
  1780. }
  1781. // Resize the output vector to keep only the locally typical tokens
  1782. std::vector<llama_token_data> new_candidates;
  1783. for (size_t i = 0; i < last_idx; ++i) {
  1784. size_t idx = indices[i];
  1785. new_candidates.push_back(candidates->data[idx]);
  1786. }
  1787. // Replace the data in candidates with the new_candidates data
  1788. std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
  1789. candidates->size = new_candidates.size();
  1790. if (ctx) {
  1791. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1792. }
  1793. }
  1794. void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  1795. const int64_t t_start_sample_us = ggml_time_us();
  1796. for (size_t i = 0; i < candidates_p->size; ++i) {
  1797. candidates_p->data[i].logit /= temp;
  1798. }
  1799. if (ctx) {
  1800. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1801. }
  1802. }
  1803. void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty) {
  1804. if (last_tokens_size == 0 || penalty == 1.0f) {
  1805. return;
  1806. }
  1807. const int64_t t_start_sample_us = ggml_time_us();
  1808. for (size_t i = 0; i < candidates->size; ++i) {
  1809. const auto * token_iter = std::find(last_tokens, last_tokens + last_tokens_size, candidates->data[i].id);
  1810. if (token_iter == last_tokens + last_tokens_size) {
  1811. continue;
  1812. }
1813. // The paper that introduced this technique only divided by the penalty, but that would make tokens with negative logits more likely, which is clearly wrong.
1814. // The common fix, applied here, is to multiply negative logits by the penalty instead of dividing.
  1815. if (candidates->data[i].logit <= 0) {
  1816. candidates->data[i].logit *= penalty;
  1817. } else {
  1818. candidates->data[i].logit /= penalty;
  1819. }
  1820. }
  1821. candidates->sorted = false;
  1822. if (ctx) {
  1823. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1824. }
  1825. }
  1826. void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) {
  1827. if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) {
  1828. return;
  1829. }
  1830. const int64_t t_start_sample_us = ggml_time_us();
  1831. // Create a frequency map to count occurrences of each token in last_tokens
  1832. std::unordered_map<llama_token, int> token_count;
  1833. for (size_t i = 0; i < last_tokens_size; ++i) {
  1834. token_count[last_tokens_p[i]]++;
  1835. }
  1836. // Apply frequency and presence penalties to the candidates
  1837. for (size_t i = 0; i < candidates->size; ++i) {
  1838. auto token_iter = token_count.find(candidates->data[i].id);
  1839. if (token_iter == token_count.end()) {
  1840. continue;
  1841. }
  1842. int count = token_iter->second;
  1843. candidates->data[i].logit -= float(count) * alpha_frequency + float(count > 0) * alpha_presence;
  1844. }
  1845. candidates->sorted = false;
  1846. if (ctx) {
  1847. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1848. }
  1849. }
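// Mirostat (v1): keeps the observed surprise of sampled tokens near the target tau by estimating
// the Zipf exponent s_hat from the top m candidates, deriving a top-k cutoff from s_hat and the
// current mu, and then nudging mu by eta times the surprise error after each draw.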
  1850. llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
  1851. assert(ctx);
  1852. auto N = float(llama_n_vocab(ctx));
  1853. int64_t t_start_sample_us;
  1854. t_start_sample_us = ggml_time_us();
  1855. llama_sample_softmax(nullptr, candidates);
  1856. // Estimate s_hat using the most probable m tokens
  1857. float s_hat = 0.0;
  1858. float sum_ti_bi = 0.0;
  1859. float sum_ti_sq = 0.0;
  1860. for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
  1861. float t_i = logf(float(i + 2) / float(i + 1));
  1862. float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
  1863. sum_ti_bi += t_i * b_i;
  1864. sum_ti_sq += t_i * t_i;
  1865. }
  1866. s_hat = sum_ti_bi / sum_ti_sq;
  1867. // Compute k from the estimated s_hat and target surprise value
  1868. float epsilon_hat = s_hat - 1;
  1869. float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
  1870. // Sample the next word X using top-k sampling
  1871. llama_sample_top_k(nullptr, candidates, int(k), 1);
  1872. if (ctx) {
  1873. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1874. }
  1875. llama_token X = llama_sample_token(ctx, candidates);
  1876. t_start_sample_us = ggml_time_us();
  1877. // Compute error as the difference between observed surprise and target surprise value
  1878. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  1879. return candidate.id == X;
  1880. }));
  1881. float observed_surprise = -log2f(candidates->data[X_idx].p);
  1882. float e = observed_surprise - tau;
  1883. // Update mu using the learning rate and error
  1884. *mu = *mu - eta * e;
  1885. if (ctx) {
  1886. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1887. ctx->n_sample++;
  1888. }
  1889. return X;
  1890. }
  1891. llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
  1892. assert(ctx);
  1893. int64_t t_start_sample_us;
  1894. t_start_sample_us = ggml_time_us();
  1895. llama_sample_softmax(ctx, candidates);
  1896. // Truncate the words with surprise values greater than mu
  1897. candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  1898. return -log2f(candidate.p) > *mu;
  1899. }));
  1900. if (candidates->size == 0) {
  1901. candidates->size = 1;
  1902. }
  1903. // Normalize the probabilities of the remaining words
  1904. llama_sample_softmax(ctx, candidates);
  1905. // Sample the next word X from the remaining words
  1906. if (ctx) {
  1907. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1908. }
  1909. llama_token X = llama_sample_token(ctx, candidates);
  1910. t_start_sample_us = ggml_time_us();
  1911. // Compute error as the difference between observed surprise and target surprise value
  1912. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  1913. return candidate.id == X;
  1914. }));
  1915. float observed_surprise = -log2f(candidates->data[X_idx].p);
  1916. float e = observed_surprise - tau;
  1917. // Update mu using the learning rate and error
  1918. *mu = *mu - eta * e;
  1919. if (ctx) {
  1920. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1921. }
  1922. return X;
  1923. }
  1924. llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
  1925. const int64_t t_start_sample_us = ggml_time_us();
  1926. // Find max element
  1927. auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  1928. return a.logit < b.logit;
  1929. });
  1930. llama_token result = max_iter->id;
  1931. if (ctx) {
  1932. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1933. ctx->n_sample++;
  1934. }
  1935. return result;
  1936. }
  1937. llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
  1938. assert(ctx);
  1939. const int64_t t_start_sample_us = ggml_time_us();
  1940. llama_sample_softmax(nullptr, candidates);
  1941. std::vector<float> probs;
  1942. probs.reserve(candidates->size);
  1943. for (size_t i = 0; i < candidates->size; ++i) {
  1944. probs.push_back(candidates->data[i].p);
  1945. }
  1946. std::discrete_distribution<> dist(probs.begin(), probs.end());
  1947. auto & rng = ctx->rng;
  1948. int idx = dist(rng);
  1949. llama_token result = candidates->data[idx].id;
  1950. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1951. ctx->n_sample++;
  1952. return result;
  1953. }
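// Illustrative sampling pipeline (a sketch; `cand` and `last_tokens` are caller-built containers
// filled from llama_get_logits() and recent history, not names used in this file):
//
//   llama_token_data_array candidates = { cand.data(), cand.size(), false };
//   llama_sample_repetition_penalty(ctx, &candidates, last_tokens.data(), last_tokens.size(), 1.1f);
//   llama_sample_top_k(ctx, &candidates, 40, 1);
//   llama_sample_top_p(ctx, &candidates, 0.95f, 1);
//   llama_sample_temperature(ctx, &candidates, 0.8f);
//   const llama_token id = llama_sample_token(ctx, &candidates);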
  1954. //
  1955. // quantization
  1956. //
  1957. static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llama_buffer & output, const int nelements, const int nthread) {
  1958. if (output.size < nelements * sizeof(float)) {
  1959. output.resize(nelements * sizeof(float));
  1960. }
  1961. float * f32_output = (float *) output.addr;
  1962. quantize_fns_t qtype;
  1963. if (ggml_is_quantized(tensor.type)) {
  1964. qtype = ggml_internal_get_quantize_fn(tensor.type);
  1965. if (qtype.dequantize_row_q == NULL) {
  1966. throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type)));
  1967. }
  1968. } else if (tensor.type != GGML_TYPE_F16) {
  1969. throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor.type)));
  1970. }
  1971. if (nthread < 2) {
  1972. if (tensor.type == GGML_TYPE_F16) {
  1973. ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements);
  1974. } else if (ggml_is_quantized(tensor.type)) {
  1975. qtype.dequantize_row_q(tensor.data, f32_output, nelements);
  1976. } else {
  1977. LLAMA_ASSERT(false); // unreachable
  1978. }
  1979. return;
  1980. }
  1981. auto block_size = tensor.type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor.type);
  1982. auto block_size_bytes = ggml_type_size(tensor.type);
  1983. LLAMA_ASSERT(nelements % block_size == 0);
  1984. auto nblocks = nelements / block_size;
  1985. auto blocks_per_thread = nblocks / nthread;
  1986. auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
  1987. std::vector<std::thread> workers;
  1988. for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) {
  1989. auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
  1990. auto thr_elems = thr_blocks * block_size; // number of elements for this thread
  1991. auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
  1992. auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
  1993. if (typ == GGML_TYPE_F16) {
  1994. ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
  1995. } else {
  1996. qtype.dequantize_row_q(inbuf, outbuf, nels);
  1997. }
  1998. };
  1999. workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems));
  2000. in_buff_offs += thr_block_bytes;
  2001. out_buff_offs += thr_elems;
  2002. }
  2003. for (auto & worker : workers) {
  2004. worker.join();
  2005. }
  2006. }
  2007. static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
  2008. ggml_type quantized_type;
  2009. llama_ftype ftype = params->ftype;
  2010. int nthread = params->nthread;
  2011. switch (params->ftype) {
  2012. case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
  2013. case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
  2014. case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
  2015. case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
  2016. case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
  2017. case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
  2018. case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
  2019. #ifdef GGML_USE_K_QUANTS
  2020. // K-quants
  2021. case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
  2022. case LLAMA_FTYPE_MOSTLY_Q3_K_S:
  2023. case LLAMA_FTYPE_MOSTLY_Q3_K_M:
  2024. case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
  2025. case LLAMA_FTYPE_MOSTLY_Q4_K_S:
  2026. case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
  2027. case LLAMA_FTYPE_MOSTLY_Q5_K_S:
  2028. case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
  2029. case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
  2030. #endif
  2031. default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
  2032. }
  2033. if (nthread <= 0) {
  2034. nthread = std::thread::hardware_concurrency();
  2035. }
  2036. std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false,
  2037. /*vocab_only*/ false));
  2038. llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), params->ftype);
  2039. #ifdef GGML_USE_K_QUANTS
  2040. int n_attention_wv = 0;
  2041. int n_feed_forward_w2 = 0;
  2042. for (auto& tensor : model_loader->tensors_map.tensors) {
  2043. if (tensor.name.find("attention.wv.weight") != std::string::npos) {
  2044. ++n_attention_wv;
  2045. }
  2046. else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
  2047. ++n_feed_forward_w2;
  2048. }
  2049. }
  2050. int i_attention_wv = 0;
  2051. int i_feed_forward_w2 = 0;
  2052. #endif
  2053. size_t total_size_org = 0;
  2054. size_t total_size_new = 0;
  2055. std::vector<int64_t> hist_all(1 << 4, 0);
  2056. std::vector<std::thread> workers;
  2057. std::mutex mutex;
  2058. size_t idx = 0;
  2059. for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) {
  2060. llama_buffer read_data;
  2061. read_data.resize(tensor.size);
  2062. tensor.data = read_data.addr;
  2063. model_loader->load_data_for(tensor);
  2064. printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
  2065. ++idx, model_loader->tensors_map.tensors.size(),
  2066. tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
  2067. ggml_type_name(tensor.type));
  2068. // This used to be a regex, but <regex> has an extreme cost to compile times.
  2069. bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'?
  2070. // quantize only 2D tensors
  2071. quantize &= (tensor.ne.size() == 2);
  2072. quantize &= params->quantize_output_tensor || tensor.name != "output.weight";
  2073. quantize &= quantized_type != tensor.type;
  2074. enum ggml_type new_type;
  2075. void * new_data;
  2076. size_t new_size;
  2077. llama_buffer work;
  2078. if (!quantize) {
  2079. new_type = tensor.type;
  2080. new_data = tensor.data;
  2081. new_size = tensor.size;
  2082. printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
  2083. } else {
  2084. new_type = quantized_type;
  2085. #ifdef GGML_USE_K_QUANTS
  2086. if (tensor.name == "output.weight") {
  2087. new_type = GGML_TYPE_Q6_K;
  2088. } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
  2089. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
  2090. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  2091. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  2092. (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8 ||
  2093. (i_attention_wv - n_attention_wv/8)%3 == 2)) new_type = GGML_TYPE_Q6_K;
  2094. ++i_attention_wv;
  2095. } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
  2096. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
  2097. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  2098. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  2099. (i_feed_forward_w2 < n_feed_forward_w2/8 || i_feed_forward_w2 >= 7*n_feed_forward_w2/8 ||
  2100. (i_feed_forward_w2 - n_feed_forward_w2/8)%3 == 2)) new_type = GGML_TYPE_Q6_K;
  2101. ++i_feed_forward_w2;
  2102. } else if (tensor.name.find("attention.wo.weight") != std::string::npos) {
  2103. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
  2104. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  2105. }
  2106. #endif
  2107. float * f32_data;
  2108. size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
  2109. llama_buffer f32_conv_buf;
  2110. if (tensor.type == GGML_TYPE_F32) {
  2111. f32_data = (float *) tensor.data;
  2112. } else if (ggml_is_quantized(tensor.type) && !params->allow_requantize) {
  2113. throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor.type)));
  2114. } else {
  2115. llama_convert_tensor_internal(tensor, f32_conv_buf, nelements, nthread);
  2116. f32_data = (float *) f32_conv_buf.addr;
  2117. }
  2118. printf("quantizing .. ");
  2119. fflush(stdout);
  2120. work.resize(nelements * 4); // upper bound on size
  2121. new_data = work.addr;
  2122. std::vector<int64_t> hist_cur(1 << 4, 0);
  2123. int chunk_size = 32 * 512;
  2124. const int nchunk = (nelements + chunk_size - 1)/chunk_size;
  2125. const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
  2126. if (nthread_use < 2) {
  2127. new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
  2128. } else {
  2129. size_t counter = 0;
  2130. new_size = 0;
  2131. auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () {
  2132. std::vector<int64_t> local_hist;
  2133. size_t local_size = 0;
  2134. while (true) {
  2135. std::unique_lock<std::mutex> lock(mutex);
  2136. size_t first = counter; counter += chunk_size;
  2137. if (first >= nelements) {
  2138. if (!local_hist.empty()) {
  2139. for (int j=0; j<int(local_hist.size()); ++j) {
  2140. hist_cur[j] += local_hist[j];
  2141. }
  2142. new_size += local_size;
  2143. }
  2144. break;
  2145. }
  2146. lock.unlock();
  2147. size_t last = std::min(nelements, first + chunk_size);
  2148. if (local_hist.empty()) {
  2149. local_hist.resize(hist_cur.size(), 0);
  2150. }
  2151. local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
  2152. }
  2153. };
  2154. if ((int) workers.size() < nthread_use - 1) {
  2155. workers.resize(nthread_use - 1);
  2156. }
  2157. for (int it = 0; it < nthread_use - 1; ++it) {
  2158. workers[it] = std::thread(compute);
  2159. }
  2160. compute();
  2161. for (int it = 0; it < nthread_use - 1; ++it) {
  2162. workers[it].join();
  2163. }
  2164. }
  2165. printf("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
  2166. int64_t tot_count = 0;
  2167. for (size_t i = 0; i < hist_cur.size(); i++) {
  2168. hist_all[i] += hist_cur[i];
  2169. tot_count += hist_cur[i];
  2170. }
  2171. if (tot_count > 0) {
  2172. for (size_t i = 0; i < hist_cur.size(); i++) {
  2173. printf("%5.3f ", hist_cur[i] / float(nelements));
  2174. }
  2175. }
  2176. printf("\n");
  2177. }
  2178. total_size_org += tensor.size;
  2179. total_size_new += new_size;
  2180. file_saver.write_tensor(tensor, new_type, new_data, new_size);
  2181. }
  2182. printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
  2183. printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
  2184. {
  2185. int64_t sum_all = 0;
  2186. for (size_t i = 0; i < hist_all.size(); i++) {
  2187. sum_all += hist_all[i];
  2188. }
  2189. if (sum_all > 0) {
  2190. printf("%s: hist: ", __func__);
  2191. for (size_t i = 0; i < hist_all.size(); i++) {
  2192. printf("%5.3f ", hist_all[i] / float(sum_all));
  2193. }
  2194. printf("\n");
  2195. }
  2196. }
  2197. }
  2198. //
  2199. // interface implementation
  2200. //
  2201. struct llama_context * llama_init_from_file(
  2202. const char * path_model,
  2203. struct llama_context_params params) {
  2204. ggml_time_init();
  2205. llama_context * ctx = new llama_context;
  2206. if (params.seed < 0) {
  2207. params.seed = time(NULL);
  2208. }
  2209. unsigned cur_percentage = 0;
  2210. if (params.progress_callback == NULL) {
  2211. params.progress_callback_user_data = &cur_percentage;
  2212. params.progress_callback = [](float progress, void * ctx) {
  2213. unsigned * cur_percentage_p = (unsigned *) ctx;
  2214. unsigned percentage = (unsigned) (100 * progress);
  2215. while (percentage > *cur_percentage_p) {
  2216. *cur_percentage_p = percentage;
  2217. fprintf(stderr, ".");
  2218. fflush(stderr);
  2219. if (percentage >= 100) {
  2220. fprintf(stderr, "\n");
  2221. }
  2222. }
  2223. };
  2224. }
  2225. ctx->rng = std::mt19937(params.seed);
  2226. ctx->logits_all = params.logits_all;
  2227. ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
  2228. if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_batch, params.n_gpu_layers, params.main_gpu,
  2229. params.tensor_split, params.low_vram, memory_type, params.use_mmap, params.use_mlock,
  2230. params.vocab_only, params.progress_callback, params.progress_callback_user_data)) {
  2231. fprintf(stderr, "%s: failed to load model\n", __func__);
  2232. llama_free(ctx);
  2233. return nullptr;
  2234. }
  2235. // reserve memory for context buffers
  2236. if (!params.vocab_only) {
  2237. if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
  2238. fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
  2239. llama_free(ctx);
  2240. return nullptr;
  2241. }
  2242. {
  2243. const size_t memory_size = ggml_nbytes(ctx->model.kv_self.k) + ggml_nbytes(ctx->model.kv_self.v);
  2244. fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
  2245. }
  2246. const auto & hparams = ctx->model.hparams;
  2247. // resized during inference
  2248. if (params.logits_all) {
  2249. ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
  2250. } else {
  2251. ctx->logits.reserve(hparams.n_vocab);
  2252. }
  2253. if (params.embedding){
  2254. ctx->embedding.resize(hparams.n_embd);
  2255. }
  2256. ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type));
  2257. ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0().at(ctx->model.type));
  2258. ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
  2259. }
  2260. #ifdef GGML_USE_METAL
  2261. if (params.n_gpu_layers > 0) {
  2262. // this allocates all Metal resources and memory buffers
  2263. ctx->ctx_metal = ggml_metal_init();
  2264. void *data_ptr = NULL;
  2265. size_t data_size = 0;
  2266. if (params.use_mmap) {
  2267. data_ptr = ctx->model.mapping->addr;
  2268. data_size= ctx->model.mapping->size;
  2269. } else {
  2270. data_ptr = ggml_get_mem_buffer(ctx->model.ctx);
  2271. data_size= ggml_get_mem_size(ctx->model.ctx);
  2272. }
  2273. #define LLAMA_METAL_CHECK_BUF(result) \
  2274. if (!(result)) { \
  2275. fprintf(stderr, "%s: failed to add buffer\n", __func__); \
  2276. llama_free(ctx); \
  2277. return NULL; \
  2278. }
  2279. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size));
  2280. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size));
  2281. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->model.kv_self.buf.addr, ctx->model.kv_self.buf.size));
  2282. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].addr, ctx->buf_scratch[0].size));
  2283. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].addr, ctx->buf_scratch[1].size));
  2284. #undef LLAMA_METAL_CHECK_BUF
  2285. }
  2286. #endif
  2287. return ctx;
  2288. }
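// Typical use (a sketch; the model path is a placeholder and llama_context_default_params() comes
// from the public header):
//
//   llama_context_params cparams = llama_context_default_params();
//   cparams.n_ctx = 2048;
//   llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", cparams);
//   if (ctx == NULL) { /* handle the error */ }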
  2289. void llama_free(struct llama_context * ctx) {
  2290. delete ctx;
  2291. }
  2292. int llama_model_quantize(
  2293. const char * fname_inp,
  2294. const char * fname_out,
  2295. const llama_model_quantize_params *params) {
  2296. try {
  2297. llama_model_quantize_internal(fname_inp, fname_out, params);
  2298. return 0;
  2299. } catch (const std::exception & err) {
  2300. fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.what());
  2301. return 1;
  2302. }
  2303. }
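// LoRA application below follows W' = W + s * (B x A), where A and B are the low-rank ".loraA" /
// ".loraB" tensors read from the adapter file and s = lora_alpha / lora_r is the scaling factor.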
  2304. int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
  2305. fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
  2306. auto & model = ctx->model;
  2307. const int64_t t_start_lora_us = ggml_time_us();
  2308. auto fin = std::ifstream(path_lora, std::ios::binary);
  2309. if (!fin) {
  2310. fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
  2311. return 1;
  2312. }
  2313. // verify magic and version
  2314. {
  2315. uint32_t magic;
  2316. fin.read((char *) &magic, sizeof(magic));
  2317. if (magic != LLAMA_FILE_MAGIC_GGLA) {
  2318. fprintf(stderr, "%s: bad file magic\n", __func__);
  2319. return 1;
  2320. }
  2321. uint32_t format_version;
  2322. fin.read((char *) &format_version, sizeof(format_version));
  2323. if (format_version != 1) {
  2324. fprintf(stderr, "%s: unsupported file version\n", __func__ );
  2325. return 1;
  2326. }
  2327. }
  2328. int32_t lora_r;
  2329. int32_t lora_alpha;
  2330. fin.read((char *) &lora_r, sizeof(lora_r));
  2331. fin.read((char *) &lora_alpha, sizeof(lora_alpha));
  2332. float scaling = (float)lora_alpha / (float)lora_r;
  2333. fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
  2334. // create a temporary ggml context to store the lora tensors
  2335. // todo: calculate size from biggest possible tensor
  2336. std::vector<uint8_t> lora_buf(1024ull * 1024ull * 1024ull);
  2337. struct ggml_init_params params;
  2338. params.mem_size = lora_buf.size();
  2339. params.mem_buffer = lora_buf.data();
  2340. params.no_alloc = false;
  2341. ggml_context * lora_ctx = ggml_init(params);
  2342. std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
  2343. // create a name -> tensor map of the model to accelerate lookups
  2344. std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
  2345. for (auto & kv: model.tensors_by_name) {
  2346. model_tensors.insert(kv);
  2347. }
  2348. // load base model
  2349. std::unique_ptr<llama_model_loader> model_loader;
  2350. ggml_context * base_ctx = NULL;
  2351. llama_buffer base_buf;
  2352. if (path_base_model) {
  2353. fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model);
  2354. model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*vocab_only*/ false));
  2355. size_t ctx_size;
  2356. size_t mmapped_size;
  2357. model_loader->calc_sizes(&ctx_size, &mmapped_size);
  2358. base_buf.resize(ctx_size);
  2359. ggml_init_params base_params;
  2360. base_params.mem_size = base_buf.size;
  2361. base_params.mem_buffer = base_buf.addr;
  2362. base_params.no_alloc = model_loader->use_mmap;
  2363. base_ctx = ggml_init(base_params);
  2364. model_loader->ggml_ctx = base_ctx;
2365. // maybe this should be in llama_model_loader
  2366. if (model_loader->use_mmap) {
  2367. model_loader->mapping.reset(new llama_mmap(&model_loader->file_loaders.at(0)->file, /* prefetch */ 0));
  2368. }
  2369. }
  2370. // read tensors and apply
  2371. bool warned = false;
  2372. int n_tensors = 0;
  2373. while (true) {
  2374. int32_t n_dims;
  2375. int32_t length;
  2376. int32_t ftype;
  2377. fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
  2378. fin.read(reinterpret_cast<char *>(&length), sizeof(length));
  2379. fin.read(reinterpret_cast<char *>(&ftype), sizeof(ftype));
  2380. if (fin.eof()) {
  2381. break;
  2382. }
  2383. int32_t ne[2] = { 1, 1 };
  2384. for (int i = 0; i < n_dims; ++i) {
  2385. fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
  2386. }
  2387. std::string name;
  2388. {
  2389. char buf[1024];
  2390. fin.read(buf, length);
  2391. name = std::string(buf, length);
  2392. }
  2393. // check for lora suffix and get the type of tensor
  2394. const std::string lora_suffix = ".lora";
  2395. size_t pos = name.rfind(lora_suffix);
  2396. if (pos == std::string::npos) {
  2397. fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
  2398. return 1;
  2399. }
  2400. std::string lora_type = name.substr(pos + lora_suffix.length());
  2401. std::string base_name = name;
  2402. base_name.erase(pos);
  2403. // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
  2404. if (model_tensors.find(base_name) == model_tensors.end()) {
  2405. fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
  2406. return 1;
  2407. }
  2408. // create ggml tensor
  2409. ggml_type wtype;
  2410. switch (ftype) {
  2411. case 0: wtype = GGML_TYPE_F32; break;
  2412. case 1: wtype = GGML_TYPE_F16; break;
  2413. default:
  2414. {
  2415. fprintf(stderr, "%s: invalid tensor data type '%d'\n",
  2416. __func__, ftype);
  2417. return false;
  2418. }
  2419. }
  2420. ggml_tensor* lora_tensor;
  2421. if (n_dims == 2) {
  2422. lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
  2423. }
  2424. else {
  2425. fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims);
  2426. return 1;
  2427. }
  2428. // load tensor data
  2429. size_t offset = fin.tellg();
  2430. size_t tensor_data_size = ggml_nbytes(lora_tensor);
  2431. offset = (offset + 31) & -32;
  2432. fin.seekg(offset);
  2433. fin.read((char*)lora_tensor->data, tensor_data_size);
  2434. lora_tensors[name] = lora_tensor;
  2435. // check if we have both A and B tensors and apply
  2436. if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
  2437. lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
  2438. ggml_tensor * dest_t = model_tensors[base_name];
  2439. ggml_tensor * base_t;
  2440. if (model_loader) {
  2441. // load from base model
  2442. if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
  2443. fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
  2444. return 1;
  2445. }
  2446. size_t idx = model_loader->tensors_map.name_to_idx[base_name];
  2447. llama_load_tensor & lt = model_loader->tensors_map.tensors[idx];
  2448. base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
  2449. lt.data = (uint8_t *) lt.ggml_tensor->data;
  2450. model_loader->load_data_for(lt);
  2451. lt.ggml_tensor->data = lt.data;
  2452. }
  2453. else {
  2454. base_t = dest_t;
  2455. }
  2456. if (ggml_is_quantized(base_t->type)) {
  2457. if (!warned) {
  2458. fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
  2459. "use a f16 or f32 base model with --lora-base\n", __func__);
  2460. warned = true;
  2461. }
  2462. }
  2463. ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
  2464. ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
  2465. if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
  2466. fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
  2467. " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
  2468. return 1;
  2469. }
  2470. // w = w + BA*s
  2471. ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
  2472. if (scaling != 1.0f) {
  2473. ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
  2474. BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor);
  2475. }
  2476. ggml_tensor * r;
  2477. if (base_t == dest_t) {
  2478. r = ggml_add_inplace(lora_ctx, dest_t, BA);
  2479. }
  2480. else {
  2481. r = ggml_add(lora_ctx, base_t, BA);
  2482. r = ggml_cpy(lora_ctx, r, dest_t);
  2483. }
  2484. struct ggml_cgraph gf = ggml_build_forward(r);
  2485. gf.n_threads = n_threads;
  2486. ggml_graph_compute(lora_ctx, &gf);
  2487. // we won't need these tensors again, reset the context to save memory
  2488. ggml_free(lora_ctx);
  2489. lora_ctx = ggml_init(params);
  2490. lora_tensors.clear();
  2491. n_tensors++;
  2492. if (n_tensors % 4 == 0) {
  2493. fprintf(stderr, ".");
  2494. }
  2495. }
  2496. }
  2497. // TODO: this should be in a destructor, it will leak on failure
  2498. ggml_free(lora_ctx);
  2499. if (base_ctx) {
  2500. ggml_free(base_ctx);
  2501. }
  2502. const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
  2503. fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
  2504. return 0;
  2505. }
  2506. int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
  2507. try {
  2508. return llama_apply_lora_from_file_internal(ctx, path_lora, path_base_model, n_threads);
  2509. } catch (const std::exception & err) {
  2510. fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
  2511. return 1;
  2512. }
  2513. }
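// Usage sketch (added for illustration, not part of the upstream source): applying a LoRA
// adapter to a freshly created context. The file paths and thread count are hypothetical
// placeholders. Passing an f16/f32 model as path_base_model avoids the quantization quality
// warning above; pass NULL to patch the loaded weights in place instead.
//
//     struct llama_context * ctx = llama_init_from_file("model-q4_0.bin", llama_context_default_params());
//     if (llama_apply_lora_from_file(ctx, "adapter.bin", "model-f16.bin", /*n_threads*/ 4) != 0) {
//         fprintf(stderr, "failed to apply the lora adapter\n");
//     }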
int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
    return ctx->model.kv_self.n;
}

#define LLAMA_MAX_RNG_STATE (64*1024)

void llama_set_rng_seed(struct llama_context * ctx, int seed) {
    if (seed < 0) {
        seed = time(NULL);
    }
    ctx->rng.seed(seed);
}

// Returns the *maximum* size of the state
size_t llama_get_state_size(const struct llama_context * ctx) {
    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size        = sizeof(size_t);
    const size_t s_rng             = LLAMA_MAX_RNG_STATE;
    const size_t s_logits_capacity = sizeof(size_t);
    const size_t s_logits_size     = sizeof(size_t);
    const size_t s_logits          = ctx->logits.capacity() * sizeof(float);
    const size_t s_embedding_size  = sizeof(size_t);
    const size_t s_embedding       = ctx->embedding.size() * sizeof(float);
    const size_t s_kv_size         = sizeof(size_t);
    const size_t s_kv_ntok         = sizeof(int);
    const size_t s_kv              = ctx->model.kv_self.buf.size;

    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_logits_capacity
        + s_logits_size
        + s_logits
        + s_embedding_size
        + s_embedding
        + s_kv_size
        + s_kv_ntok
        + s_kv
    );

    return s_total;
}
// Copies the state to the specified destination address
size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
    uint8_t * out = dst;

    // copy rng
    {
        std::stringstream rng_ss;
        rng_ss << ctx->rng;

        const size_t rng_size = rng_ss.str().size();
        char rng_buf[LLAMA_MAX_RNG_STATE];

        memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
        memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());

        memcpy(out, &rng_size,   sizeof(rng_size));    out += sizeof(rng_size);
        memcpy(out, &rng_buf[0], LLAMA_MAX_RNG_STATE); out += LLAMA_MAX_RNG_STATE;
    }

    // copy logits
    {
        const size_t logits_cap  = ctx->logits.capacity();
        const size_t logits_size = ctx->logits.size();

        memcpy(out, &logits_cap,  sizeof(logits_cap));  out += sizeof(logits_cap);
        memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size);

        if (logits_size) {
            memcpy(out, ctx->logits.data(), logits_size * sizeof(float));
        }

        out += logits_cap * sizeof(float);
    }

    // copy embeddings
    {
        const size_t embedding_size = ctx->embedding.size();

        memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size);

        if (embedding_size) {
            memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float));
            out += embedding_size * sizeof(float);
        }
    }

    // copy kv cache
    {
        const auto & kv_self = ctx->model.kv_self;
        const auto & hparams = ctx->model.hparams;
        const int    n_layer = hparams.n_layer;
        const int    n_embd  = hparams.n_embd;
        const int    n_ctx   = hparams.n_ctx;

        const size_t kv_size = kv_self.buf.size;
        const int    kv_ntok = llama_get_kv_cache_token_count(ctx);

        memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size);
        memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok);

        if (kv_size) {
            const size_t elt_size = ggml_element_size(kv_self.k);

            char buffer[4096];

            ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
            ggml_cgraph gf{};
            gf.n_threads = 1;

            ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
            kout3d->data = out;
            out += ggml_nbytes(kout3d);

            ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
            vout3d->data = out;
            out += ggml_nbytes(vout3d);

            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                n_embd, kv_ntok, n_layer,
                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
                kv_ntok, n_embd, n_layer,
                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d));
            ggml_graph_compute(cpy_ctx, &gf);

            ggml_free(cpy_ctx);
        }
    }

    const size_t written  = out - dst;
    const size_t max_size = llama_get_state_size(ctx);

    LLAMA_ASSERT(written <= max_size);

    return written;
}
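// Usage sketch (added for illustration, not part of the upstream source): snapshotting the
// full context state into a heap buffer. llama_get_state_size() returns an upper bound, so
// the buffer may be larger than the number of bytes actually written.
//
//     std::vector<uint8_t> state(llama_get_state_size(ctx));
//     const size_t n_written = llama_copy_state_data(ctx, state.data());
//     state.resize(n_written); // keep only the bytes that were actually serialized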
// Sets the state reading from the specified source address
size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
    uint8_t * inp = src;

    // set rng
    {
        size_t rng_size;
        char   rng_buf[LLAMA_MAX_RNG_STATE];

        memcpy(&rng_size,   inp, sizeof(rng_size));    inp += sizeof(rng_size);
        memcpy(&rng_buf[0], inp, LLAMA_MAX_RNG_STATE); inp += LLAMA_MAX_RNG_STATE;

        std::stringstream rng_ss;
        rng_ss.str(std::string(&rng_buf[0], rng_size));
        rng_ss >> ctx->rng;

        LLAMA_ASSERT(rng_ss.fail() == false);
    }

    // set logits
    {
        size_t logits_cap;
        size_t logits_size;

        memcpy(&logits_cap,  inp, sizeof(logits_cap));  inp += sizeof(logits_cap);
        memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);

        LLAMA_ASSERT(ctx->logits.capacity() == logits_cap);

        if (logits_size) {
            ctx->logits.resize(logits_size);
            memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
        }

        inp += logits_cap * sizeof(float);
    }

    // set embeddings
    {
        size_t embedding_size;

        memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);

        LLAMA_ASSERT(ctx->embedding.capacity() == embedding_size);

        if (embedding_size) {
            memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
            inp += embedding_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->model.kv_self;
        const auto & hparams = ctx->model.hparams;
        const int    n_layer = hparams.n_layer;
        const int    n_embd  = hparams.n_embd;
        const int    n_ctx   = hparams.n_ctx;

        size_t kv_size;
        int    kv_ntok;

        memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
        memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok);

        if (kv_size) {
            LLAMA_ASSERT(kv_self.buf.size == kv_size);

            const size_t elt_size = ggml_element_size(kv_self.k);

            char buffer[4096];

            ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
            ggml_cgraph gf{};
            gf.n_threads = 1;

            ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
            kin3d->data = (void *) inp;
            inp += ggml_nbytes(kin3d);

            ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
            vin3d->data = (void *) inp;
            inp += ggml_nbytes(vin3d);

            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                n_embd, kv_ntok, n_layer,
                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
                kv_ntok, n_embd, n_layer,
                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d));
            ggml_graph_compute(cpy_ctx, &gf);

            ggml_free(cpy_ctx);
        }

        ctx->model.kv_self.n = kv_ntok;
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_get_state_size(ctx);

    LLAMA_ASSERT(nread <= max_size);

    return nread;
}
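// Usage sketch (added for illustration, not part of the upstream source): restoring a
// snapshot previously produced by llama_copy_state_data() into a context created from the
// same model. `state` is assumed to be the buffer from the sketch above.
//
//     const size_t n_read = llama_set_state_data(ctx, state.data());
//     // n_read matches the number of bytes written when the snapshot was taken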
bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            fprintf(stderr, "%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            fprintf(stderr, "%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            fprintf(stderr, "%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_get_state_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            fprintf(stderr, "%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_set_state_data(ctx, state_data.data());
    }

    return true;
}

bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state
    {
        const size_t n_state_size_max = llama_get_state_size(ctx);

        std::vector<uint8_t> state_data(n_state_size_max);
        const size_t n_state_size_cur = llama_copy_state_data(ctx, state_data.data());

        file.write_raw(state_data.data(), n_state_size_cur);
    }

    return true;
}
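// Usage sketch (added for illustration, not part of the upstream source): persisting a
// session and resuming it later. `prompt_tokens` is assumed to hold the tokens evaluated
// so far; the path is a hypothetical placeholder.
//
//     llama_save_session_file(ctx, "session.bin", prompt_tokens.data(), prompt_tokens.size());
//
//     std::vector<llama_token> tokens(llama_n_ctx(ctx));
//     size_t n_loaded = 0;
//     if (llama_load_session_file(ctx, "session.bin", tokens.data(), tokens.size(), &n_loaded)) {
//         tokens.resize(n_loaded); // these tokens are already reflected in the restored kv cache
//     }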
int llama_eval(
        struct llama_context * ctx,
           const llama_token * tokens,
                         int   n_tokens,
                         int   n_past,
                         int   n_threads) {
    if (!llama_eval_internal(*ctx, tokens, n_tokens, n_past, n_threads, nullptr)) {
        fprintf(stderr, "%s: failed to eval\n", __func__);
        return 1;
    }

    // get a more accurate load time, upon first eval
    // TODO: fix this
    if (!ctx->has_evaluated_once) {
        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
        ctx->has_evaluated_once = true;
    }

    return 0;
}
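// Usage sketch (added for illustration, not part of the upstream source): evaluating a
// batch of tokens and reading the resulting logits. `tokens` and `n_past` are assumed to
// come from the caller.
//
//     if (llama_eval(ctx, tokens.data(), (int) tokens.size(), n_past, /*n_threads*/ 4) != 0) {
//         fprintf(stderr, "eval failed\n");
//     }
//     const float * logits = llama_get_logits(ctx); // logits of the last evaluated token
//                                                    // (one row per token if logits_all was set)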
int llama_eval_export(struct llama_context * ctx, const char * fname) {
    const int n_batch = 1;
    const int n_ctx   = 512 - n_batch;

    const std::vector<llama_token> tmp(n_batch, llama_token_bos());

    if (!llama_eval_internal(*ctx, tmp.data(), tmp.size(), n_ctx, 1, fname)) {
        fprintf(stderr, "%s: failed to eval\n", __func__);
        return 1;
    }

    return 0;
}

int llama_tokenize(
        struct llama_context * ctx,
                  const char * text,
                 llama_token * tokens,
                         int   n_max_tokens,
                        bool   add_bos) {
    auto res = llama_tokenize(ctx->vocab, text, add_bos);

    if (n_max_tokens < (int) res.size()) {
        fprintf(stderr, "%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}
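// Usage sketch (added for illustration, not part of the upstream source): the two-pass
// tokenization pattern, resizing the buffer from the negative return value when it is too
// small. `text` is assumed to be a null-terminated C string.
//
//     std::vector<llama_token> toks(strlen(text) + 1); // worst case: one token per byte, plus BOS
//     int n = llama_tokenize(ctx, text, toks.data(), (int) toks.size(), /*add_bos*/ true);
//     if (n < 0) {                                      // buffer too small; -n is the required count
//         toks.resize(-n);
//         n = llama_tokenize(ctx, text, toks.data(), (int) toks.size(), true);
//     }
//     toks.resize(n);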
int llama_n_vocab(const struct llama_context * ctx) {
    return ctx->vocab.id_to_token.size();
}

int llama_n_ctx(const struct llama_context * ctx) {
    return ctx->model.hparams.n_ctx;
}

int llama_n_embd(const struct llama_context * ctx) {
    return ctx->model.hparams.n_embd;
}

int llama_get_vocab(
        const struct llama_context * ctx,
        const char * * strings,
        float * scores,
        int capacity) {
    int n = std::min(capacity, (int) ctx->vocab.id_to_token.size());
    for (int i = 0; i < n; ++i) {
        strings[i] = ctx->vocab.id_to_token[i].tok.c_str();
        scores[i]  = ctx->vocab.id_to_token[i].score;
    }
    return n;
}

float * llama_get_logits(struct llama_context * ctx) {
    return ctx->logits.data();
}
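// Usage sketch (added for illustration, not part of the upstream source): picking the next
// token greedily from the raw logits. The llama_sample_* helpers defined earlier in this
// file offer richer sampling strategies; this is only the bare argmax.
//
//     const int     n_vocab = llama_n_vocab(ctx);
//     const float * logits  = llama_get_logits(ctx);
//     llama_token best = 0;
//     for (llama_token id = 1; id < n_vocab; ++id) {
//         if (logits[id] > logits[best]) {
//             best = id;
//         }
//     }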
float * llama_get_embeddings(struct llama_context * ctx) {
    return ctx->embedding.data();
}

const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) {
    if (token >= llama_n_vocab(ctx)) {
        return nullptr;
    }

    return ctx->vocab.id_to_token[token].tok.c_str();
}

llama_token llama_token_bos() {
    return 1;
}

llama_token llama_token_eos() {
    return 2;
}

llama_token llama_token_nl() {
    return 13;
}

void llama_print_timings(struct llama_context * ctx) {
    const int64_t t_end_us = ggml_time_us();

    const int32_t n_sample = std::max(1, ctx->n_sample);
    const int32_t n_eval   = std::max(1, ctx->n_eval);
    const int32_t n_p_eval = std::max(1, ctx->n_p_eval);

    fprintf(stderr, "\n");
    fprintf(stderr, "%s:        load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0);
    fprintf(stderr, "%s:      sample time = %8.2f ms / %5d runs   (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_sample_us, n_sample, 1e-3 * ctx->t_sample_us / n_sample);
    fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_p_eval_us, n_p_eval, 1e-3 * ctx->t_p_eval_us / n_p_eval);
    fprintf(stderr, "%s:        eval time = %8.2f ms / %5d runs   (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_eval_us, n_eval, 1e-3 * ctx->t_eval_us / n_eval);
    fprintf(stderr, "%s:       total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0);
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";

    return s.c_str();
}

// For internal test use
std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
    return ctx->model.tensors_by_name;
}