llama.cpp

// Defines fileno on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#include <cstddef>
#include <cstdint>
#include <cstdio>
#endif

#include "llama-util.h"
#include "llama.h"

#include "ggml.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif
#ifdef GGML_USE_MPI
#include "ggml-mpi.h"
#endif

#ifdef GGML_USE_K_QUANTS
#ifndef QK_K
#ifdef GGML_QKK_64
#define QK_K 64
#else
#define QK_K 256
#endif
#endif
#endif

#include <array>
#include <ctime>
#include <cinttypes>
#include <fstream>
#include <random>
#include <map>
#include <unordered_map>
#include <queue>
#include <cassert>
#include <cstring>
#include <climits>
#include <memory>
#include <algorithm>
#include <initializer_list>
#include <thread>
#include <atomic>
#include <mutex>
#include <sstream>
#include <numeric>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
#define LLAMA_USE_SCRATCH
#define LLAMA_MAX_SCRATCH_BUFFERS 16

// available llama models
enum e_model {
    MODEL_UNKNOWN,
    MODEL_3B,
    MODEL_7B,
    MODEL_13B,
    MODEL_30B,
    MODEL_65B,
    MODEL_70B,
};

static const size_t kB = 1024;
static const size_t MB = 1024*1024;

// computed for n_ctx == 2048
// TODO: dynamically determine these sizes
//       needs modifications in ggml

typedef void (*offload_func_t)(struct ggml_tensor * tensor);

void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
    (void) tensor;
}

//
// ggml helpers
//

static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
    struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);

    if (plan.work_size > 0) {
        buf.resize(plan.work_size);
        plan.work_data = buf.data();
    }

    ggml_graph_compute(graph, &plan);
}

//
// memory sizes (calculated for n_batch == 512)
//

static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0(int n_ctx)
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B, ((size_t) n_ctx / 16ull + 92ull) * MB },
        { MODEL_7B, ((size_t) n_ctx / 16ull + 100ull) * MB },
        { MODEL_13B, ((size_t) n_ctx / 12ull + 120ull) * MB },
        { MODEL_30B, ((size_t) n_ctx / 9ull + 160ull) * MB },
        { MODEL_65B, ((size_t) n_ctx / 6ull + 256ull) * MB }, // guess
        { MODEL_70B, ((size_t) n_ctx / 7ull + 164ull) * MB },
    };
    return k_sizes;
}
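// Example (illustrative only): the MODEL_7B entry above with n_ctx = 2048
// evaluates to (2048/16 + 100) * MB = 228 MB of scratch memory.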
static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B, 128ull * MB },
        { MODEL_7B, 160ull * MB },
        { MODEL_13B, 192ull * MB },
        { MODEL_30B, 256ull * MB },
        { MODEL_65B, 384ull * MB }, // guess
        { MODEL_70B, 304ull * MB },
    };
    return k_sizes;
}

// used to store the compute graph tensors + non-scratch data
static const std::map<e_model, size_t> & MEM_REQ_EVAL()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B, 8ull * MB },
        { MODEL_7B, 10ull * MB },
        { MODEL_13B, 12ull * MB },
        { MODEL_30B, 16ull * MB },
        { MODEL_65B, 24ull * MB }, // guess
        { MODEL_70B, 24ull * MB },
    };
    return k_sizes;
}

// amount of VRAM needed per batch size to hold temporary results
// the values for 3b and 65b are not derived from testing but instead chosen conservatively
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_BASE()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B, 512ull * kB },
        { MODEL_7B, 512ull * kB },
        { MODEL_13B, 640ull * kB },
        { MODEL_30B, 768ull * kB },
        { MODEL_65B, 1536ull * kB },
        { MODEL_70B, 1536ull * kB }, // TODO (likely can be reduced)
    };
    return k_sizes;
}

// amount of VRAM needed per batch size and context to hold temporary results
// the values for 3b and 65b are not derived from testing but instead chosen conservatively
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_PER_CONTEXT()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B, 128ull },
        { MODEL_7B, 128ull },
        { MODEL_13B, 160ull },
        { MODEL_30B, 208ull },
        { MODEL_65B, 416ull },
        { MODEL_70B, 416ull }, // TODO (likely can be reduced)
    };
    return k_sizes;
}
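// The two tables above are combined further down (in llama_model_load_internal) as
//   vram_scratch = n_batch * (VRAM_REQ_SCRATCH_BASE + n_ctx * VRAM_REQ_SCRATCH_PER_CONTEXT).
// Example (illustrative only) for MODEL_7B with n_batch = 512 and n_ctx = 2048:
//   512 * (512 kB + 2048 * 128 B) = 512 * 786432 B = 384 MB of VRAM scratch.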
// default hparams (LLaMA 7B)
struct llama_hparams {
    uint32_t n_vocab = 32000;
    uint32_t n_ctx = 512; // this is provided as user input?
    uint32_t n_embd = 4096;
    uint32_t n_mult = 256;
    uint32_t n_head = 32;
    uint32_t n_head_kv = 32;
    uint32_t n_layer = 32;
    uint32_t n_rot = 64;

    // LLaMAv2
    // TODO: load from model data hparams
    float f_ffn_mult = 1.0f;
    float f_rms_norm_eps = 1e-6f;

    float rope_freq_base = 10000.0f;
    float rope_freq_scale = 1.0f;

    enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;

    bool operator!=(const llama_hparams & other) const {
        return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT
    }

    uint32_t n_gqa() const {
        return n_head/n_head_kv;
    }

    uint32_t n_embd_head() const {
        return n_embd/n_head;
    }

    uint32_t n_embd_gqa() const {
        return n_embd/n_gqa();
    }

    size_t kv_size() const {
        size_t result = 2ull;
        result *= (size_t) n_embd_gqa();
        result *= (size_t) n_ctx;
        result *= (size_t) n_layer;
        result *= sizeof(ggml_fp16_t);

        return result;
    }
};
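// Example (illustrative only): with the default 7B hparams above
// (n_embd_gqa() = 4096, n_ctx = 512, n_layer = 32), kv_size() evaluates to
//   2 * 4096 * 512 * 32 * sizeof(ggml_fp16_t) = 268435456 bytes = 256 MB,
// i.e. one K and one V half-precision value per embedding element, position and layer.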
struct llama_layer {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * w1;
    struct ggml_tensor * w2;
    struct ggml_tensor * w3;
};

struct llama_kv_cache {
    struct ggml_tensor * k = NULL;
    struct ggml_tensor * v = NULL;

    struct ggml_context * ctx = NULL;

    llama_ctx_buffer buf;

    int n; // number of tokens currently in the cache

    ~llama_kv_cache() {
        if (ctx) {
            ggml_free(ctx);
        }

#ifdef GGML_USE_CUBLAS
        ggml_cuda_free_data(k);
        ggml_cuda_free_data(v);
#endif // GGML_USE_CUBLAS
    }
};

struct llama_vocab {
    using id = int32_t;
    using token = std::string;

    struct token_score {
        token tok;
        float score;
    };

    std::unordered_map<token, id> token_to_id;
    std::vector<token_score> id_to_token;
};

struct llama_model {
    e_model type = MODEL_UNKNOWN;

    llama_hparams hparams;

    struct ggml_tensor * tok_embeddings;

    struct ggml_tensor * norm;
    struct ggml_tensor * output;

    std::vector<llama_layer> layers;
    int n_gpu_layers;

    // context
    struct ggml_context * ctx = NULL;

    // the model memory buffer
    llama_ctx_buffer buf;

    // model memory mapped file
    std::unique_ptr<llama_mmap> mapping;

    // objects representing data potentially being locked in memory
    llama_mlock mlock_buf;
    llama_mlock mlock_mmap;

    // for quantize-stats only
    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;

    int64_t t_load_us = 0;
    int64_t t_start_us = 0;

    llama_vocab vocab;

    ~llama_model() {
        if (ctx) {
            ggml_free(ctx);
        }

#ifdef GGML_USE_CUBLAS
        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
            ggml_cuda_free_data(tensors_by_name[i].second);
        }
        ggml_cuda_free_scratch();
#elif defined(GGML_USE_CLBLAST)
        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
            ggml_cl_free_data(tensors_by_name[i].second);
        }
#endif
    }
};

struct llama_context {
    llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
#ifdef GGML_USE_METAL
    ~llama_context() {
        if (ctx_metal) {
            ggml_metal_free(ctx_metal);
        }
    }
#endif
    std::mt19937 rng;

    bool has_evaluated_once = false;

    int64_t t_sample_us = 0;
    int64_t t_eval_us = 0;
    int64_t t_p_eval_us = 0;

    int32_t n_sample = 0; // number of tokens sampled
    int32_t n_eval = 0; // number of eval calls
    int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)

    const llama_model & model;

    bool model_owner = false;

    int64_t t_load_us;
    int64_t t_start_us;

    // key + value cache for the self attention
    struct llama_kv_cache kv_self;

    size_t mem_per_token = 0;

    // decode output (2-dimensional array: [n_tokens][n_vocab])
    std::vector<float> logits;
    bool logits_all = false;

    // input embedding (1-dimensional array: [n_embd])
    std::vector<float> embedding;

    // reusable buffer for `struct ggml_graph_plan.work_data`
    std::vector<uint8_t> work_buffer;

    // memory buffers used to evaluate the model
    // TODO: move in llama_state
    llama_ctx_buffer buf_compute;
    llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];

#ifdef GGML_USE_METAL
    ggml_metal_context * ctx_metal = NULL;
#endif

#ifdef GGML_USE_MPI
    ggml_mpi_context * ctx_mpi = NULL;
#endif

    int buf_last = 0;
    size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };

    void use_buf(struct ggml_context * ctx, int i) {
#if defined(LLAMA_USE_SCRATCH)
        size_t last_size = 0;

        if (i == -1) {
            last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, });
        } else {
            auto & buf = buf_scratch[i];
            last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, });
        }

        if (buf_last >= 0) {
            buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size);
        }

        buf_last = i;
#else
        (void) i;
        (void) ctx;
#endif
    }

    size_t get_buf_max_mem(int i) const {
#if defined(LLAMA_USE_SCRATCH)
        return buf_max_size[i];
#else
        (void) i;
        return 0;
#endif
    }
};
template <typename T>
static T checked_mul(T a, T b) {
    T ret = a * b;
    if (a != 0 && ret / a != b) {
        throw std::runtime_error(format("overflow multiplying %llu * %llu",
                     (unsigned long long) a, (unsigned long long) b));
    }
    return ret;
}

static size_t checked_div(size_t a, size_t b) {
    if (b == 0 || a % b != 0) {
        throw std::runtime_error(format("error dividing %zu / %zu", a, b));
    }
    return a / b;
}

static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
    for (size_t i = 1; i < ne.size(); i++) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
    }
    return buf;
}

static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) {
    size_t size = ggml_type_size(type);
    for (uint32_t dim : ne) {
        size = checked_mul<size_t>(size, dim);
    }
    return size / ggml_blck_size(type);
}
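// Example (illustrative only, assuming ggml's Q4_0 layout of 18-byte blocks
// covering 32 elements): a 4096 x 4096 GGML_TYPE_Q4_0 tensor is
//   18 * 4096 * 4096 / 32 = 9437184 bytes.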
struct llama_load_tensor {
    std::string name;
    enum ggml_type type = GGML_TYPE_F32;
    std::vector<uint32_t> ne;
    size_t file_off;
    size_t size;
    struct ggml_tensor * ggml_tensor = NULL;
    uint8_t * data;
};

struct llama_load_tensors_map {
    // tensors is kept in a separate vector to preserve file order
    std::vector<llama_load_tensor> tensors;
    std::unordered_map<std::string, size_t> name_to_idx;
};

enum llama_file_version {
    LLAMA_FILE_VERSION_GGML,
    LLAMA_FILE_VERSION_GGMF_V1, // added version field and scores in vocab
    LLAMA_FILE_VERSION_GGJT_V1, // added padding
    LLAMA_FILE_VERSION_GGJT_V2, // changed quantization format
    LLAMA_FILE_VERSION_GGJT_V3, // changed Q4 and Q8 quantization format
};

struct llama_file_loader {
    llama_file file;
    llama_file_version file_version;
    llama_hparams hparams;
    llama_vocab vocab;

    llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map)
        : file(fname, "rb") {
        fprintf(stderr, "llama.cpp: loading model from %s\n", fname);
        read_magic();
        read_hparams();
        read_vocab();
        read_tensor_metadata(tensors_map);
    }

    void read_magic() {
        uint32_t magic = file.read_u32();

        if (magic == LLAMA_FILE_MAGIC_GGML) {
            file_version = LLAMA_FILE_VERSION_GGML;
            return;
        }

        uint32_t version = file.read_u32();

        switch (magic) {
            case LLAMA_FILE_MAGIC_GGMF:
                switch (version) {
                    case 1: file_version = LLAMA_FILE_VERSION_GGMF_V1; return;
                }
                break;
            case LLAMA_FILE_MAGIC_GGJT:
                switch (version) {
                    case 1: file_version = LLAMA_FILE_VERSION_GGJT_V1; return;
                    case 2: file_version = LLAMA_FILE_VERSION_GGJT_V2; return;
                    case 3: file_version = LLAMA_FILE_VERSION_GGJT_V3; return;
                }
        }

        throw std::runtime_error(format("unknown (magic, version) combination: %08x, %08x; is this really a GGML file?",
                     magic, version));
    }

    void read_hparams() {
        hparams.n_vocab = file.read_u32();
        hparams.n_embd = file.read_u32();
        hparams.n_mult = file.read_u32();
        hparams.n_head = file.read_u32();
        hparams.n_layer = file.read_u32();
        hparams.n_rot = file.read_u32();
        hparams.ftype = (enum llama_ftype) file.read_u32();

        // LLaMAv2
        // TODO: read from header
        hparams.n_head_kv = hparams.n_head;
    }

    void read_vocab() {
        vocab.id_to_token.resize(hparams.n_vocab);

        for (uint32_t i = 0; i < hparams.n_vocab; i++) {
            uint32_t len = file.read_u32();
            std::string word = file.read_string(len);

            float score = 0.0f;
            file.read_raw(&score, sizeof(score));

            vocab.token_to_id[word] = i;

            auto & tok_score = vocab.id_to_token[i];
            tok_score.tok = std::move(word);
            tok_score.score = score;
        }
    }

    void read_tensor_metadata(llama_load_tensors_map & tensors_map) {
        while (file.tell() < file.size) {
            llama_load_tensor tensor;
            uint32_t n_dims = file.read_u32();
            uint32_t name_len = file.read_u32();
            tensor.type = (enum ggml_type) file.read_u32();
            tensor.ne.resize(n_dims);
            file.read_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * n_dims);
            std::string name = file.read_string(name_len);
            if (n_dims < 1 || n_dims > 2) {
                throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims));
            }
            switch (tensor.type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                case GGML_TYPE_Q4_0:
                case GGML_TYPE_Q4_1:
                case GGML_TYPE_Q5_0:
                case GGML_TYPE_Q5_1:
                case GGML_TYPE_Q8_0:
                case GGML_TYPE_Q2_K:
                case GGML_TYPE_Q3_K:
                case GGML_TYPE_Q4_K:
                case GGML_TYPE_Q5_K:
                case GGML_TYPE_Q6_K:
                    break;
                default: {
                    throw std::runtime_error(format("unrecognized tensor type %u\n", tensor.type));
                }
            }

            // skip to the next multiple of 32 bytes
            if (file_version >= LLAMA_FILE_VERSION_GGJT_V1) {
                file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
            }

            tensor.file_off = file.tell();
            tensor.name = name;
            tensor.size = llama_calc_tensor_size(tensor.ne, tensor.type);
            file.seek(tensor.size, SEEK_CUR);

            tensors_map.tensors.push_back(tensor);
            tensors_map.name_to_idx[name] = tensors_map.tensors.size() - 1;
        }
    }
};
struct llama_file_saver {
    llama_file file;
    llama_file_loader * any_file_loader;

    llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
        : file(fname, "wb"), any_file_loader(any_file_loader) {
        fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
        write_magic();
        write_hparams(new_ftype);
        write_vocab();
    }

    void write_magic() {
        file.write_u32(LLAMA_FILE_MAGIC);   // magic
        file.write_u32(LLAMA_FILE_VERSION); // version
    }

    void write_hparams(enum llama_ftype new_ftype) {
        const llama_hparams & hparams = any_file_loader->hparams;
        file.write_u32(hparams.n_vocab);
        file.write_u32(hparams.n_embd);
        file.write_u32(hparams.n_mult);
        file.write_u32(hparams.n_head);
        file.write_u32(hparams.n_layer);
        file.write_u32(hparams.n_rot);
        file.write_u32(new_ftype);
    }

    void write_vocab() {
        if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
            fprintf(stderr, "llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
        }
        uint32_t n_vocab = any_file_loader->hparams.n_vocab;
        for (uint32_t i = 0; i < n_vocab; i++) {
            const auto & token_score = any_file_loader->vocab.id_to_token.at(i);
            file.write_u32((uint32_t) token_score.tok.size());
            file.write_raw(token_score.tok.data(), token_score.tok.size());
            file.write_raw(&token_score.score, sizeof(token_score.score));
        }
    }

    void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) {
        switch (new_type) {
            case GGML_TYPE_F32:
            case GGML_TYPE_F16:
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
            case GGML_TYPE_Q2_K:
            case GGML_TYPE_Q3_K:
            case GGML_TYPE_Q4_K:
            case GGML_TYPE_Q5_K:
            case GGML_TYPE_Q6_K:
                break;
            default: LLAMA_ASSERT(false);
        }
        file.write_u32((uint32_t) tensor.ne.size());
        file.write_u32((uint32_t) tensor.name.size());
        file.write_u32(new_type);
        file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size());
        file.write_raw(tensor.name.data(), tensor.name.size());
        file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
        LLAMA_ASSERT(new_size == llama_calc_tensor_size(tensor.ne, new_type));
        file.write_raw(new_data, new_size);
    }
};
struct llama_model_loader {
    std::unique_ptr<llama_file_loader> file_loader;
    llama_load_tensors_map tensors_map;
    bool use_mmap;
    size_t num_ggml_tensors_created = 0;
    struct ggml_context * ggml_ctx = NULL;
    std::unique_ptr<llama_mmap> mapping;

    llama_model_loader(const std::string & fname_base, bool use_mmap) {
        file_loader = std::unique_ptr<llama_file_loader>(new llama_file_loader(fname_base.c_str(), tensors_map));
        if (!llama_mmap::SUPPORTED) {
            use_mmap = false;
        }
        this->use_mmap = use_mmap;
    }

    void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const {
        *ctx_size_p = *mmapped_size_p = 0;
        for (const llama_load_tensor & lt : tensors_map.tensors) {
            *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
            *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size + 16;
        }
    }

    struct ggml_tensor * get_tensor(const std::string & name, const std::vector<uint32_t> & ne, ggml_backend backend) {
        auto it = tensors_map.name_to_idx.find(name);
        if (it == tensors_map.name_to_idx.end()) {
            throw std::runtime_error(std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str())));
        }
        llama_load_tensor & lt = tensors_map.tensors.at(it->second);
        if (lt.ne != ne) {
            throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s",
                         name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str()));
        }

        return get_tensor_for(lt, backend);
    }

    struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) {
        struct ggml_tensor * tensor;
        if (backend != GGML_BACKEND_CPU) {
            ggml_set_no_alloc(ggml_ctx, true);
        }
        if (lt.ne.size() == 2) {
            tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1));
        } else {
            LLAMA_ASSERT(lt.ne.size() == 1);
            tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0));
        }
        ggml_set_name(tensor, lt.name.c_str());
        LLAMA_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor

        if (backend != GGML_BACKEND_CPU) {
            ggml_set_no_alloc(ggml_ctx, use_mmap);
        }
        tensor->backend = backend;
        lt.ggml_tensor = tensor;
        num_ggml_tensors_created++;
        return tensor;
    }

    void done_getting_tensors() const {
        if (num_ggml_tensors_created != tensors_map.tensors.size()) {
            throw std::runtime_error(std::string("llama.cpp: file contained more tensors than expected"));
        }
    }

    void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
        size_t data_size = 0;
        size_t prefetch_size = 0;
        size_t lock_size = 0;
        for (const llama_load_tensor & lt : tensors_map.tensors) {
            data_size += lt.size;
            if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
                prefetch_size += lt.size;
            }
        }

        if (use_mmap) {
            mapping.reset(new llama_mmap(&file_loader->file, prefetch_size, ggml_is_numa()));
            if (lmlock) {
                lmlock->init(mapping->addr);
            }
        }

        size_t done_size = 0;
        for (llama_load_tensor & lt : tensors_map.tensors) {
            if (progress_callback) {
                progress_callback((float) done_size / data_size, progress_callback_user_data);
            }
            LLAMA_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already
            lt.data = (uint8_t *) lt.ggml_tensor->data;

            // allocate temp buffer if not using mmap
            if (!use_mmap && lt.data == NULL) {
                GGML_ASSERT(lt.ggml_tensor->backend != GGML_BACKEND_CPU);
                lt.data = (uint8_t*)malloc(ggml_nbytes(lt.ggml_tensor));
            }

            load_data_for(lt);

            switch(lt.ggml_tensor->backend) {
                case GGML_BACKEND_CPU:
                    lt.ggml_tensor->data = lt.data;
                    if (use_mmap && lmlock) {
                        lock_size += lt.size;
                        lmlock->grow_to(lock_size);
                    }
                    break;
#if defined(GGML_USE_CUBLAS)
                case GGML_BACKEND_GPU:
                case GGML_BACKEND_GPU_SPLIT:
                    ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor);
                    if (!use_mmap) {
                        free(lt.data);
                    }
                    break;
#elif defined(GGML_USE_CLBLAST)
                case GGML_BACKEND_GPU:
                    ggml_cl_transform_tensor(lt.data, lt.ggml_tensor);
                    if (!use_mmap) {
                        free(lt.data);
                    }
                    break;
#endif
                default:
                    continue;
            }

            done_size += lt.size;
        }
    }

    void load_data_for(llama_load_tensor & lt) {
        if (use_mmap) {
            lt.data = (uint8_t *) mapping->addr + lt.file_off;
        } else {
            llama_file & file = file_loader->file;
            file.seek(lt.file_off, SEEK_SET);
            file.read_raw(lt.data, lt.size);
        }

        if (0) {
            print_checksum(lt);
        }
    }

    static void print_checksum(llama_load_tensor & lt) {
        uint32_t sum = 0;
        for (size_t i = 0; i < lt.size; i++) {
            uint8_t byte = lt.data[i];
            sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
        }
        fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
                llama_format_tensor_shape(lt.ne).c_str(), lt.size);
    }
};
//
// kv cache
//

static bool kv_cache_init(
        const struct llama_hparams & hparams,
        struct llama_kv_cache & cache,
        ggml_type wtype,
        int n_ctx,
        int n_gpu_layers) {
    const int n_embd  = hparams.n_embd_gqa();
    const int n_layer = hparams.n_layer;

    const int64_t n_mem      = n_layer*n_ctx;
    const int64_t n_elements = n_embd*n_mem;

    cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
    cache.n = 0;

    struct ggml_init_params params;
    params.mem_size   = cache.buf.size;
    params.mem_buffer = cache.buf.addr;
    params.no_alloc   = false;

    cache.ctx = ggml_init(params);

    if (!cache.ctx) {
        fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
        return false;
    }

    cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
    cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
    ggml_set_name(cache.k, "cache_k");
    ggml_set_name(cache.v, "cache_v");

    (void) n_gpu_layers;
#ifdef GGML_USE_CUBLAS
    if (n_gpu_layers > n_layer + 1) {
        ggml_cuda_assign_buffers_no_scratch(cache.v);
    }
    if (n_gpu_layers > n_layer + 2) {
        ggml_cuda_assign_buffers_no_scratch(cache.k);
    }
#endif // GGML_USE_CUBLAS

    return true;
}
struct llama_context_params llama_context_default_params() {
    struct llama_context_params result = {
        /*.seed =*/ LLAMA_DEFAULT_SEED,
        /*.n_ctx =*/ 512,
        /*.n_batch =*/ 512,
        /*.n_gqa =*/ 1,
        /*.rms_norm_eps =*/ 1e-6f,
        /*.gpu_layers =*/ 0,
        /*.main_gpu =*/ 0,
        /*.tensor_split =*/ nullptr,
        /*.rope_freq_base =*/ 10000.0f,
        /*.rope_freq_scale =*/ 1.0f,
        /*.progress_callback =*/ nullptr,
        /*.progress_callback_user_data =*/ nullptr,
        /*.low_vram =*/ false,
        /*.f16_kv =*/ true,
        /*.logits_all =*/ false,
        /*.vocab_only =*/ false,
        /*.use_mmap =*/ true,
        /*.use_mlock =*/ false,
        /*.embedding =*/ false,
    };

    return result;
}

struct llama_model_quantize_params llama_model_quantize_default_params() {
    struct llama_model_quantize_params result = {
        /*.nthread =*/ 0,
        /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
        /*.allow_requantize =*/ false,
        /*.quantize_output_tensor =*/ true,
    };

    return result;
}

int llama_max_devices() {
    return LLAMA_MAX_DEVICES;
}

bool llama_mmap_supported() {
    return llama_mmap::SUPPORTED;
}

bool llama_mlock_supported() {
    return llama_mlock::SUPPORTED;
}

void llama_backend_init(bool numa) {
    ggml_time_init();

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    if (numa) {
        ggml_numa_init();
    }

#ifdef GGML_USE_MPI
    ggml_mpi_backend_init();
#endif
}

void llama_backend_free() {
#ifdef GGML_USE_MPI
    ggml_mpi_backend_free();
#endif
}

int64_t llama_time_us() {
    return ggml_time_us();
}
//
// model loading
//

static const char *llama_file_version_name(llama_file_version version) {
    switch (version) {
        case LLAMA_FILE_VERSION_GGML: return "'ggml' (old version with low tokenizer quality and no mmap support)";
        case LLAMA_FILE_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)";
        case LLAMA_FILE_VERSION_GGJT_V1: return "ggjt v1 (pre #1405)";
        case LLAMA_FILE_VERSION_GGJT_V2: return "ggjt v2 (pre #1508)";
        case LLAMA_FILE_VERSION_GGJT_V3: return "ggjt v3 (latest)";
    }

    return "unknown";
}

static const char *llama_ftype_name(enum llama_ftype ftype) {
    switch (ftype) {
        case LLAMA_FTYPE_ALL_F32: return "all F32";
        case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16";
        case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
        case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
            return "mostly Q4_1, some F16";
        case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0";
        case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1";
        case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0";

        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K";
        case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large";
        case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K";
        default: return "unknown, may not work";
    }
}

static const char *llama_model_type_name(e_model type) {
    switch (type) {
        case MODEL_3B: return "3B";
        case MODEL_7B: return "7B";
        case MODEL_13B: return "13B";
        case MODEL_30B: return "30B";
        case MODEL_65B: return "65B";
        case MODEL_70B: return "70B";
        default: LLAMA_ASSERT(false);
    }
}
static void llama_model_load_internal(
        const std::string & fname,
        llama_model & model,
        llama_vocab & vocab,
        int n_ctx,
        int n_batch,
        int n_gqa,
        float rms_norm_eps,
        int n_gpu_layers,
        int main_gpu,
        const float * tensor_split,
        float rope_freq_base,
        float rope_freq_scale,
        bool low_vram,
        ggml_type memory_type,
        bool use_mmap,
        bool use_mlock,
        bool vocab_only,
        llama_progress_callback progress_callback,
        void * progress_callback_user_data) {

    model.t_start_us = ggml_time_us();

    std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));

    vocab = std::move(ml->file_loader->vocab);
    model.hparams = ml->file_loader->hparams;
    model.n_gpu_layers = n_gpu_layers;
    llama_file_version file_version = ml->file_loader->file_version;

    auto & hparams = model.hparams;

    // TODO: read from file
    hparams.f_rms_norm_eps = rms_norm_eps;

    {
        switch (hparams.n_layer) {
            case 26: model.type = e_model::MODEL_3B; break;
            case 32: model.type = e_model::MODEL_7B; break;
            case 40: model.type = e_model::MODEL_13B; break;
            case 60: model.type = e_model::MODEL_30B; break;
            case 80: model.type = e_model::MODEL_65B; break;
            default:
                {
                    if (hparams.n_layer < 32) {
                        model.type = e_model::MODEL_7B;
                    }
                } break;
        }

        hparams.n_ctx = n_ctx;

        // LLaMAv2
        // TODO: temporary until GGUF
        LLAMA_ASSERT(hparams.n_head % n_gqa == 0);
        hparams.n_head_kv = hparams.n_head / n_gqa;
        if (model.type == e_model::MODEL_65B && n_gqa == 8) {
            fprintf(stderr, "%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
            model.type = e_model::MODEL_70B;
            hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model
        }

        hparams.rope_freq_base = rope_freq_base;
        hparams.rope_freq_scale = rope_freq_scale;
    }

    // ref: https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/model.py#L194-L199
    const uint32_t n_ff_raw = 2*(4*hparams.n_embd)/3;
    const uint32_t n_ff_mult = hparams.f_ffn_mult*n_ff_raw;
    const uint32_t n_ff = ((n_ff_mult + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
    //const uint32_t n_ff = 28672;
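    // Example (illustrative only) with the default 7B hparams (n_embd = 4096,
    // n_mult = 256, f_ffn_mult = 1.0):
    //   n_ff_raw = 2*(4*4096)/3 = 10922
    //   n_ff     = ceil(10922/256) * 256 = 11008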
    {
        fprintf(stderr, "%s: format = %s\n", __func__, llama_file_version_name(file_version));
        fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab);
        fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx);
        fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd);
        fprintf(stderr, "%s: n_mult = %u\n", __func__, hparams.n_mult);
        fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head);
        fprintf(stderr, "%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
        fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer);
        fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
        fprintf(stderr, "%s: n_gqa = %u\n", __func__, hparams.n_gqa());
        fprintf(stderr, "%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps);
        fprintf(stderr, "%s: n_ff = %u\n", __func__, n_ff);
        fprintf(stderr, "%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
        fprintf(stderr, "%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
        fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
        fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type));
    }

    if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
        if (hparams.ftype != LLAMA_FTYPE_ALL_F32 &&
            hparams.ftype != LLAMA_FTYPE_MOSTLY_F16 &&
            hparams.ftype != LLAMA_FTYPE_MOSTLY_Q8_0) {
            throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1405)"));
        }
    }

    if (file_version < LLAMA_FILE_VERSION_GGJT_V3) {
        if (hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
            hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 ||
            hparams.ftype == LLAMA_FTYPE_MOSTLY_Q8_0) {
            throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1508)"));
        }
    }

    if (vocab_only) {
        return;
    }

    auto & ctx = model.ctx;

    size_t ctx_size;
    size_t mmapped_size;
    ml->calc_sizes(&ctx_size, &mmapped_size);
    fprintf(stderr, "%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);

    // create the ggml context
    {
        model.buf.resize(ctx_size);
        if (use_mlock) {
            model.mlock_buf.init (model.buf.addr);
            model.mlock_buf.grow_to(model.buf.size);
        }

        struct ggml_init_params params = {
            /*.mem_size =*/ model.buf.size,
            /*.mem_buffer =*/ model.buf.addr,
            /*.no_alloc =*/ ml->use_mmap,
        };

        model.ctx = ggml_init(params);
        if (!model.ctx) {
            throw std::runtime_error(format("ggml_init() failed"));
        }
    }

    (void) main_gpu;
#if defined(GGML_USE_CUBLAS)
    fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
    ggml_cuda_set_main_device(main_gpu);
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
#elif defined(GGML_USE_CLBLAST)
    fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
#else
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU
#endif

    // prepare memory for the weights
    size_t vram_weights = 0;
    size_t vram_scratch = 0;
    {
        const uint32_t n_embd = hparams.n_embd;
        const uint32_t n_embd_gqa = hparams.n_embd_gqa();
        const uint32_t n_layer = hparams.n_layer;
        const uint32_t n_vocab = hparams.n_vocab;

        ml->ggml_ctx = ctx;

        model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab}, GGML_BACKEND_CPU);

        // "output" tensor
        {
            ggml_backend backend_norm;
            ggml_backend backend_output;
            if (n_gpu_layers > int(n_layer)) { // NOLINT
                // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
                // on Windows however this is detrimental unless everything is on the GPU
#ifndef _WIN32
                backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#else
                backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#endif // _WIN32
                backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
            } else {
                backend_norm = GGML_BACKEND_CPU;
                backend_output = GGML_BACKEND_CPU;
            }

            model.norm = ml->get_tensor("norm.weight", {n_embd}, backend_norm);
            model.output = ml->get_tensor("output.weight", {n_embd, n_vocab}, backend_output);
            if (backend_norm == GGML_BACKEND_GPU) {
                vram_weights += ggml_nbytes(model.norm);
            }
            if (backend_output == GGML_BACKEND_GPU_SPLIT) {
                vram_weights += ggml_nbytes(model.output);
            }
        }

        const int i_gpu_start = n_layer - n_gpu_layers;

        model.layers.resize(n_layer);
        for (uint32_t i = 0; i < n_layer; ++i) {
            const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
            const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT

            auto & layer = model.layers[i];

            std::string layers_i = "layers." + std::to_string(i);

            layer.attention_norm = ml->get_tensor(layers_i + ".attention_norm.weight", {n_embd}, backend);

            layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd}, backend_split);
            layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd_gqa}, backend_split);
            layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd_gqa}, backend_split);
            layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd}, backend_split);

            layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend);

            layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd, n_ff}, backend_split);
            layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend_split);
            layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend_split);

            if (backend == GGML_BACKEND_GPU) {
                vram_weights +=
                    ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
                    ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
                    ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
            }
        }
    }

    ml->done_getting_tensors();

    // print memory requirements
    {
        const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;

        // this is the total memory required to run the inference
        const size_t mem_required =
            ctx_size +
            mmapped_size - vram_weights + // weights in VRAM not in memory
            MEM_REQ_SCRATCH0(hparams.n_ctx).at(model.type) +
            MEM_REQ_SCRATCH1().at(model.type) +
            MEM_REQ_EVAL().at(model.type);

        // this is the memory required by one llama_state
        const size_t mem_required_state =
            scale*hparams.kv_size();

        fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
                mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);

        (void) vram_scratch;
        (void) n_batch;
#ifdef GGML_USE_CUBLAS
        if (low_vram) {
            fprintf(stderr, "%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
            ggml_cuda_set_scratch_size(0); // disable scratch
        } else {
            const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type);
            const size_t vram_scratch_per_context = VRAM_REQ_SCRATCH_PER_CONTEXT().at(model.type);
            vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context);
            ggml_cuda_set_scratch_size(vram_scratch);
            if (n_gpu_layers > 0) {
                fprintf(stderr, "%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
                        __func__, vram_scratch_base / kB, vram_scratch_per_context,
                        (vram_scratch + MB - 1) / MB); // round up
            }
        }
#endif // GGML_USE_CUBLAS

#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));

        fprintf(stderr, "%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
        if (n_gpu_layers > (int) hparams.n_layer) {
            fprintf(stderr, "%s: offloading non-repeating layers to GPU\n", __func__);
        }
        size_t vram_kv_cache = 0;

#ifdef GGML_USE_CUBLAS
        const int max_backend_supported_layers = hparams.n_layer + 3;
        const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
        if (n_gpu_layers > (int) hparams.n_layer + 1) {
            if (low_vram) {
                fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
            } else {
                fprintf(stderr, "%s: offloading v cache to GPU\n", __func__);
                vram_kv_cache += hparams.kv_size() / 2;
            }
        }
        if (n_gpu_layers > (int) hparams.n_layer + 2) {
            if (low_vram) {
                fprintf(stderr, "%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
            } else {
                fprintf(stderr, "%s: offloading k cache to GPU\n", __func__);
                vram_kv_cache += hparams.kv_size() / 2;
            }
        }
#elif defined(GGML_USE_CLBLAST)
        const int max_backend_supported_layers = hparams.n_layer + 1;
        const int max_offloadable_layers = hparams.n_layer + 1;
#endif // GGML_USE_CUBLAS

        fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n",
                __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
        fprintf(stderr, "%s: total VRAM used: %zu MB\n",
                __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
#else
        (void) n_gpu_layers;
#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
    }

    // populate `tensors_by_name`
    for (llama_load_tensor & lt : ml->tensors_map.tensors) {
        model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor);
    }

    (void) tensor_split;
#if defined(GGML_USE_CUBLAS)
    {
        ggml_cuda_set_tensor_split(tensor_split);
    }
#endif

    ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);

    if (progress_callback) {
        progress_callback(1.0f, progress_callback_user_data);
    }

    model.mapping = std::move(ml->mapping);
1129. // loading time will be recalculated after the first eval, so
  1130. // we take page faults deferred by mmap() into consideration
  1131. model.t_load_us = ggml_time_us() - model.t_start_us;
  1132. }
  1133. static bool llama_model_load(
  1134. const std::string & fname,
  1135. llama_model & model,
  1136. llama_vocab & vocab,
  1137. int n_ctx,
  1138. int n_batch,
  1139. int n_gqa,
  1140. float rms_norm_eps,
  1141. int n_gpu_layers,
  1142. int main_gpu,
  1143. const float * tensor_split,
  1144. float rope_freq_base,
  1145. float rope_freq_scale,
  1146. bool low_vram,
  1147. ggml_type memory_type,
  1148. bool use_mmap,
  1149. bool use_mlock,
  1150. bool vocab_only,
  1151. llama_progress_callback progress_callback,
  1152. void *progress_callback_user_data) {
  1153. try {
  1154. llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers, main_gpu, tensor_split, rope_freq_base, rope_freq_scale, low_vram, memory_type,
  1155. use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
  1156. return true;
  1157. } catch (const std::exception & err) {
  1158. fprintf(stderr, "error loading model: %s\n", err.what());
  1159. return false;
  1160. }
  1161. }
  1162. // evaluate the transformer
  1163. //
  1164. // - lctx: llama context
  1165. // - tokens: new batch of tokens to process
1166. // - embd: embeddings input
1167. // - n_tokens: number of tokens
  1168. // - n_past: the context size so far
  1169. // - n_threads: number of threads to use
  1170. //
  1171. static bool llama_eval_internal(
  1172. llama_context & lctx,
  1173. const llama_token * tokens,
  1174. const float * embd,
  1175. int n_tokens,
  1176. int n_past,
  1177. int n_threads,
  1178. const char * cgraph_fname) {
  1179. LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
  1180. #ifdef GGML_USE_MPI
  1181. ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
  1182. #endif
  1183. const int64_t t_start_us = ggml_time_us();
  1184. const int N = n_tokens;
  1185. const auto & model = lctx.model;
  1186. const auto & hparams = model.hparams;
  1187. const auto & kv_self = lctx.kv_self;
  1188. LLAMA_ASSERT(!!kv_self.ctx);
  1189. const int64_t n_embd = hparams.n_embd;
  1190. const int64_t n_layer = hparams.n_layer;
  1191. const int64_t n_ctx = hparams.n_ctx;
  1192. const int64_t n_head = hparams.n_head;
  1193. const int64_t n_head_kv = hparams.n_head_kv;
  1194. const int64_t n_embd_head = hparams.n_embd_head();
  1195. const int64_t n_vocab = hparams.n_vocab;
  1196. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  1197. LLAMA_ASSERT(n_embd_head == hparams.n_rot);
  1198. const float freq_base = hparams.rope_freq_base;
  1199. const float freq_scale = hparams.rope_freq_scale;
  1200. const float rms_norm_eps = hparams.f_rms_norm_eps;
  1201. const int n_gpu_layers = model.n_gpu_layers;
  1202. auto & mem_per_token = lctx.mem_per_token;
  1203. auto & buf_compute = lctx.buf_compute;
  1204. struct ggml_init_params params = {
  1205. /*.mem_size =*/ buf_compute.size,
  1206. /*.mem_buffer =*/ buf_compute.addr,
  1207. /*.no_alloc =*/ false,
  1208. };
  1209. struct ggml_context * ctx0 = ggml_init(params);
  1210. ggml_cgraph gf = {};
  1211. // for big prompts, if BLAS is enabled, it is better to use only one thread
1212. // otherwise the threads spin-lock waiting for the BLAS calls, which degrades performance
  1213. n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
  1214. struct ggml_tensor * cur;
  1215. struct ggml_tensor * inpL;
  1216. if (tokens) {
  1217. struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
  1218. memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
  1219. ggml_set_name(inp_tokens, "inp_tokens");
  1220. inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
  1221. } else {
  1222. #ifdef GGML_USE_MPI
  1223. GGML_ASSERT(false && "not implemented");
  1224. #endif
  1225. inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
  1226. memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
  1227. }
  1228. const int i_gpu_start = n_layer - n_gpu_layers;
  1229. (void) i_gpu_start;
  1230. // offload functions set the tensor output backend to GPU
  1231. // tensors are GPU-accelerated if any input or the output has been offloaded
  1232. //
  1233. // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
  1234. // in that case ggml_cuda_assign_buffers has no effect
  1235. offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
  1236. offload_func_t offload_func_kq = llama_nop;
  1237. offload_func_t offload_func_v = llama_nop;
  1238. #ifdef GGML_USE_CUBLAS
  1239. if (n_gpu_layers > n_layer) {
  1240. offload_func_nr = ggml_cuda_assign_buffers;
  1241. }
  1242. if (n_gpu_layers > n_layer + 1) {
  1243. offload_func_v = ggml_cuda_assign_buffers;
  1244. }
  1245. if (n_gpu_layers > n_layer + 2) {
  1246. offload_func_kq = ggml_cuda_assign_buffers;
  1247. }
  1248. #endif // GGML_USE_CUBLAS
  1249. for (int il = 0; il < n_layer; ++il) {
  1250. ggml_format_name(inpL, "layer_inp_%d", il);
  1251. offload_func_t offload_func = llama_nop;
  1252. #ifdef GGML_USE_CUBLAS
  1253. if (il >= i_gpu_start) {
  1254. offload_func = ggml_cuda_assign_buffers;
  1255. }
  1256. #endif // GGML_USE_CUBLAS
  1257. struct ggml_tensor * inpSA = inpL;
  1258. lctx.use_buf(ctx0, 0);
  1259. // norm
  1260. {
  1261. cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
  1262. offload_func(cur);
  1263. ggml_set_name(cur, "rms_norm_0");
  1264. // cur = cur*attention_norm(broadcasted)
  1265. cur = ggml_mul(ctx0, cur, model.layers[il].attention_norm);
  1266. offload_func(cur);
  1267. ggml_set_name(cur, "attention_norm_0");
  1268. }
  1269. // self-attention
  1270. {
  1271. // compute Q and K and RoPE them
  1272. struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  1273. offload_func_kq(tmpk);
  1274. ggml_set_name(tmpk, "tmpk");
  1275. struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  1276. offload_func_kq(tmpq);
  1277. ggml_set_name(tmpq, "tmpq");
  1278. struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
  1279. offload_func_kq(Kcur);
  1280. ggml_set_name(Kcur, "Kcur");
  1281. struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
  1282. offload_func_kq(Qcur);
  1283. ggml_set_name(Qcur, "Qcur");
  1284. // store key and value to memory
  1285. {
  1286. // compute the transposed [N, n_embd] V matrix
  1287. struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  1288. offload_func_v(tmpv);
  1289. ggml_set_name(tmpv, "tmpv");
  1290. struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N));
  1291. offload_func_v(Vcur);
  1292. ggml_set_name(Vcur, "Vcur");
  1293. struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
  1294. offload_func_kq(k);
  1295. ggml_set_name(k, "k");
  1296. struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
  1297. ( n_ctx)*ggml_element_size(kv_self.v),
  1298. (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
  1299. offload_func_v(v);
  1300. ggml_set_name(v, "v");
  1301. // important: storing RoPE-ed version of K in the KV cache!
  1302. ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
  1303. ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
  1304. }
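// The K cache is laid out per layer as n_ctx slots of n_embd_gqa elements, so the k view above
// starts at element n_embd_gqa*(il*n_ctx + n_past): skip il full layers, then the n_past
// positions already stored for this layer.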
  1305. struct ggml_tensor * Q =
  1306. ggml_permute(ctx0,
  1307. Qcur,
  1308. 0, 2, 1, 3);
  1309. offload_func_kq(Q);
  1310. ggml_set_name(Q, "Q");
  1311. struct ggml_tensor * K =
  1312. ggml_permute(ctx0,
  1313. ggml_reshape_3d(ctx0,
  1314. ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd_gqa, il*n_ctx*ggml_element_size(kv_self.k)*n_embd_gqa),
  1315. n_embd_head, n_head_kv, n_past + N),
  1316. 0, 2, 1, 3);
  1317. offload_func_kq(K);
  1318. ggml_set_name(K, "K");
  1319. // K * Q
  1320. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  1321. offload_func_kq(KQ);
  1322. ggml_set_name(KQ, "KQ");
  1323. // KQ_scaled = KQ / sqrt(n_embd_head)
  1324. struct ggml_tensor * KQ_scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head));
  1325. ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
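// Worked example (illustrative): n_embd = 4096 and n_head = 32 give n_embd_head = 128,
// so the scale is 1/sqrt(128) ~ 0.0884.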
  1326. // KQ_scaled shape [n_past + N, N, n_head, 1]
  1327. struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
  1328. offload_func_kq(KQ_scaled);
  1329. ggml_set_name(KQ_scaled, "KQ_scaled");
  1330. // KQ_masked = mask_past(KQ_scaled)
  1331. struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
  1332. offload_func_kq(KQ_masked);
  1333. ggml_set_name(KQ_masked, "KQ_masked");
  1334. // KQ = soft_max(KQ_masked)
  1335. struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
  1336. offload_func_v(KQ_soft_max);
  1337. ggml_set_name(KQ_soft_max, "KQ_soft_max");
  1338. // split cached V into n_head heads
  1339. struct ggml_tensor * V =
  1340. ggml_view_3d(ctx0, kv_self.v,
  1341. n_past + N, n_embd_head, n_head_kv,
  1342. n_ctx*ggml_element_size(kv_self.v),
  1343. n_ctx*ggml_element_size(kv_self.v)*n_embd_head,
  1344. n_ctx*ggml_element_size(kv_self.v)*n_embd_gqa*il);
  1345. offload_func_v(V);
  1346. ggml_set_name(V, "V");
  1347. #if 1
  1348. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
  1349. offload_func_v(KQV);
  1350. ggml_set_name(KQV, "KQV");
  1351. #else
  1352. // make V contiguous in memory to speed up the matmul, however we waste time on the copy
  1353. // on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation
  1354. // is there a better way?
  1355. struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head));
  1356. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
  1357. #endif
  1358. // KQV_merged = KQV.permute(0, 2, 1, 3)
  1359. struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  1360. offload_func_v(KQV_merged);
  1361. ggml_set_name(KQV_merged, "KQV_merged");
  1362. // cur = KQV_merged.contiguous().view(n_embd, N)
  1363. cur = ggml_cpy(ctx0,
  1364. KQV_merged,
  1365. ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
  1366. offload_func_v(cur);
  1367. ggml_set_name(cur, "KQV_merged_contiguous");
  1368. // projection (no bias)
  1369. cur = ggml_mul_mat(ctx0,
  1370. model.layers[il].wo,
  1371. cur);
  1372. offload_func(cur);
  1373. ggml_set_name(cur, "result_wo");
  1374. }
  1375. lctx.use_buf(ctx0, 1);
  1376. struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
  1377. offload_func(inpFF);
  1378. ggml_set_name(inpFF, "inpFF");
  1379. // feed-forward network
  1380. {
  1381. // norm
  1382. {
  1383. cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps);
  1384. offload_func(cur);
  1385. ggml_set_name(cur, "rms_norm_1");
  1386. // cur = cur*ffn_norm(broadcasted)
  1387. cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
  1388. offload_func(cur);
  1389. ggml_set_name(cur, "ffn_norm");
  1390. }
  1391. struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
  1392. model.layers[il].w3,
  1393. cur);
  1394. offload_func(tmp);
  1395. ggml_set_name(tmp, "result_w3");
  1396. cur = ggml_mul_mat(ctx0,
  1397. model.layers[il].w1,
  1398. cur);
  1399. offload_func(cur);
  1400. ggml_set_name(cur, "result_w1");
  1401. // SILU activation
  1402. cur = ggml_silu(ctx0, cur);
  1403. offload_func(cur);
  1404. ggml_set_name(cur, "silu");
  1405. cur = ggml_mul(ctx0, cur, tmp);
  1406. offload_func(cur);
  1407. ggml_set_name(cur, "silu_x_result_w3");
  1408. cur = ggml_mul_mat(ctx0,
  1409. model.layers[il].w2,
  1410. cur);
  1411. offload_func(cur);
  1412. ggml_set_name(cur, "result_w2");
  1413. }
  1414. cur = ggml_add(ctx0, cur, inpFF);
  1415. offload_func(cur);
  1416. ggml_set_name(cur, "inpFF_+_result_w2");
  1417. // input for next layer
  1418. inpL = cur;
  1419. }
  1420. lctx.use_buf(ctx0, 0);
  1421. // used at the end to optionally extract the embeddings
  1422. struct ggml_tensor * embeddings = NULL;
  1423. // norm
  1424. {
  1425. cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
  1426. offload_func_nr(cur);
  1427. ggml_set_name(cur, "rms_norm_2");
  1428. // cur = cur*norm(broadcasted)
  1429. cur = ggml_mul(ctx0, cur, model.norm);
  1430. // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
  1431. ggml_set_name(cur, "result_norm");
  1432. embeddings = cur;
  1433. }
  1434. // lm_head
  1435. cur = ggml_mul_mat(ctx0, model.output, cur);
  1436. ggml_set_name(cur, "result_output");
  1437. lctx.use_buf(ctx0, -1);
  1438. // logits -> probs
  1439. //cur = ggml_soft_max_inplace(ctx0, cur);
  1440. // run the computation
  1441. ggml_build_forward_expand(&gf, cur);
  1442. #if GGML_USE_MPI
  1443. ggml_mpi_graph_compute_pre(lctx.ctx_mpi, &gf, n_layer);
  1444. #endif
  1445. #ifdef GGML_USE_METAL
  1446. if (lctx.ctx_metal && N == 1) {
  1447. ggml_metal_set_n_cb (lctx.ctx_metal, n_threads);
  1448. ggml_metal_graph_compute(lctx.ctx_metal, &gf);
  1449. ggml_metal_get_tensor (lctx.ctx_metal, cur);
  1450. } else {
  1451. // IMPORTANT:
1452. // Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fall back to vanilla
  1453. // ggml_graph_compute(). It uses Apple's Accelerate CBLAS API which takes advantage of the ANE or the AMX
  1454. // coprocessor.
  1455. //
  1456. // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
  1457. // But for now, we have focused only on Matrix x Vector Metal multiplication.
  1458. //
  1459. // TODO: avoid these syncs via shared memory (ref #1696)
  1460. //
  1461. if (lctx.ctx_metal) {
  1462. // We need to sync the GPU KV cache with the CPU KV cache
  1463. ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
  1464. ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
  1465. }
  1466. ggml_graph_compute_helper(lctx.work_buffer, &gf, n_threads);
  1467. }
  1468. #else
  1469. ggml_graph_compute_helper(lctx.work_buffer, &gf, n_threads);
  1470. #endif
  1471. #if GGML_USE_MPI
  1472. ggml_mpi_graph_compute_post(lctx.ctx_mpi, &gf, n_layer);
  1473. #endif
  1474. // update kv token count
  1475. lctx.kv_self.n = n_past + N;
  1476. struct ggml_tensor * res = gf.nodes[gf.n_nodes - 1];
  1477. if (cgraph_fname) {
  1478. ggml_graph_export(&gf, cgraph_fname);
  1479. }
  1480. #ifdef GGML_PERF
  1481. // print timing information per ggml operation (for debugging purposes)
  1482. // requires GGML_PERF to be defined
  1483. ggml_graph_print(&gf);
  1484. #endif
  1485. // plot the computation graph in dot format (for debugging purposes)
  1486. //if (n_past%100 == 0) {
  1487. // ggml_graph_dump_dot(&gf, NULL, "llama.dot");
  1488. //}
  1489. // extract logits
  1490. {
  1491. auto & logits_out = lctx.logits;
  1492. if (lctx.logits_all) {
  1493. logits_out.resize(n_vocab * N);
  1494. memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*N);
  1495. } else {
  1496. // return result for just the last token
  1497. logits_out.resize(n_vocab);
  1498. memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
  1499. }
  1500. }
  1501. // extract embeddings
  1502. if (!lctx.embedding.empty()) {
  1503. auto & embedding_out = lctx.embedding;
  1504. embedding_out.resize(n_embd);
  1505. memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
  1506. }
  1507. if (mem_per_token == 0) {
  1508. mem_per_token = ggml_used_mem(ctx0)/N;
  1509. }
  1510. #if 0
  1511. printf("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
  1512. ggml_used_mem(ctx0)/1024.0/1024.0,
  1513. lctx.get_buf_max_mem(0)/1024.0/1024.0,
  1514. lctx.get_buf_max_mem(1)/1024.0/1024.0,
  1515. lctx.work_buffer.size()/1024.0/1024.0,
  1516. n_past, N);
  1517. #endif
  1518. ggml_free(ctx0);
  1519. // measure the performance only for the single-token evals
  1520. if (N == 1) {
  1521. lctx.t_eval_us += ggml_time_us() - t_start_us;
  1522. lctx.n_eval++;
  1523. }
  1524. else if (N > 1) {
  1525. lctx.t_p_eval_us += ggml_time_us() - t_start_us;
  1526. lctx.n_p_eval += N;
  1527. }
  1528. return true;
  1529. }
  1530. //
  1531. // tokenizer
  1532. //
  1533. static size_t utf8_len(char src) {
  1534. const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
  1535. uint8_t highbits = static_cast<uint8_t>(src) >> 4;
  1536. return lookup[highbits];
  1537. }
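// Example: the leading byte 0xE2 (e.g. the first byte of "€", U+20AC) has high nibble
// 0xE = 14, and lookup[14] = 3, i.e. a three-byte UTF-8 sequence.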
  1538. struct llama_sp_symbol {
  1539. using index = int;
  1540. index prev;
  1541. index next;
  1542. const char * text;
  1543. size_t n;
  1544. };
  1545. static_assert(std::is_trivially_copyable<llama_sp_symbol>::value, "llama_sp_symbol is not trivially copyable");
  1546. struct llama_sp_bigram {
  1547. struct comparator {
  1548. bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) {
  1549. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  1550. }
  1551. };
  1552. using queue_storage = std::vector<llama_sp_bigram>;
  1553. using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
  1554. llama_sp_symbol::index left;
  1555. llama_sp_symbol::index right;
  1556. float score;
  1557. size_t size;
  1558. };
  1559. // original implementation:
  1560. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  1561. struct llama_tokenizer {
  1562. llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}
  1563. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  1564. // split string into utf8 chars
  1565. int index = 0;
  1566. size_t offs = 0;
  1567. while (offs < text.size()) {
  1568. llama_sp_symbol sym;
  1569. size_t char_len = std::min(text.size() - offs, utf8_len(text[offs]));
  1570. sym.text = text.c_str() + offs;
  1571. sym.n = char_len;
  1572. offs += char_len;
  1573. sym.prev = index - 1;
  1574. sym.next = offs == text.size() ? -1 : index + 1;
  1575. index++;
  1576. symbols_.emplace_back(sym);
  1577. }
  1578. // seed the work queue with all possible 2-character tokens.
  1579. for (size_t i = 1; i < symbols_.size(); ++i) {
  1580. try_add_bigram(i - 1, i);
  1581. }
1582. // keep substituting the highest-scoring pairs for as long as we can.
  1583. while (!work_queue_.empty()) {
  1584. auto bigram = work_queue_.top();
  1585. work_queue_.pop();
  1586. auto & left_sym = symbols_[bigram.left];
  1587. auto & right_sym = symbols_[bigram.right];
  1588. // if one of the symbols already got merged, skip it.
  1589. if (left_sym.n == 0 || right_sym.n == 0 ||
  1590. left_sym.n + right_sym.n != bigram.size) {
  1591. continue;
  1592. }
  1593. // merge the right sym into the left one
  1594. left_sym.n += right_sym.n;
  1595. right_sym.n = 0;
  1596. //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  1597. // remove the right sym from the chain
  1598. left_sym.next = right_sym.next;
  1599. if (right_sym.next >= 0) {
  1600. symbols_[right_sym.next].prev = bigram.left;
  1601. }
  1602. // find more substitutions
  1603. try_add_bigram(left_sym.prev, bigram.left);
  1604. try_add_bigram(bigram.left, left_sym.next);
  1605. }
  1606. for (int i = 0; i != -1; i = symbols_[i].next) {
  1607. auto & symbol = symbols_[i];
  1608. auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));
  1609. if (token == vocab_.token_to_id.end()) {
  1610. // output any symbols that did not form tokens as bytes.
  1611. for (int j = 0; j < (int) symbol.n; ++j) {
  1612. llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
  1613. output.push_back(token_id);
  1614. }
  1615. } else {
  1616. output.push_back((*token).second);
  1617. }
  1618. }
  1619. }
  1620. private:
  1621. void try_add_bigram(int left, int right) {
  1622. if (left == -1 || right == -1) {
  1623. return;
  1624. }
  1625. const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
  1626. auto token = vocab_.token_to_id.find(text);
  1627. if (token == vocab_.token_to_id.end()) {
  1628. return;
  1629. }
  1630. if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
  1631. return;
  1632. }
  1633. const auto &tok_score = vocab_.id_to_token[(*token).second];
  1634. llama_sp_bigram bigram;
  1635. bigram.left = left;
  1636. bigram.right = right;
  1637. bigram.score = tok_score.score;
  1638. bigram.size = text.size();
  1639. work_queue_.push(bigram);
  1640. }
  1641. const llama_vocab & vocab_;
  1642. std::vector<llama_sp_symbol> symbols_;
  1643. llama_sp_bigram::queue work_queue_;
  1644. };
  1645. static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) {
  1646. llama_tokenizer tokenizer(vocab);
  1647. std::vector<llama_vocab::id> output;
  1648. if (text.empty()) {
  1649. return output;
  1650. }
  1651. if (bos) {
  1652. output.push_back(llama_token_bos());
  1653. }
  1654. tokenizer.tokenize(text, output);
  1655. return output;
  1656. }
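// Usage sketch (illustrative; actual token ids depend on the loaded vocab):
//
//     std::vector<llama_vocab::id> ids = llama_tokenize(vocab, "Hello world", /*bos=*/true);
//     // ids[0] is the BOS token, followed by the SentencePiece-style merges of the text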
  1657. //
  1658. // grammar - internal
  1659. //
  1660. struct llama_grammar {
  1661. const std::vector<std::vector<llama_grammar_element>> rules;
  1662. std::vector<std::vector<const llama_grammar_element *>> stacks;
  1663. };
  1664. struct llama_grammar_candidate {
  1665. size_t index;
  1666. const uint32_t * code_points;
  1667. };
  1668. // NOTE: assumes valid utf8 (but checks for overrun)
1669. // appends a terminating 0 so the result can be scanned through a pointer like a C string
  1670. std::vector<uint32_t> decode_utf8(const char * src) {
  1671. static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
  1672. const char * pos = src;
  1673. std::vector<uint32_t> code_points;
  1674. while (*pos != 0) {
  1675. uint8_t first_byte = static_cast<uint8_t>(*pos);
  1676. uint8_t highbits = first_byte >> 4;
  1677. int len = lookup[highbits];
  1678. uint8_t mask = (1 << (8 - len)) - 1;
  1679. uint32_t value = first_byte & mask;
  1680. const char * end = pos + len; // may overrun!
  1681. ++pos;
  1682. for ( ; pos < end && *pos != 0; ++pos) {
  1683. value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
  1684. }
  1685. code_points.push_back(value);
  1686. }
  1687. code_points.push_back(0);
  1688. return code_points;
  1689. }
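// Example: "é" is encoded as 0xC3 0xA9; the first byte gives len = 2 and value = 0x03,
// the continuation byte contributes 0x29, so the decoded code point is (0x03 << 6) | 0x29 = 0xE9 (U+00E9).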
  1690. // returns true iff pos points to the end of one of the definitions of a rule
  1691. static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
  1692. switch (pos->type) {
  1693. case LLAMA_GRETYPE_END: return true;
  1694. case LLAMA_GRETYPE_ALT: return true;
  1695. default: return false;
  1696. }
  1697. }
  1698. // returns true iff chr satisfies the char range at pos (regular or inverse range)
  1699. // asserts that pos is pointing to a char range element
  1700. static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
  1701. const llama_grammar_element * pos,
  1702. const uint32_t chr) {
  1703. bool found = false;
  1704. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  1705. LLAMA_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  1706. do {
  1707. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  1708. // inclusive range, e.g. [a-z]
  1709. found = found || (pos->value <= chr && chr <= pos[1].value);
  1710. pos += 2;
  1711. } else {
  1712. // exact char match, e.g. [a] or "a"
  1713. found = found || pos->value == chr;
  1714. pos += 1;
  1715. }
  1716. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  1717. return std::make_pair(found == is_positive_char, pos);
  1718. }
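// Example: the range [a-z] is encoded as {LLAMA_GRETYPE_CHAR, 'a'} followed by
// {LLAMA_GRETYPE_CHAR_RNG_UPPER, 'z'}; matching chr = 'm' sets found = true and
// the returned pos has been advanced past the range pair.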
  1719. // transforms a grammar pushdown stack into N possible stacks, all ending
  1720. // at a character range (terminal element)
  1721. static void llama_grammar_advance_stack(
  1722. const std::vector<std::vector<llama_grammar_element>> & rules,
  1723. const std::vector<const llama_grammar_element *> & stack,
  1724. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  1725. if (stack.empty()) {
  1726. new_stacks.push_back(stack);
  1727. return;
  1728. }
  1729. const llama_grammar_element * pos = stack.back();
  1730. switch (pos->type) {
  1731. case LLAMA_GRETYPE_RULE_REF: {
  1732. const size_t rule_id = static_cast<size_t>(pos->value);
  1733. const llama_grammar_element * subpos = rules[rule_id].data();
  1734. do {
  1735. // init new stack without the top (pos)
  1736. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  1737. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  1738. // if this rule ref is followed by another element, add that to stack
  1739. new_stack.push_back(pos + 1);
  1740. }
  1741. if (!llama_grammar_is_end_of_sequence(subpos)) {
  1742. // if alternate is nonempty, add to stack
  1743. new_stack.push_back(subpos);
  1744. }
  1745. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  1746. while (!llama_grammar_is_end_of_sequence(subpos)) {
  1747. // scan to end of alternate def
  1748. subpos++;
  1749. }
  1750. if (subpos->type == LLAMA_GRETYPE_ALT) {
  1751. // there's another alternate def of this rule to process
  1752. subpos++;
  1753. } else {
  1754. break;
  1755. }
  1756. } while (true);
  1757. break;
  1758. }
  1759. case LLAMA_GRETYPE_CHAR:
  1760. case LLAMA_GRETYPE_CHAR_NOT:
  1761. new_stacks.push_back(stack);
  1762. break;
  1763. default:
  1764. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  1765. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  1766. // those
  1767. LLAMA_ASSERT(false);
  1768. }
  1769. }
  1770. // takes a set of possible pushdown stacks on a grammar, which are required to
  1771. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  1772. // produces the N possible stacks if the given char is accepted at those
  1773. // positions
  1774. static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
  1775. const std::vector<std::vector<llama_grammar_element>> & rules,
  1776. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  1777. const uint32_t chr) {
  1778. std::vector<std::vector<const llama_grammar_element *>> new_stacks;
  1779. for (const auto & stack : stacks) {
  1780. if (stack.empty()) {
  1781. continue;
  1782. }
  1783. auto match = llama_grammar_match_char(stack.back(), chr);
  1784. if (match.first) {
  1785. const llama_grammar_element * pos = match.second;
  1786. // update top of stack to next element, if any
  1787. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  1788. if (!llama_grammar_is_end_of_sequence(pos)) {
  1789. new_stack.push_back(pos);
  1790. }
  1791. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  1792. }
  1793. }
  1794. return new_stacks;
  1795. }
  1796. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  1797. const std::vector<std::vector<llama_grammar_element>> & rules,
  1798. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  1799. const std::vector<llama_grammar_candidate> & candidates);
  1800. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  1801. const std::vector<std::vector<llama_grammar_element>> & rules,
  1802. const std::vector<const llama_grammar_element *> & stack,
  1803. const std::vector<llama_grammar_candidate> & candidates) {
  1804. std::vector<llama_grammar_candidate> rejects;
  1805. if (stack.empty()) {
  1806. // accept nothing; EOS is handled elsewhere
  1807. rejects.insert(rejects.end(), candidates.begin(), candidates.end());
  1808. return rejects;
  1809. }
  1810. const llama_grammar_element * stack_pos = stack.back();
  1811. std::vector<llama_grammar_candidate> next_candidates;
  1812. for (auto tok : candidates) {
  1813. if (llama_grammar_match_char(stack_pos, tok.code_points[0]).first) {
  1814. if (tok.code_points[1] != 0) {
  1815. next_candidates.push_back({ tok.index, tok.code_points + 1 });
  1816. }
  1817. } else {
  1818. rejects.push_back(tok);
  1819. }
  1820. }
  1821. auto stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  1822. // update top of stack to next element, if any
  1823. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  1824. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  1825. stack_after.push_back(stack_pos_after);
  1826. }
  1827. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  1828. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  1829. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  1830. for (auto tok : next_rejects) {
  1831. rejects.push_back({ tok.index, tok.code_points - 1 });
  1832. }
  1833. return rejects;
  1834. }
  1835. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  1836. const std::vector<std::vector<llama_grammar_element>> & rules,
  1837. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  1838. const std::vector<llama_grammar_candidate> & candidates) {
  1839. LLAMA_ASSERT(!stacks.empty()); // REVIEW
  1840. if (candidates.empty()) {
  1841. return std::vector<llama_grammar_candidate>();
  1842. }
  1843. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  1844. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  1845. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  1846. }
  1847. return rejects;
  1848. }
  1849. //
  1850. // grammar - external
  1851. //
  1852. struct llama_grammar * llama_grammar_init(
  1853. const llama_grammar_element ** rules,
  1854. size_t n_rules,
  1855. size_t start_rule_index) {
  1856. const llama_grammar_element * pos;
  1857. // copy rule definitions into vectors
  1858. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  1859. for (size_t i = 0; i < n_rules; i++) {
  1860. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  1861. vec_rules[i].push_back(*pos);
  1862. }
  1863. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  1864. }
  1865. // loop over alternates of start rule to build initial stacks
  1866. std::vector<std::vector<const llama_grammar_element *>> stacks;
  1867. pos = rules[start_rule_index];
  1868. do {
  1869. std::vector<const llama_grammar_element *> stack;
  1870. if (!llama_grammar_is_end_of_sequence(pos)) {
  1871. // if alternate is nonempty, add to stack
  1872. stack.push_back(pos);
  1873. }
  1874. llama_grammar_advance_stack(vec_rules, stack, stacks);
  1875. while (!llama_grammar_is_end_of_sequence(pos)) {
  1876. // scan to end of alternate def
  1877. pos++;
  1878. }
  1879. if (pos->type == LLAMA_GRETYPE_ALT) {
  1880. // there's another alternate def of this rule to process
  1881. pos++;
  1882. } else {
  1883. break;
  1884. }
  1885. } while (true);
  1886. return new llama_grammar{ std::move(vec_rules), std::move(stacks) };
  1887. }
  1888. void llama_grammar_free(struct llama_grammar * grammar) {
  1889. delete grammar;
  1890. }
  1891. //
  1892. // sampling
  1893. //
  1894. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  1895. assert(candidates->size > 0);
  1896. const int64_t t_start_sample_us = ggml_time_us();
  1897. // Sort the logits in descending order
  1898. if (!candidates->sorted) {
  1899. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  1900. return a.logit > b.logit;
  1901. });
  1902. candidates->sorted = true;
  1903. }
  1904. float max_l = candidates->data[0].logit;
  1905. float cum_sum = 0.0f;
  1906. for (size_t i = 0; i < candidates->size; ++i) {
  1907. float p = expf(candidates->data[i].logit - max_l);
  1908. candidates->data[i].p = p;
  1909. cum_sum += p;
  1910. }
  1911. for (size_t i = 0; i < candidates->size; ++i) {
  1912. candidates->data[i].p /= cum_sum;
  1913. }
  1914. if (ctx) {
  1915. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1916. }
  1917. }
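// Worked example (illustrative): logits {2.0, 1.0, 0.0} -> exp(l - max_l) = {1.000, 0.368, 0.135},
// sum = 1.503, so p = {0.665, 0.245, 0.090}; subtracting max_l keeps expf() from overflowing.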
  1918. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep) {
  1919. const int64_t t_start_sample_us = ggml_time_us();
  1920. k = std::max(k, (int) min_keep);
  1921. k = std::min(k, (int) candidates->size);
  1922. // Sort scores in descending order
  1923. if (!candidates->sorted) {
  1924. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  1925. return a.logit > b.logit;
  1926. };
  1927. if (k == (int) candidates->size) {
  1928. std::sort(candidates->data, candidates->data + candidates->size, comp);
  1929. } else {
  1930. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  1931. }
  1932. candidates->sorted = true;
  1933. }
  1934. candidates->size = k;
  1935. if (ctx) {
  1936. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1937. }
  1938. }
  1939. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  1940. if (p >= 1.0f) {
  1941. return;
  1942. }
  1943. llama_sample_softmax(ctx, candidates);
  1944. const int64_t t_start_sample_us = ggml_time_us();
  1945. // Compute the cumulative probabilities
  1946. float cum_sum = 0.0f;
  1947. size_t last_idx = candidates->size;
  1948. for (size_t i = 0; i < candidates->size; ++i) {
  1949. cum_sum += candidates->data[i].p;
1950. // Check if the running sum is at least p and we have kept at least min_keep tokens;
1951. // we set the last index to i+1 to indicate that the current iteration should be included in the set
  1952. if (cum_sum >= p && i + 1 >= min_keep) {
  1953. last_idx = i + 1;
  1954. break;
  1955. }
  1956. }
  1957. // Resize the output vector to keep only the top-p tokens
  1958. candidates->size = last_idx;
  1959. if (ctx) {
  1960. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  1961. }
  1962. }
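// Worked example (illustrative, assuming min_keep <= 3): with sorted p = {0.50, 0.30, 0.15, 0.05}
// and p = 0.90, the running sum reaches 0.95 at i = 2, so last_idx = 3 and only the top three tokens are kept.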
  1963. void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
  1964. if (z >= 1.0f || candidates->size <= 2) {
  1965. return;
  1966. }
  1967. llama_sample_softmax(nullptr, candidates);
  1968. const int64_t t_start_sample_us = ggml_time_us();
  1969. // Compute the first and second derivatives
  1970. std::vector<float> first_derivatives(candidates->size - 1);
  1971. std::vector<float> second_derivatives(candidates->size - 2);
  1972. for (size_t i = 0; i < first_derivatives.size(); ++i) {
  1973. first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
  1974. }
  1975. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  1976. second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
  1977. }
  1978. // Calculate absolute value of second derivatives
  1979. for (size_t i = 0; i < second_derivatives.size(); ++i) {
1980. second_derivatives[i] = fabsf(second_derivatives[i]); // fabsf: plain abs() would truncate the float to int
  1981. }
  1982. // Normalize the second derivatives
  1983. {
  1984. const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
  1985. if (second_derivatives_sum > 1e-6f) {
  1986. for (float & value : second_derivatives) {
  1987. value /= second_derivatives_sum;
  1988. }
  1989. } else {
  1990. for (float & value : second_derivatives) {
  1991. value = 1.0f / second_derivatives.size();
  1992. }
  1993. }
  1994. }
  1995. float cum_sum = 0.0f;
  1996. size_t last_idx = candidates->size;
  1997. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  1998. cum_sum += second_derivatives[i];
1999. // Check if the running sum is greater than z and we have kept at least min_keep tokens
  2000. if (cum_sum > z && i >= min_keep) {
  2001. last_idx = i;
  2002. break;
  2003. }
  2004. }
  2005. // Resize the output vector to keep only the tokens above the tail location
  2006. candidates->size = last_idx;
  2007. if (ctx) {
  2008. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2009. }
  2010. }
  2011. void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  2012. // Reference implementation:
  2013. // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
  2014. if (p >= 1.0f) {
  2015. return;
  2016. }
  2017. // Compute the softmax of logits and calculate entropy
  2018. llama_sample_softmax(nullptr, candidates);
  2019. const int64_t t_start_sample_us = ggml_time_us();
  2020. float entropy = 0.0f;
  2021. for (size_t i = 0; i < candidates->size; ++i) {
  2022. entropy += -candidates->data[i].p * logf(candidates->data[i].p);
  2023. }
  2024. // Compute the absolute difference between negative log probability and entropy for each candidate
  2025. std::vector<float> shifted_scores;
  2026. for (size_t i = 0; i < candidates->size; ++i) {
  2027. float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
  2028. shifted_scores.push_back(shifted_score);
  2029. }
  2030. // Sort tokens based on the shifted_scores and their corresponding indices
  2031. std::vector<size_t> indices(candidates->size);
  2032. std::iota(indices.begin(), indices.end(), 0);
  2033. std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
  2034. return shifted_scores[a] < shifted_scores[b];
  2035. });
  2036. // Compute the cumulative probabilities
  2037. float cum_sum = 0.0f;
  2038. size_t last_idx = indices.size();
  2039. for (size_t i = 0; i < indices.size(); ++i) {
  2040. size_t idx = indices[i];
  2041. cum_sum += candidates->data[idx].p;
2042. // Check if the running sum is greater than p (the typical-sampling threshold) and we have kept at least min_keep tokens
  2043. if (cum_sum > p && i >= min_keep - 1) {
  2044. last_idx = i + 1;
  2045. break;
  2046. }
  2047. }
  2048. // Resize the output vector to keep only the locally typical tokens
  2049. std::vector<llama_token_data> new_candidates;
  2050. for (size_t i = 0; i < last_idx; ++i) {
  2051. size_t idx = indices[i];
  2052. new_candidates.push_back(candidates->data[idx]);
  2053. }
  2054. // Replace the data in candidates with the new_candidates data
  2055. std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
  2056. candidates->size = new_candidates.size();
  2057. if (ctx) {
  2058. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2059. }
  2060. }
  2061. void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  2062. const int64_t t_start_sample_us = ggml_time_us();
  2063. for (size_t i = 0; i < candidates_p->size; ++i) {
  2064. candidates_p->data[i].logit /= temp;
  2065. }
  2066. if (ctx) {
  2067. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2068. }
  2069. }
  2070. void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty) {
  2071. if (last_tokens_size == 0 || penalty == 1.0f) {
  2072. return;
  2073. }
  2074. const int64_t t_start_sample_us = ggml_time_us();
  2075. for (size_t i = 0; i < candidates->size; ++i) {
  2076. const auto * token_iter = std::find(last_tokens, last_tokens + last_tokens_size, candidates->data[i].id);
  2077. if (token_iter == last_tokens + last_tokens_size) {
  2078. continue;
  2079. }
2080. // The academic publication that described this technique only divided by the penalty, but that would make tokens with negative logits more likely, which is obviously wrong.
2081. // The common fix is to multiply negative logits by the penalty instead of dividing them.
  2082. if (candidates->data[i].logit <= 0) {
  2083. candidates->data[i].logit *= penalty;
  2084. } else {
  2085. candidates->data[i].logit /= penalty;
  2086. }
  2087. }
  2088. candidates->sorted = false;
  2089. if (ctx) {
  2090. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2091. }
  2092. }
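// Worked example (illustrative): with penalty = 1.3, a previously-seen token with logit 2.0
// becomes 2.0 / 1.3 ~ 1.54, while one with logit -1.0 becomes -1.0 * 1.3 = -1.3,
// i.e. both are made less likely.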
  2093. void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) {
  2094. if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) {
  2095. return;
  2096. }
  2097. const int64_t t_start_sample_us = ggml_time_us();
  2098. // Create a frequency map to count occurrences of each token in last_tokens
  2099. std::unordered_map<llama_token, int> token_count;
  2100. for (size_t i = 0; i < last_tokens_size; ++i) {
  2101. token_count[last_tokens_p[i]]++;
  2102. }
  2103. // Apply frequency and presence penalties to the candidates
  2104. for (size_t i = 0; i < candidates->size; ++i) {
  2105. auto token_iter = token_count.find(candidates->data[i].id);
  2106. if (token_iter == token_count.end()) {
  2107. continue;
  2108. }
  2109. int count = token_iter->second;
  2110. candidates->data[i].logit -= float(count) * alpha_frequency + float(count > 0) * alpha_presence;
  2111. }
  2112. candidates->sorted = false;
  2113. if (ctx) {
  2114. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2115. }
  2116. }
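// Worked example (illustrative): a token that appeared count = 3 times with alpha_frequency = 0.5
// and alpha_presence = 0.2 has its logit reduced by 3*0.5 + 0.2 = 1.7.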
  2117. void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
  2118. assert(ctx);
  2119. const int64_t t_start_sample_us = ggml_time_us();
  2120. bool allow_eos = false;
  2121. for (const auto & stack : grammar->stacks) {
  2122. if (stack.empty()) {
  2123. allow_eos = true;
  2124. break;
  2125. }
  2126. }
  2127. const llama_token eos = llama_token_eos();
  2128. std::vector<std::vector<uint32_t>> candidates_decoded;
  2129. std::vector<llama_grammar_candidate> candidates_grammar;
  2130. for (size_t i = 0; i < candidates->size; ++i) {
  2131. const llama_token id = candidates->data[i].id;
  2132. const char * str = llama_token_to_str(ctx, id);
  2133. if (id == eos) {
  2134. if (!allow_eos) {
  2135. candidates->data[i].logit = -INFINITY;
  2136. }
  2137. } else if (*str == 0) {
  2138. candidates->data[i].logit = -INFINITY;
  2139. } else {
  2140. candidates_decoded.push_back(decode_utf8(str));
  2141. candidates_grammar.push_back({ i, candidates_decoded.back().data() });
  2142. }
  2143. }
  2144. const auto rejects =
  2145. llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
  2146. for (auto & reject : rejects) {
  2147. candidates->data[reject.index].logit = -INFINITY;
  2148. }
  2149. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2150. }
  2151. static void llama_log_softmax(float * array, size_t size) {
  2152. float max_l = *std::max_element(array, array + size);
  2153. float sum = 0.f;
  2154. for (size_t i = 0; i < size; ++i) {
  2155. float p = expf(array[i] - max_l);
  2156. sum += p;
  2157. array[i] = p;
  2158. }
  2159. for (size_t i = 0; i < size; ++i) {
  2160. array[i] = logf(array[i] / sum);
  2161. }
  2162. }
  2163. void llama_sample_classifier_free_guidance(
  2164. struct llama_context * ctx,
  2165. llama_token_data_array * candidates,
  2166. struct llama_context * guidance_ctx,
  2167. float scale) {
  2168. int64_t t_start_sample_us = ggml_time_us();
  2169. assert(ctx);
  2170. auto n_vocab = llama_n_vocab(ctx);
  2171. assert(n_vocab == (int)candidates->size);
  2172. assert(!candidates->sorted);
  2173. std::vector<float> logits_base;
  2174. logits_base.reserve(candidates->size);
  2175. for (size_t i = 0; i < candidates->size; ++i) {
  2176. logits_base.push_back(candidates->data[i].logit);
  2177. }
  2178. llama_log_softmax(logits_base.data(), candidates->size);
  2179. float* logits_guidance = llama_get_logits(guidance_ctx);
  2180. llama_log_softmax(logits_guidance, n_vocab);
  2181. for (int i = 0; i < n_vocab; ++i) {
  2182. float logit_guidance = logits_guidance[i];
  2183. float logit_base = logits_base[i];
  2184. candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance;
  2185. }
  2186. if (ctx) {
  2187. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2188. }
  2189. }
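// Worked example (illustrative): with scale = 1.5, logit_base = -2.0 and logit_guidance = -3.0,
// the result is 1.5 * (-2.0 - (-3.0)) + (-3.0) = -1.5, i.e. the base/guidance difference is scaled by 1.5.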
  2190. llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
  2191. assert(ctx);
  2192. auto N = float(llama_n_vocab(ctx));
  2193. int64_t t_start_sample_us;
  2194. t_start_sample_us = ggml_time_us();
  2195. llama_sample_softmax(nullptr, candidates);
  2196. // Estimate s_hat using the most probable m tokens
  2197. float s_hat = 0.0;
  2198. float sum_ti_bi = 0.0;
  2199. float sum_ti_sq = 0.0;
  2200. for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
  2201. float t_i = logf(float(i + 2) / float(i + 1));
  2202. float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
  2203. sum_ti_bi += t_i * b_i;
  2204. sum_ti_sq += t_i * t_i;
  2205. }
  2206. s_hat = sum_ti_bi / sum_ti_sq;
  2207. // Compute k from the estimated s_hat and target surprise value
  2208. float epsilon_hat = s_hat - 1;
  2209. float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
  2210. // Sample the next word X using top-k sampling
  2211. llama_sample_top_k(nullptr, candidates, int(k), 1);
  2212. if (ctx) {
  2213. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2214. }
  2215. llama_token X = llama_sample_token(ctx, candidates);
  2216. t_start_sample_us = ggml_time_us();
  2217. // Compute error as the difference between observed surprise and target surprise value
  2218. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  2219. return candidate.id == X;
  2220. }));
  2221. float observed_surprise = -log2f(candidates->data[X_idx].p);
  2222. float e = observed_surprise - tau;
  2223. // Update mu using the learning rate and error
  2224. *mu = *mu - eta * e;
  2225. if (ctx) {
  2226. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2227. }
  2228. return X;
  2229. }
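// Worked example (illustrative): with tau = 5.0 and eta = 0.1, sampling a token whose observed
// surprise is 7.0 gives e = 2.0, so mu is lowered by 0.2, tightening the next top-k cutoff.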
  2230. llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
  2231. int64_t t_start_sample_us;
  2232. t_start_sample_us = ggml_time_us();
  2233. llama_sample_softmax(ctx, candidates);
  2234. // Truncate the words with surprise values greater than mu
  2235. candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  2236. return -log2f(candidate.p) > *mu;
  2237. }));
  2238. if (candidates->size == 0) {
  2239. candidates->size = 1;
  2240. }
  2241. if (ctx) {
  2242. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2243. }
  2244. // Normalize the probabilities of the remaining words
  2245. llama_sample_softmax(ctx, candidates);
  2246. // Sample the next word X from the remaining words
  2247. llama_token X = llama_sample_token(ctx, candidates);
  2248. t_start_sample_us = ggml_time_us();
  2249. // Compute error as the difference between observed surprise and target surprise value
  2250. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  2251. return candidate.id == X;
  2252. }));
  2253. float observed_surprise = -log2f(candidates->data[X_idx].p);
  2254. float e = observed_surprise - tau;
  2255. // Update mu using the learning rate and error
  2256. *mu = *mu - eta * e;
  2257. if (ctx) {
  2258. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2259. }
  2260. return X;
  2261. }
  2262. llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
  2263. const int64_t t_start_sample_us = ggml_time_us();
  2264. // Find max element
  2265. auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  2266. return a.logit < b.logit;
  2267. });
  2268. llama_token result = max_iter->id;
  2269. if (ctx) {
  2270. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2271. ctx->n_sample++;
  2272. }
  2273. return result;
  2274. }
  2275. llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
  2276. assert(ctx);
  2277. const int64_t t_start_sample_us = ggml_time_us();
  2278. llama_sample_softmax(nullptr, candidates);
  2279. std::vector<float> probs;
  2280. probs.reserve(candidates->size);
  2281. for (size_t i = 0; i < candidates->size; ++i) {
  2282. probs.push_back(candidates->data[i].p);
  2283. }
  2284. std::discrete_distribution<> dist(probs.begin(), probs.end());
  2285. auto & rng = ctx->rng;
  2286. int idx = dist(rng);
  2287. llama_token result = candidates->data[idx].id;
  2288. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2289. ctx->n_sample++;
  2290. return result;
  2291. }
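// Usage sketch of a typical sampling chain (illustrative parameter values; assumes the caller has
// filled `candidates` from the logits and keeps recent history in `last_tokens`):
//
//     llama_sample_repetition_penalty(ctx, &candidates, last_tokens.data(), last_tokens.size(), 1.1f);
//     llama_sample_top_k        (ctx, &candidates, 40, 1);
//     llama_sample_top_p        (ctx, &candidates, 0.95f, 1);
//     llama_sample_temperature  (ctx, &candidates, 0.8f);
//     const llama_token id = llama_sample_token(ctx, &candidates);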
  2292. void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
  2293. const int64_t t_start_sample_us = ggml_time_us();
  2294. if (token == llama_token_eos()) {
  2295. for (const auto & stack : grammar->stacks) {
  2296. if (stack.empty()) {
  2297. return;
  2298. }
  2299. }
  2300. LLAMA_ASSERT(false);
  2301. }
  2302. const char * str = llama_token_to_str(ctx, token);
  2303. // Note terminating 0 in decoded string
  2304. auto code_points = decode_utf8(str);
  2305. for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
  2306. grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
  2307. }
  2308. LLAMA_ASSERT(!grammar->stacks.empty());
  2309. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2310. }
  2311. //
  2312. // quantization
  2313. //
  2314. static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llama_buffer & output, const int nelements, const int nthread) {
  2315. if (output.size < nelements * sizeof(float)) {
  2316. output.resize(nelements * sizeof(float));
  2317. }
  2318. float * f32_output = (float *) output.addr;
  2319. ggml_type_traits_t qtype;
  2320. if (ggml_is_quantized(tensor.type)) {
  2321. qtype = ggml_internal_get_type_traits(tensor.type);
  2322. if (qtype.to_float == NULL) {
  2323. throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type)));
  2324. }
  2325. } else if (tensor.type != GGML_TYPE_F16) {
  2326. throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor.type)));
  2327. }
  2328. if (nthread < 2) {
  2329. if (tensor.type == GGML_TYPE_F16) {
  2330. ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements);
  2331. } else if (ggml_is_quantized(tensor.type)) {
  2332. qtype.to_float(tensor.data, f32_output, nelements);
  2333. } else {
  2334. LLAMA_ASSERT(false); // unreachable
  2335. }
  2336. return;
  2337. }
  2338. auto block_size = tensor.type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor.type);
  2339. auto block_size_bytes = ggml_type_size(tensor.type);
  2340. LLAMA_ASSERT(nelements % block_size == 0);
  2341. auto nblocks = nelements / block_size;
  2342. auto blocks_per_thread = nblocks / nthread;
  2343. auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
  2344. std::vector<std::thread> workers;
  2345. for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) {
  2346. auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
  2347. auto thr_elems = thr_blocks * block_size; // number of elements for this thread
  2348. auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
  2349. auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
  2350. if (typ == GGML_TYPE_F16) {
  2351. ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
  2352. } else {
  2353. qtype.to_float(inbuf, outbuf, nels);
  2354. }
  2355. };
  2356. workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems));
  2357. in_buff_offs += thr_block_bytes;
  2358. out_buff_offs += thr_elems;
  2359. }
  2360. for (auto & worker : workers) {
  2361. worker.join();
  2362. }
  2363. }
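// Example of the work split above (illustrative): 1003 blocks over 8 threads gives
// blocks_per_thread = 125 and spare_blocks = 3, so threads 0..6 convert 125 blocks each
// and the last thread converts 128.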
static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
    ggml_type quantized_type;
    llama_ftype ftype = params->ftype;
    int nthread = params->nthread;

    switch (params->ftype) {
        case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
        case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
        case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
        case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
        case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
        case LLAMA_FTYPE_MOSTLY_F16:  quantized_type = GGML_TYPE_F16;  break;
        case LLAMA_FTYPE_ALL_F32:     quantized_type = GGML_TYPE_F32;  break;

#ifdef GGML_USE_K_QUANTS
        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K:   quantized_type = GGML_TYPE_Q2_K; break;
        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
        case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
        case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
        case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
        case LLAMA_FTYPE_MOSTLY_Q6_K:   quantized_type = GGML_TYPE_Q6_K; break;
#endif
        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
    }

    if (nthread <= 0) {
        nthread = std::thread::hardware_concurrency();
    }

    std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false));
    llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get(), params->ftype);

#ifdef GGML_USE_K_QUANTS
    int n_attention_wv    = 0;
    int n_feed_forward_w2 = 0;
    for (auto & tensor : model_loader->tensors_map.tensors) {
        if (tensor.name.find("attention.wv.weight") != std::string::npos) {
            ++n_attention_wv;
        } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
            ++n_feed_forward_w2;
        }
    }

    int i_attention_wv    = 0;
    int i_feed_forward_w2 = 0;
#endif

    size_t total_size_org = 0;
    size_t total_size_new = 0;
    std::vector<int64_t> hist_all(1 << 4, 0);

    std::vector<std::thread> workers;
    std::mutex mutex;
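
    // heuristic for the *_K_M file types: layers in the first or last eighth of the stack, plus
    // every third layer in between, are given a higher-bit quantization type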
    auto use_more_bits = [] (int i_layer, int num_layers) -> bool {
        return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
    };

    size_t idx = 0;
    for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) {
        llama_buffer read_data;
        read_data.resize(tensor.size);
        tensor.data = read_data.addr;
        model_loader->load_data_for(tensor);

        printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
               ++idx, model_loader->tensors_map.tensors.size(),
               tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
               ggml_type_name(tensor.type));

        // This used to be a regex, but <regex> has an extreme cost to compile times.
        bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'?

        // quantize only 2D tensors
        quantize &= (tensor.ne.size() == 2);
        quantize &= params->quantize_output_tensor || tensor.name != "output.weight";
        quantize &= quantized_type != tensor.type;
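        // `quantize` is now set only for 2-D "*.weight" tensors whose type actually changes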

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;
        llama_buffer work;

        if (!quantize) {
            new_type = tensor.type;
            new_data = tensor.data;
            new_size = tensor.size;
            printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
        } else {
            new_type = quantized_type;
#ifdef GGML_USE_K_QUANTS
            if (tensor.name == "output.weight") {
                int nx = tensor.ne.at(0);
                int ny = tensor.ne.at(1);
                if (nx % QK_K == 0 && ny % QK_K == 0) {
                    new_type = GGML_TYPE_Q6_K;
                }
            } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
                if      (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                        use_more_bits(i_attention_wv, n_attention_wv)) new_type = GGML_TYPE_Q6_K;
                else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
                        (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
                ++i_attention_wv;
            } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
                if      (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                        use_more_bits(i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
                //else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && i_feed_forward_w2 < n_feed_forward_w2/8) new_type = GGML_TYPE_Q6_K;
                ++i_feed_forward_w2;
            } else if (tensor.name.find("attention.wo.weight") != std::string::npos) {
                if      (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
            }

            bool convert_incompatible_tensor = false;
            if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
                new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
                int nx = tensor.ne.at(0);
                int ny = tensor.ne.at(1);
                if (nx % QK_K != 0 || ny % QK_K != 0) {
                    fprintf(stderr, "\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n", nx, ny, QK_K);
                    convert_incompatible_tensor = true;
                }
            }
            if (convert_incompatible_tensor) {
                if (tensor.name == "output.weight") {
                    new_type = GGML_TYPE_F16; // fall back to F16 instead of just failing
                    fprintf(stderr, "F16 will be used for this tensor instead.\n");
                } else if (tensor.name == "tok_embeddings.weight") {
                    new_type = GGML_TYPE_Q4_0; // fall back to Q4_0 instead of just failing
                    fprintf(stderr, "Q4_0 will be used for this tensor instead.\n");
                } else {
                    throw std::runtime_error("Unsupported tensor size encountered\n");
                }
            }
#endif

            float * f32_data;
            size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
            llama_buffer f32_conv_buf;

            if (tensor.type == GGML_TYPE_F32) {
                f32_data = (float *) tensor.data;
            } else if (ggml_is_quantized(tensor.type) && !params->allow_requantize) {
                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor.type)));
            } else {
                llama_convert_tensor_internal(tensor, f32_conv_buf, nelements, nthread);
                f32_data = (float *) f32_conv_buf.addr;
            }

            printf("quantizing to %s .. ", ggml_type_name(new_type));
            fflush(stdout);

            work.resize(nelements * 4); // upper bound on size
            new_data = work.addr;
            std::vector<int64_t> hist_cur(1 << 4, 0);

            int chunk_size = 32 * 512;
            const int nchunk = (nelements + chunk_size - 1)/chunk_size;
            const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
            if (nthread_use < 2) {
                new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
            } else {
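                // multi-threaded quantization: a shared counter hands out fixed-size element chunks
                // to each worker; per-thread histograms and sizes are merged under the mutex once a
                // worker runs out of work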
                size_t counter = 0;
                new_size = 0;
                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () {
                    std::vector<int64_t> local_hist;
                    size_t local_size = 0;
                    while (true) {
                        std::unique_lock<std::mutex> lock(mutex);
                        size_t first = counter; counter += chunk_size;
                        if (first >= nelements) {
                            if (!local_hist.empty()) {
                                for (int j = 0; j < int(local_hist.size()); ++j) {
                                    hist_cur[j] += local_hist[j];
                                }
                                new_size += local_size;
                            }
                            break;
                        }
                        lock.unlock();
                        size_t last = std::min(nelements, first + chunk_size);
                        if (local_hist.empty()) {
                            local_hist.resize(hist_cur.size(), 0);
                        }
                        local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
                    }
                };
                if ((int) workers.size() < nthread_use - 1) {
                    workers.resize(nthread_use - 1);
                }
                for (int it = 0; it < nthread_use - 1; ++it) {
                    workers[it] = std::thread(compute);
                }
                compute();
                for (int it = 0; it < nthread_use - 1; ++it) {
                    workers[it].join();
                }
            }

            printf("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
            int64_t tot_count = 0;
            for (size_t i = 0; i < hist_cur.size(); i++) {
                hist_all[i] += hist_cur[i];
                tot_count += hist_cur[i];
            }

            if (tot_count > 0) {
                for (size_t i = 0; i < hist_cur.size(); i++) {
                    printf("%5.3f ", hist_cur[i] / float(nelements));
                }
            }
            printf("\n");
        }
        total_size_org += tensor.size;
        total_size_new += new_size;
        file_saver.write_tensor(tensor, new_type, new_data, new_size);
    }

    printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
    printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);

    {
        int64_t sum_all = 0;
        for (size_t i = 0; i < hist_all.size(); i++) {
            sum_all += hist_all[i];
        }

        if (sum_all > 0) {
            printf("%s: hist: ", __func__);
            for (size_t i = 0; i < hist_all.size(); i++) {
                printf("%5.3f ", hist_all[i] / float(sum_all));
            }
            printf("\n");
        }
    }
}

//
// interface implementation
//

struct llama_model * llama_load_model_from_file(
        const char * path_model,
        struct llama_context_params params) {
    ggml_time_init();

    llama_model * model = new llama_model;

    ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

    if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers,
                params.main_gpu, params.tensor_split, params.rope_freq_base, params.rope_freq_scale, params.low_vram,
                memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
                params.progress_callback_user_data)) {
        delete model;
        fprintf(stderr, "%s: failed to load model\n", __func__);
        return nullptr;
    }

    return model;
}

void llama_free_model(struct llama_model * model) {
    delete model;
}
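
// Create a new inference context on top of an already loaded model: seeds the RNG, installs a
// default progress callback if none was given, allocates the KV cache and the compute/scratch
// buffers, and (when enabled at build time) sets up the Metal and MPI backends.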
struct llama_context * llama_new_context_with_model(
        struct llama_model * model,
        struct llama_context_params params) {

    if (!model) {
        return nullptr;
    }

    llama_context * ctx = new llama_context(*model);

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    unsigned cur_percentage = 0;
    if (params.progress_callback == NULL) {
        params.progress_callback_user_data = &cur_percentage;
        params.progress_callback = [](float progress, void * ctx) {
            unsigned * cur_percentage_p = (unsigned *) ctx;
            unsigned percentage = (unsigned) (100 * progress);
            while (percentage > *cur_percentage_p) {
                *cur_percentage_p = percentage;
                fprintf(stderr, ".");
                fflush(stderr);
                if (percentage >= 100) {
                    fprintf(stderr, "\n");
                }
            }
        };
    }

    ctx->rng = std::mt19937(params.seed);
    ctx->logits_all = params.logits_all;

    ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

    // reserve memory for context buffers
    if (!params.vocab_only) {
        if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
            fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
            llama_free(ctx);
            return nullptr;
        }

        {
            const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
            fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
        }

        const auto & hparams = ctx->model.hparams;

        // resized during inference
        if (params.logits_all) {
            ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
        } else {
            ctx->logits.reserve(hparams.n_vocab);
        }

        if (params.embedding) {
            ctx->embedding.resize(hparams.n_embd);
        }

        ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type));

        ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type));
        ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
    }

#ifdef GGML_USE_METAL
    if (params.n_gpu_layers > 0) {
        // this allocates all Metal resources and memory buffers
        ctx->ctx_metal = ggml_metal_init(1);

        void * data_ptr  = NULL;
        size_t data_size = 0;

        if (params.use_mmap) {
            data_ptr  = ctx->model.mapping->addr;
            data_size = ctx->model.mapping->size;
        } else {
            data_ptr  = ggml_get_mem_buffer(ctx->model.ctx);
            data_size = ggml_get_mem_size  (ctx->model.ctx);
        }

        const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);

        fprintf(stderr, "%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);

#define LLAMA_METAL_CHECK_BUF(result)                                \
        if (!(result)) {                                             \
            fprintf(stderr, "%s: failed to add buffer\n", __func__); \
            llama_free(ctx);                                         \
            return NULL;                                             \
        }

        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size, 0));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv",   ctx->kv_self.buf.addr, ctx->kv_self.buf.size, 0));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].addr, ctx->buf_scratch[0].size, 0));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].addr, ctx->buf_scratch[1].size, 0));
#undef LLAMA_METAL_CHECK_BUF
    }
#endif

#ifdef GGML_USE_MPI
    ctx->ctx_mpi = ggml_mpi_init();

    if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
        // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
        const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos());
        while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
        llama_backend_free();
        exit(1);
    }
#endif

    return ctx;
}

struct llama_context * llama_init_from_file(
        const char * path_model,
        struct llama_context_params params) {

    struct llama_model * model = llama_load_model_from_file(path_model, params);
    if (!model) {
        return nullptr;
    }
    struct llama_context * ctx = llama_new_context_with_model(model, params);
    ctx->model_owner = true;
    return ctx;
}

void llama_free(struct llama_context * ctx) {
    if (ctx->model_owner) {
        delete &ctx->model;
    }
    delete ctx;
}

int llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        const llama_model_quantize_params * params) {
    try {
        llama_model_quantize_internal(fname_inp, fname_out, params);
        return 0;
    } catch (const std::exception & err) {
        fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.what());
        return 1;
    }
}
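
// Apply a LoRA adapter (GGLA file) to the model's weight tensors in place: for every loraA/loraB
// pair the update w = w + scaling * (B*A) is computed with a small ggml graph, where
// scaling = lora_alpha / lora_r. If `path_base_model` is given, the original (typically f16/f32)
// weights are read from that file instead of the possibly quantized in-memory tensors.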
int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
    fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);

    const int64_t t_start_lora_us = ggml_time_us();

    auto fin = std::ifstream(path_lora, std::ios::binary);
    if (!fin) {
        fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
        return 1;
    }

    // verify magic and version
    {
        uint32_t magic;
        fin.read((char *) &magic, sizeof(magic));
        if (magic != LLAMA_FILE_MAGIC_GGLA) {
            fprintf(stderr, "%s: bad file magic\n", __func__);
            return 1;
        }
        uint32_t format_version;
        fin.read((char *) &format_version, sizeof(format_version));

        if (format_version != 1) {
            fprintf(stderr, "%s: unsupported file version\n", __func__);
            return 1;
        }
    }

    int32_t lora_r;
    int32_t lora_alpha;
    fin.read((char *) &lora_r, sizeof(lora_r));
    fin.read((char *) &lora_alpha, sizeof(lora_alpha));
    float scaling = (float)lora_alpha / (float)lora_r;

    fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);

    // create a temporary ggml context to store the lora tensors
    // todo: calculate size from biggest possible tensor
    std::vector<uint8_t> lora_buf(1024ull * 1024ull * 1024ull);
    struct ggml_init_params params;
    params.mem_size   = lora_buf.size();
    params.mem_buffer = lora_buf.data();
    params.no_alloc   = false;

    ggml_context * lora_ctx = ggml_init(params);
    std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;

    // create a name -> tensor map of the model to accelerate lookups
    std::unordered_map<std::string, struct ggml_tensor *> model_tensors;
    for (const auto & kv : model.tensors_by_name) {
        model_tensors.insert(kv);
    }

    // load base model
    std::unique_ptr<llama_model_loader> model_loader;
    ggml_context * base_ctx = NULL;
    llama_buffer base_buf;
    if (path_base_model) {
        fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model);
        model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));

        size_t ctx_size;
        size_t mmapped_size;
        model_loader->calc_sizes(&ctx_size, &mmapped_size);
        base_buf.resize(ctx_size);

        ggml_init_params base_params;
        base_params.mem_size   = base_buf.size;
        base_params.mem_buffer = base_buf.addr;
        base_params.no_alloc   = model_loader->use_mmap;

        base_ctx = ggml_init(base_params);

        model_loader->ggml_ctx = base_ctx;

        // maybe this should be in llama_model_loader
        if (model_loader->use_mmap) {
            model_loader->mapping.reset(new llama_mmap(&model_loader->file_loader->file, /* prefetch */ 0, ggml_is_numa()));
        }
    }

    // read tensors and apply
    bool warned = false;
    int n_tensors = 0;

    std::vector<uint8_t> work_buffer;

    while (true) {
        int32_t n_dims;
        int32_t length;
        int32_t ftype;

        fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
        fin.read(reinterpret_cast<char *>(&length), sizeof(length));
        fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
        if (fin.eof()) {
            break;
        }

        int32_t ne[2] = { 1, 1 };
        for (int i = 0; i < n_dims; ++i) {
            fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
        }

        std::string name;
        {
            char buf[1024];
            fin.read(buf, length);
            name = std::string(buf, length);
        }

        // check for lora suffix and get the type of tensor
        const std::string lora_suffix = ".lora";
        size_t pos = name.rfind(lora_suffix);
        if (pos == std::string::npos) {
            fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
            return 1;
        }

        std::string lora_type = name.substr(pos + lora_suffix.length());
        std::string base_name = name;
        base_name.erase(pos);
        // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(), base_name.c_str(), lora_type.c_str());

        if (model_tensors.find(base_name) == model_tensors.end()) {
            fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
            return 1;
        }

        // create ggml tensor
        ggml_type wtype;
        switch (ftype) {
            case 0: wtype = GGML_TYPE_F32; break;
            case 1: wtype = GGML_TYPE_F16; break;
            default:
                    {
                        fprintf(stderr, "%s: invalid tensor data type '%d'\n",
                                __func__, ftype);
                        return 1;
                    }
        }
        ggml_tensor * lora_tensor;
        if (n_dims == 2) {
            lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
        }
        else {
            fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims);
            return 1;
        }
        ggml_set_name(lora_tensor, "lora_tensor");

        // load tensor data
        size_t offset = fin.tellg();
        size_t tensor_data_size = ggml_nbytes(lora_tensor);
        offset = (offset + 31) & -32;
        fin.seekg(offset);
        fin.read((char*)lora_tensor->data, tensor_data_size);

        lora_tensors[name] = lora_tensor;

        // check if we have both A and B tensors and apply
        if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
            lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {

            ggml_tensor * dest_t = model_tensors[base_name];

            offload_func_t offload_func = llama_nop;
            offload_func_t offload_func_force_inplace = llama_nop;

#ifdef GGML_USE_CUBLAS
            if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
                if (dest_t->type != GGML_TYPE_F16) {
                    throw std::runtime_error(format(
                        "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models", __func__));
                }
                offload_func = ggml_cuda_assign_buffers;
                offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
            }
#endif // GGML_USE_CUBLAS

            ggml_tensor * base_t;
            if (model_loader) {
                // load from base model
                if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
                    fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
                    return 1;
                }
                size_t idx = model_loader->tensors_map.name_to_idx[base_name];
                llama_load_tensor & lt = model_loader->tensors_map.tensors[idx];
                base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
                lt.data = (uint8_t *) lt.ggml_tensor->data;
                model_loader->load_data_for(lt);
                lt.ggml_tensor->data = lt.data;
            }
            else {
                base_t = dest_t;
            }

            if (ggml_is_quantized(base_t->type)) {
                if (!warned) {
                    fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
                            "use a f16 or f32 base model with --lora-base\n", __func__);
                    warned = true;
                }
            }

            ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
            GGML_ASSERT(loraA->type == GGML_TYPE_F32);
            ggml_set_name(loraA, "loraA");

            ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
            GGML_ASSERT(loraB->type == GGML_TYPE_F32);
            ggml_set_name(loraB, "loraB");

            if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
                fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
                        " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
                return 1;
            }

            // w = w + BA*s
            ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
            offload_func(BA);
            ggml_set_name(BA, "BA");

            if (scaling != 1.0f) {
                ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
                ggml_set_name(scale_tensor, "scale_tensor");

                BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor);
                offload_func(BA);
                ggml_set_name(BA, "BA_scaled");
            }

            ggml_tensor * r;
            if (base_t == dest_t) {
                r = ggml_add_inplace(lora_ctx, dest_t, BA);
                offload_func_force_inplace(r);
                ggml_set_name(r, "r_add_inplace");
            }
            else {
                r = ggml_add(lora_ctx, base_t, BA);
                offload_func(r);
                ggml_set_name(r, "r_add");

                r = ggml_cpy(lora_ctx, r, dest_t);
                offload_func(r);
                ggml_set_name(r, "r_cpy");
            }

            struct ggml_cgraph gf = ggml_build_forward(r);

            ggml_graph_compute_helper(work_buffer, &gf, n_threads);

            // we won't need these tensors again, reset the context to save memory
            ggml_free(lora_ctx);
            lora_ctx = ggml_init(params);
            lora_tensors.clear();

            n_tensors++;
            if (n_tensors % 4 == 0) {
                fprintf(stderr, ".");
            }
        }
    }

    // TODO: this should be in a destructor, it will leak on failure
    ggml_free(lora_ctx);
    if (base_ctx) {
        ggml_free(base_ctx);
    }

    const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
    fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);

    return 0;
}

int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
    try {
        return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
    } catch (const std::exception & err) {
        fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, const char * path_base_model, int n_threads) {
    try {
        return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
    } catch (const std::exception & err) {
        fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
    return ctx->kv_self.n;
}

#define LLAMA_MAX_RNG_STATE (64*1024)

void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
    if (seed == LLAMA_DEFAULT_SEED) {
        seed = time(NULL);
    }
    ctx->rng.seed(seed);
}

// Returns the *maximum* size of the state
size_t llama_get_state_size(const struct llama_context * ctx) {
    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size        = sizeof(size_t);
    const size_t s_rng             = LLAMA_MAX_RNG_STATE;
    const size_t s_logits_capacity = sizeof(size_t);
    const size_t s_logits_size     = sizeof(size_t);
    const size_t s_logits          = ctx->logits.capacity() * sizeof(float);
    const size_t s_embedding_size  = sizeof(size_t);
    const size_t s_embedding       = ctx->embedding.size() * sizeof(float);
    const size_t s_kv_size         = sizeof(size_t);
    const size_t s_kv_ntok         = sizeof(int);
    const size_t s_kv              = ctx->kv_self.buf.size;

    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_logits_capacity
        + s_logits_size
        + s_logits
        + s_embedding_size
        + s_embedding
        + s_kv_size
        + s_kv_ntok
        + s_kv
    );

    return s_total;
}

// Copies the state to the specified destination address
size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
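    // serialization layout (mirrored by llama_set_state_data below):
    //   rng size + fixed-size rng buffer, logits capacity + size + data,
    //   embedding size + data, kv cache size + token count + kv tensor data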
    uint8_t * out = dst;

    // copy rng
    {
        std::stringstream rng_ss;
        rng_ss << ctx->rng;

        const size_t rng_size = rng_ss.str().size();
        char rng_buf[LLAMA_MAX_RNG_STATE];

        memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
        memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());

        memcpy(out, &rng_size,   sizeof(rng_size));    out += sizeof(rng_size);
        memcpy(out, &rng_buf[0], LLAMA_MAX_RNG_STATE); out += LLAMA_MAX_RNG_STATE;
    }

    // copy logits
    {
        const size_t logits_cap  = ctx->logits.capacity();
        const size_t logits_size = ctx->logits.size();

        memcpy(out, &logits_cap,  sizeof(logits_cap));  out += sizeof(logits_cap);
        memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size);

        if (logits_size) {
            memcpy(out, ctx->logits.data(), logits_size * sizeof(float));
        }

        out += logits_cap * sizeof(float);
    }

    // copy embeddings
    {
        const size_t embedding_size = ctx->embedding.size();

        memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size);

        if (embedding_size) {
            memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float));
            out += embedding_size * sizeof(float);
        }
    }

    // copy kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const int    n_layer = hparams.n_layer;
        const int    n_embd  = hparams.n_embd;
        const int    n_ctx   = hparams.n_ctx;

        const size_t kv_size = kv_self.buf.size;
        const int    kv_ntok = llama_get_kv_cache_token_count(ctx);

        memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size);
        memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok);

        if (kv_size) {
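            // build a throwaway ggml graph that copies 3-D views of the K and V caches into tensors
            // whose data pointers alias the output buffer, so the copy respects the cache layout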
            const size_t elt_size = ggml_element_size(kv_self.k);

            ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
            ggml_cgraph gf{};

            ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
            kout3d->data = out;
            out += ggml_nbytes(kout3d);

            ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
            vout3d->data = out;
            out += ggml_nbytes(vout3d);

            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                n_embd, kv_ntok, n_layer,
                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
                kv_ntok, n_embd, n_layer,
                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d));
            ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);

            ggml_free(cpy_ctx);
        }
    }

    const size_t written  = out - dst;
    const size_t max_size = llama_get_state_size(ctx);

    LLAMA_ASSERT(written <= max_size);

    return written;
}

// Sets the state reading from the specified source address
size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
    uint8_t * inp = src;

    // set rng
    {
        size_t rng_size;
        char   rng_buf[LLAMA_MAX_RNG_STATE];

        memcpy(&rng_size,   inp, sizeof(rng_size));    inp += sizeof(rng_size);
        memcpy(&rng_buf[0], inp, LLAMA_MAX_RNG_STATE); inp += LLAMA_MAX_RNG_STATE;

        std::stringstream rng_ss;
        rng_ss.str(std::string(&rng_buf[0], rng_size));
        rng_ss >> ctx->rng;

        LLAMA_ASSERT(rng_ss.fail() == false);
    }

    // set logits
    {
        size_t logits_cap;
        size_t logits_size;

        memcpy(&logits_cap,  inp, sizeof(logits_cap));  inp += sizeof(logits_cap);
        memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);

        LLAMA_ASSERT(ctx->logits.capacity() == logits_cap);

        if (logits_size) {
            ctx->logits.resize(logits_size);
            memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
        }

        inp += logits_cap * sizeof(float);
    }

    // set embeddings
    {
        size_t embedding_size;

        memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);

        LLAMA_ASSERT(ctx->embedding.capacity() == embedding_size);

        if (embedding_size) {
            memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
            inp += embedding_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const int    n_layer = hparams.n_layer;
        const int    n_embd  = hparams.n_embd;
        const int    n_ctx   = hparams.n_ctx;

        size_t kv_size;
        int    kv_ntok;

        memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
        memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok);

        if (kv_size) {
            LLAMA_ASSERT(kv_self.buf.size == kv_size);

            const size_t elt_size = ggml_element_size(kv_self.k);

            ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
            ggml_cgraph gf{};

            ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
            kin3d->data = (void *) inp;
            inp += ggml_nbytes(kin3d);

            ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
            vin3d->data = (void *) inp;
            inp += ggml_nbytes(vin3d);

            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                n_embd, kv_ntok, n_layer,
                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
                kv_ntok, n_embd, n_layer,
                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d));
            ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);

            ggml_free(cpy_ctx);
        }

        ctx->kv_self.n = kv_ntok;
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_get_state_size(ctx);

    LLAMA_ASSERT(nread <= max_size);

    return nread;
}
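
// Restore a saved session: validate magic/version and model hparams, read the stored prompt
// tokens into `tokens_out` (up to `n_token_capacity`), then restore the serialized context state.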
static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            fprintf(stderr, "%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            fprintf(stderr, "%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            fprintf(stderr, "%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_get_state_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            fprintf(stderr, "%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_set_state_data(ctx, state_data.data());
    }

    return true;
}

bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        fprintf(stderr, "error loading session file: %s\n", err.what());
        return false;
    }
}

bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state
    {
        const size_t n_state_size_max = llama_get_state_size(ctx);

        std::vector<uint8_t> state_data(n_state_size_max);
        const size_t n_state_size_cur = llama_copy_state_data(ctx, state_data.data());

        file.write_raw(state_data.data(), n_state_size_cur);
    }

    return true;
}

int llama_eval(
        struct llama_context * ctx,
        const llama_token * tokens,
        int n_tokens,
        int n_past,
        int n_threads) {
    if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
        fprintf(stderr, "%s: failed to eval\n", __func__);
        return 1;
    }

    // get a more accurate load time, upon first eval
    // TODO: fix this
    if (!ctx->has_evaluated_once) {
        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
        ctx->has_evaluated_once = true;
    }

    return 0;
}

int llama_eval_embd(
        struct llama_context * ctx,
        const float * embd,
        int n_tokens,
        int n_past,
        int n_threads) {
    if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
        fprintf(stderr, "%s: failed to eval\n", __func__);
        return 1;
    }

    // get a more accurate load time, upon first eval
    // TODO: fix this
    if (!ctx->has_evaluated_once) {
        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
        ctx->has_evaluated_once = true;
    }

    return 0;
}

int llama_eval_export(struct llama_context * ctx, const char * fname) {
    const int n_batch = 1;
    const int n_ctx   = 512 - n_batch;

    const std::vector<llama_token> tmp(n_batch, llama_token_bos());

    if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
        fprintf(stderr, "%s: failed to eval\n", __func__);
        return 1;
    }

    return 0;
}

int llama_tokenize_with_model(
        const struct llama_model * model,
        const char * text,
        llama_token * tokens,
        int n_max_tokens,
        bool add_bos) {
    auto res = llama_tokenize(model->vocab, text, add_bos);

    if (n_max_tokens < (int) res.size()) {
        fprintf(stderr, "%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}

int llama_tokenize(
        struct llama_context * ctx,
        const char * text,
        llama_token * tokens,
        int n_max_tokens,
        bool add_bos) {
    return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
}

int llama_n_vocab_from_model(const struct llama_model * model) {
    return model->vocab.id_to_token.size();
}

int llama_n_ctx_from_model(const struct llama_model * model) {
    return model->hparams.n_ctx;
}

int llama_n_embd_from_model(const struct llama_model * model) {
    return model->hparams.n_embd;
}

int llama_n_vocab(const struct llama_context * ctx) {
    return ctx->model.vocab.id_to_token.size();
}

int llama_n_ctx(const struct llama_context * ctx) {
    return ctx->model.hparams.n_ctx;
}

int llama_n_embd(const struct llama_context * ctx) {
    return ctx->model.hparams.n_embd;
}

int llama_get_vocab_from_model(
        const struct llama_model * model,
        const char * * strings,
        float * scores,
        int capacity) {
    int n = std::min(capacity, (int) model->vocab.id_to_token.size());
    for (int i = 0; i < n; ++i) {
        strings[i] = model->vocab.id_to_token[i].tok.c_str();
        scores[i]  = model->vocab.id_to_token[i].score;
    }
    return n;
}

int llama_get_vocab(
        const struct llama_context * ctx,
        const char * * strings,
        float * scores,
        int capacity) {
    return llama_get_vocab_from_model(&ctx->model, strings, scores, capacity);
}

float * llama_get_logits(struct llama_context * ctx) {
    return ctx->logits.data();
}

float * llama_get_embeddings(struct llama_context * ctx) {
    return ctx->embedding.data();
}

const char * llama_token_to_str_with_model(const struct llama_model * model, llama_token token) {
    if (token >= llama_n_vocab_from_model(model)) {
        return nullptr;
    }

    return model->vocab.id_to_token[token].tok.c_str();
}

const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) {
    return llama_token_to_str_with_model(&ctx->model, token);
}

llama_token llama_token_bos() {
    return 1;
}

llama_token llama_token_eos() {
    return 2;
}

llama_token llama_token_nl() {
    return 13;
}

struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}

void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    fprintf(stderr, "\n");
    fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
    fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";

    return s.c_str();
}

// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx) {
    return ctx->model.tensors_by_name;
}