// llama.cpp

// Defines fileno on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#include <cstddef>
#include <cstdint>
#include <cstdio>
#endif

#include "llama-util.h"
#include "llama.h"

#include "ggml.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif
#ifdef GGML_USE_MPI
#include "ggml-mpi.h"
#endif

#ifdef GGML_USE_K_QUANTS
#ifndef QK_K
#ifdef GGML_QKK_64
#define QK_K 64
#else
#define QK_K 256
#endif
#endif
#endif

#include <array>
#include <ctime>
#include <cinttypes>
#include <fstream>
#include <random>
#include <map>
#include <unordered_map>
#include <queue>
#include <cassert>
#include <cstring>
#include <climits>
#include <memory>
#include <algorithm>
#include <initializer_list>
#include <thread>
#include <atomic>
#include <mutex>
#include <sstream>
#include <numeric>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

static void llama_log_internal(llama_log_level level, const char* format, ...);
static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...)  llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)

#if !defined(GGML_USE_CUBLAS)
#include "ggml-alloc.h"
#define LLAMA_USE_ALLOCATOR
#else
#define LLAMA_USE_SCRATCH
#define LLAMA_MAX_SCRATCH_BUFFERS 16
#endif

// available llama models
enum e_model {
    MODEL_UNKNOWN,
    MODEL_3B,
    MODEL_7B,
    MODEL_13B,
    MODEL_30B,
    MODEL_65B,
    MODEL_70B,
};

static const size_t kB = 1024;
static const size_t MB = 1024*1024;

// computed for n_ctx == 2048
// TODO: dynamically determine these sizes
//       needs modifications in ggml

typedef void (*offload_func_t)(struct ggml_tensor * tensor);

void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
    (void) tensor;
}

//
// ggml helpers
//
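// helper: builds a ggml_cplan for `graph` with n_threads, points its work_data at `buf`
// (resized on demand), then computes the graph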
static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
    struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);

    if (plan.work_size > 0) {
        buf.resize(plan.work_size);
        plan.work_data = buf.data();
    }

    ggml_graph_compute(graph, &plan);
}

//
// memory sizes (calculated for n_batch == 512)
//
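// example: MEM_REQ_SCRATCH0 for MODEL_7B at n_ctx == 2048 works out to (2048/16 + 100) MB = 228 MB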
static std::map<e_model, size_t> MEM_REQ_SCRATCH0(int n_ctx)
{
    std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,  ((size_t) n_ctx / 16ull +  92ull) * MB },
        { MODEL_7B,  ((size_t) n_ctx / 16ull + 100ull) * MB },
        { MODEL_13B, ((size_t) n_ctx / 12ull + 120ull) * MB },
        { MODEL_30B, ((size_t) n_ctx /  9ull + 160ull) * MB },
        { MODEL_65B, ((size_t) n_ctx /  6ull + 256ull) * MB }, // guess
        { MODEL_70B, ((size_t) n_ctx /  7ull + 164ull) * MB },
    };
    return k_sizes;
}

static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,  128ull * MB },
        { MODEL_7B,  160ull * MB },
        { MODEL_13B, 192ull * MB },
        { MODEL_30B, 256ull * MB },
        { MODEL_65B, 384ull * MB }, // guess
        { MODEL_70B, 304ull * MB },
    };
    return k_sizes;
}

// used to store the compute graph tensors + non-scratch data
static const std::map<e_model, size_t> & MEM_REQ_EVAL()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,   8ull * MB },
        { MODEL_7B,  10ull * MB },
        { MODEL_13B, 12ull * MB },
        { MODEL_30B, 16ull * MB },
        { MODEL_65B, 24ull * MB }, // guess
        { MODEL_70B, 24ull * MB },
    };
    return k_sizes;
}

// amount of VRAM needed per batch size to hold temporary results
// the values for 3b are not derived from testing but instead chosen conservatively
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_BASE()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,   512ull * kB },
        { MODEL_7B,   512ull * kB },
        { MODEL_13B,  640ull * kB },
        { MODEL_30B,  768ull * kB },
        { MODEL_65B, 1280ull * kB },
        { MODEL_70B, 1280ull * kB },
    };
    return k_sizes;
}

// amount of VRAM needed per batch size and context to hold temporary results
// the values for 3b are not derived from testing but instead chosen conservatively
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_PER_CONTEXT()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,  128ull },
        { MODEL_7B,  128ull },
        { MODEL_13B, 160ull },
        { MODEL_30B, 208ull },
        { MODEL_65B, 256ull },
        { MODEL_70B, 256ull },
    };
    return k_sizes;
}

// default hparams (LLaMA 7B)
struct llama_hparams {
    uint32_t n_vocab   = 32000;
    uint32_t n_ctx     = 512;   // this is provided as user input?
    uint32_t n_embd    = 4096;
    uint32_t n_mult    = 256;
    uint32_t n_head    = 32;
    uint32_t n_head_kv = 32;
    uint32_t n_layer   = 32;
    uint32_t n_rot     = 64;

    // LLaMAv2
    // TODO: load from model data hparams
    float f_ffn_mult     = 1.0f;
    float f_rms_norm_eps = LLAMA_DEFAULT_RMS_EPS;

    float rope_freq_base  = 10000.0f;
    float rope_freq_scale = 1.0f;

    enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;

    bool operator!=(const llama_hparams & other) const {
        return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT
    }

    uint32_t n_gqa() const {
        return n_head/n_head_kv;
    }

    uint32_t n_embd_head() const {
        return n_embd/n_head;
    }

    uint32_t n_embd_gqa() const {
        return n_embd/n_gqa();
    }
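
    // total size in bytes of the KV cache: 2 (K and V) * n_embd_gqa * n_ctx * n_layer * sizeof(fp16)
    // e.g. the 7B defaults (n_embd_gqa = 4096, n_ctx = 512, n_layer = 32) give 2*4096*512*32*2 bytes = 256 MB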
    size_t kv_size() const {
        size_t result = 2ull;
        result *= (size_t) n_embd_gqa();
        result *= (size_t) n_ctx;
        result *= (size_t) n_layer;
        result *= sizeof(ggml_fp16_t);
        return result;
    }
};

struct llama_layer {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * w1;
    struct ggml_tensor * w2;
    struct ggml_tensor * w3;
};

struct llama_kv_cache {
    struct ggml_tensor * k = NULL;
    struct ggml_tensor * v = NULL;

    struct ggml_context * ctx = NULL;

    llama_ctx_buffer buf;

    int n; // number of tokens currently in the cache

    ~llama_kv_cache() {
        if (ctx) {
            ggml_free(ctx);
        }

#ifdef GGML_USE_CUBLAS
        ggml_cuda_free_data(k);
        ggml_cuda_free_data(v);
#endif // GGML_USE_CUBLAS
    }
};

struct llama_vocab {
    using id    = int32_t;
    using token = std::string;

    struct token_score {
        token tok;
        float score;
    };

    std::unordered_map<token, id> token_to_id;
    std::vector<token_score> id_to_token;
};

struct llama_model {
    e_model type = MODEL_UNKNOWN;

    llama_hparams hparams;

    struct ggml_tensor * tok_embeddings;

    struct ggml_tensor * norm;
    struct ggml_tensor * output;

    std::vector<llama_layer> layers;
    int n_gpu_layers;

    // context
    struct ggml_context * ctx = NULL;

    // the model memory buffer
    llama_ctx_buffer buf;

    // model memory mapped file
    std::unique_ptr<llama_mmap> mapping;

    // objects representing data potentially being locked in memory
    llama_mlock mlock_buf;
    llama_mlock mlock_mmap;

    // for quantize-stats only
    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;

    int64_t t_load_us = 0;
    int64_t t_start_us = 0;

    llama_vocab vocab;

    ~llama_model() {
        if (ctx) {
            ggml_free(ctx);
        }

#ifdef GGML_USE_CUBLAS
        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
            ggml_cuda_free_data(tensors_by_name[i].second);
        }
        ggml_cuda_free_scratch();
#elif defined(GGML_USE_CLBLAST)
        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
            ggml_cl_free_data(tensors_by_name[i].second);
        }
#endif
    }
};

struct llama_context {
    llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
    ~llama_context() {
        if (model_owner) {
            delete &model;
        }
#ifdef GGML_USE_METAL
        if (ctx_metal) {
            ggml_metal_free(ctx_metal);
        }
#endif
#ifdef LLAMA_USE_ALLOCATOR
        if (alloc) {
            ggml_allocr_free(alloc);
        }
#endif
    }

    std::mt19937 rng;

    bool has_evaluated_once = false;

    int64_t t_sample_us = 0;
    int64_t t_eval_us   = 0;
    int64_t t_p_eval_us = 0;

    int32_t n_sample = 0; // number of tokens sampled
    int32_t n_eval   = 0; // number of eval calls
    int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)

    const llama_model & model;

    bool model_owner = false;

    int64_t t_load_us;
    int64_t t_start_us;

    // key + value cache for the self attention
    struct llama_kv_cache kv_self;

    size_t mem_per_token = 0;

    // decode output (2-dimensional array: [n_tokens][n_vocab])
    std::vector<float> logits;
    bool logits_all = false;

    // input embedding (1-dimensional array: [n_embd])
    std::vector<float> embedding;

    // reusable buffer for `struct ggml_graph_plan.work_data`
    std::vector<uint8_t> work_buffer;

    // memory buffers used to evaluate the model
    // TODO: move in llama_state
    llama_ctx_buffer buf_compute;

#ifdef LLAMA_USE_ALLOCATOR
    llama_ctx_buffer buf_alloc;
    ggml_allocr * alloc = NULL;
#endif

#ifdef LLAMA_USE_SCRATCH
    llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
    int    buf_last = 0;
    size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
#endif

#ifdef GGML_USE_METAL
    ggml_metal_context * ctx_metal = NULL;
#endif

#ifdef GGML_USE_MPI
    ggml_mpi_context * ctx_mpi = NULL;
#endif
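
    // switch ggml allocations to scratch buffer i (i == -1 restores the default buffer)
    // and record the high-water mark of the buffer being left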
    void use_buf(struct ggml_context * ctx, int i) {
#if defined(LLAMA_USE_SCRATCH)
        size_t last_size = 0;

        if (i == -1) {
            last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, });
        } else {
            auto & buf = buf_scratch[i];
            last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, });
        }

        if (buf_last >= 0) {
            buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size);
        }

        buf_last = i;
#else
        (void) i;
        (void) ctx;
#endif
    }

    size_t get_buf_max_mem(int i) const {
#if defined(LLAMA_USE_SCRATCH)
        return buf_max_size[i];
#else
        (void) i;
        return 0;
#endif
    }
};

struct llama_state {
    // We save the log callback globally
    llama_log_callback log_callback = llama_log_callback_default;
    void * log_callback_user_data = nullptr;
};

// global state
static llama_state g_state;
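
// multiply with overflow detection: if a != 0 and (a*b)/a != b, the product wrapped around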
template <typename T>
static T checked_mul(T a, T b) {
    T ret = a * b;
    if (a != 0 && ret / a != b) {
        throw std::runtime_error(format("overflow multiplying %llu * %llu",
                (unsigned long long) a, (unsigned long long) b));
    }
    return ret;
}

static size_t checked_div(size_t a, size_t b) {
    if (b == 0 || a % b != 0) {
        throw std::runtime_error(format("error dividing %zu / %zu", a, b));
    }
    return a / b;
}

static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
    for (size_t i = 1; i < ne.size(); i++) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
    }
    return buf;
}
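
// size in bytes of a tensor: product of its dimensions times the per-block type size,
// divided by the block size (e.g. 32 elements per block for Q4_0, 1 for F32/F16)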
static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) {
    size_t size = ggml_type_size(type);
    for (uint32_t dim : ne) {
        size = checked_mul<size_t>(size, dim);
    }
    return size / ggml_blck_size(type);
}

struct llama_load_tensor {
    std::string name;
    enum ggml_type type = GGML_TYPE_F32;
    std::vector<uint32_t> ne;
    size_t file_off;
    size_t size;
    struct ggml_tensor * ggml_tensor = NULL;
    uint8_t * data;
};

struct llama_load_tensors_map {
    // tensors is kept in a separate vector to preserve file order
    std::vector<llama_load_tensor> tensors;
    std::unordered_map<std::string, size_t> name_to_idx;
};

enum llama_file_version {
    LLAMA_FILE_VERSION_GGML,
    LLAMA_FILE_VERSION_GGMF_V1, // added version field and scores in vocab
    LLAMA_FILE_VERSION_GGJT_V1, // added padding
    LLAMA_FILE_VERSION_GGJT_V2, // changed quantization format
    LLAMA_FILE_VERSION_GGJT_V3, // changed Q4 and Q8 quantization format
};

struct llama_file_loader {
    llama_file file;
    llama_file_version file_version;
    llama_hparams hparams;
    llama_vocab vocab;

    llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map)
        : file(fname, "rb") {
        LLAMA_LOG_INFO("llama.cpp: loading model from %s\n", fname);
        read_magic();
        read_hparams();
        read_vocab();
        read_tensor_metadata(tensors_map);
    }

    void read_magic() {
        uint32_t magic = file.read_u32();

        if (magic == LLAMA_FILE_MAGIC_GGML) {
            file_version = LLAMA_FILE_VERSION_GGML;
            return;
        }

        uint32_t version = file.read_u32();

        switch (magic) {
            case LLAMA_FILE_MAGIC_GGMF:
                switch (version) {
                    case 1: file_version = LLAMA_FILE_VERSION_GGMF_V1; return;
                }
                break;
            case LLAMA_FILE_MAGIC_GGJT:
                switch (version) {
                    case 1: file_version = LLAMA_FILE_VERSION_GGJT_V1; return;
                    case 2: file_version = LLAMA_FILE_VERSION_GGJT_V2; return;
                    case 3: file_version = LLAMA_FILE_VERSION_GGJT_V3; return;
                }
        }

        throw std::runtime_error(format("unknown (magic, version) combination: %08x, %08x; is this really a GGML file?",
                magic, version));
    }

    void read_hparams() {
        hparams.n_vocab = file.read_u32();
        hparams.n_embd  = file.read_u32();
        hparams.n_mult  = file.read_u32();
        hparams.n_head  = file.read_u32();
        hparams.n_layer = file.read_u32();
        hparams.n_rot   = file.read_u32();
        hparams.ftype   = (enum llama_ftype) file.read_u32();

        // LLaMAv2
        // TODO: read from header
        hparams.n_head_kv = hparams.n_head;
    }

    void read_vocab() {
        vocab.id_to_token.resize(hparams.n_vocab);

        for (uint32_t i = 0; i < hparams.n_vocab; i++) {
            uint32_t len = file.read_u32();
            std::string word = file.read_string(len);

            float score = 0.0f;
            file.read_raw(&score, sizeof(score));

            vocab.token_to_id[word] = i;

            auto & tok_score = vocab.id_to_token[i];
            tok_score.tok = std::move(word);
            tok_score.score = score;
        }
    }

    void read_tensor_metadata(llama_load_tensors_map & tensors_map) {
        while (file.tell() < file.size) {
            llama_load_tensor tensor;
            uint32_t n_dims = file.read_u32();
            uint32_t name_len = file.read_u32();
            tensor.type = (enum ggml_type) file.read_u32();
            tensor.ne.resize(n_dims);
            file.read_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * n_dims);
            std::string name = file.read_string(name_len);
            if (n_dims < 1 || n_dims > 2) {
                throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims));
            }
            switch (tensor.type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                case GGML_TYPE_Q4_0:
                case GGML_TYPE_Q4_1:
                case GGML_TYPE_Q5_0:
                case GGML_TYPE_Q5_1:
                case GGML_TYPE_Q8_0:
                case GGML_TYPE_Q2_K:
                case GGML_TYPE_Q3_K:
                case GGML_TYPE_Q4_K:
                case GGML_TYPE_Q5_K:
                case GGML_TYPE_Q6_K:
                    break;
                default: {
                    throw std::runtime_error(format("unrecognized tensor type %u\n", tensor.type));
                }
            }

            // skip to the next multiple of 32 bytes
            if (file_version >= LLAMA_FILE_VERSION_GGJT_V1) {
                file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
            }

            tensor.file_off = file.tell();
            tensor.name = name;
            tensor.size = llama_calc_tensor_size(tensor.ne, tensor.type);
            file.seek(tensor.size, SEEK_CUR);

            tensors_map.tensors.push_back(tensor);
            tensors_map.name_to_idx[name] = tensors_map.tensors.size() - 1;
        }
    }
};

struct llama_file_saver {
    llama_file file;
    llama_file_loader * any_file_loader;

    llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
        : file(fname, "wb"), any_file_loader(any_file_loader) {
        LLAMA_LOG_INFO("llama.cpp: saving model to %s\n", fname);
        write_magic();
        write_hparams(new_ftype);
        write_vocab();
    }

    void write_magic() {
        file.write_u32(LLAMA_FILE_MAGIC);   // magic
        file.write_u32(LLAMA_FILE_VERSION); // version
    }

    void write_hparams(enum llama_ftype new_ftype) {
        const llama_hparams & hparams = any_file_loader->hparams;
        file.write_u32(hparams.n_vocab);
        file.write_u32(hparams.n_embd);
        file.write_u32(hparams.n_mult);
        file.write_u32(hparams.n_head);
        file.write_u32(hparams.n_layer);
        file.write_u32(hparams.n_rot);
        file.write_u32(new_ftype);
    }

    void write_vocab() {
        if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
            LLAMA_LOG_WARN("llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
        }
        uint32_t n_vocab = any_file_loader->hparams.n_vocab;
        for (uint32_t i = 0; i < n_vocab; i++) {
            const auto & token_score = any_file_loader->vocab.id_to_token.at(i);
            file.write_u32((uint32_t) token_score.tok.size());
            file.write_raw(token_score.tok.data(), token_score.tok.size());
            file.write_raw(&token_score.score, sizeof(token_score.score));
        }
    }

    void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) {
        switch (new_type) {
            case GGML_TYPE_F32:
            case GGML_TYPE_F16:
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
            case GGML_TYPE_Q2_K:
            case GGML_TYPE_Q3_K:
            case GGML_TYPE_Q4_K:
            case GGML_TYPE_Q5_K:
            case GGML_TYPE_Q6_K:
                break;
            default: LLAMA_ASSERT(false);
        }
        file.write_u32((uint32_t) tensor.ne.size());
        file.write_u32((uint32_t) tensor.name.size());
        file.write_u32(new_type);
        file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size());
        file.write_raw(tensor.name.data(), tensor.name.size());
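        // align the tensor data to a 32-byte boundary, matching the loader: (-pos) & 31 is the
        // number of padding bytes to the next multiple of 32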
        file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
        LLAMA_ASSERT(new_size == llama_calc_tensor_size(tensor.ne, new_type));
        file.write_raw(new_data, new_size);
    }
};

struct llama_model_loader {
    std::unique_ptr<llama_file_loader> file_loader;
    llama_load_tensors_map tensors_map;
    bool use_mmap;
    size_t num_ggml_tensors_created = 0;
    struct ggml_context * ggml_ctx = NULL;
    std::unique_ptr<llama_mmap> mapping;

    llama_model_loader(const std::string & fname_base, bool use_mmap) {
        file_loader = std::unique_ptr<llama_file_loader>(new llama_file_loader(fname_base.c_str(), tensors_map));
        if (!llama_mmap::SUPPORTED) {
            use_mmap = false;
        }
        this->use_mmap = use_mmap;
    }

    void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const {
        *ctx_size_p = *mmapped_size_p = 0;
        for (const llama_load_tensor & lt : tensors_map.tensors) {
            *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
            *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size + 16;
        }
    }

    struct ggml_tensor * get_tensor(const std::string & name, const std::vector<uint32_t> & ne, ggml_backend backend) {
        auto it = tensors_map.name_to_idx.find(name);
        if (it == tensors_map.name_to_idx.end()) {
            throw std::runtime_error(std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str())));
        }
        llama_load_tensor & lt = tensors_map.tensors.at(it->second);
        if (lt.ne != ne) {
            throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s",
                    name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str()));
        }

        return get_tensor_for(lt, backend);
    }

    struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) {
        struct ggml_tensor * tensor;
        if (backend != GGML_BACKEND_CPU) {
            ggml_set_no_alloc(ggml_ctx, true);
        }
        if (lt.ne.size() == 2) {
            tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1));
        } else {
            LLAMA_ASSERT(lt.ne.size() == 1);
            tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0));
        }
        ggml_set_name(tensor, lt.name.c_str());
        LLAMA_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor

        if (backend != GGML_BACKEND_CPU) {
            ggml_set_no_alloc(ggml_ctx, use_mmap);
        }
        tensor->backend = backend;
        lt.ggml_tensor = tensor;
        num_ggml_tensors_created++;
        return tensor;
    }

    void done_getting_tensors() const {
        if (num_ggml_tensors_created != tensors_map.tensors.size()) {
            throw std::runtime_error(std::string("llama.cpp: file contained more tensors than expected"));
        }
    }

    void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
        size_t data_size = 0;
        size_t prefetch_size = file_loader->file.size;
        size_t lock_size = 0;
        for (const llama_load_tensor & lt : tensors_map.tensors) {
            data_size += lt.size;
            if (lt.ggml_tensor->backend != GGML_BACKEND_CPU) {
                prefetch_size -= lt.size;
            }
        }

        if (use_mmap) {
            mapping.reset(new llama_mmap(&file_loader->file, prefetch_size, ggml_is_numa()));
            if (lmlock) {
                lmlock->init(mapping->addr);
            }
        }

        size_t done_size = 0;
        for (llama_load_tensor & lt : tensors_map.tensors) {
            if (progress_callback) {
                progress_callback((float) done_size / data_size, progress_callback_user_data);
            }
            LLAMA_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already
            lt.data = (uint8_t *) lt.ggml_tensor->data;

            // allocate temp buffer if not using mmap
            if (!use_mmap && lt.data == NULL) {
                GGML_ASSERT(lt.ggml_tensor->backend != GGML_BACKEND_CPU);
                lt.data = (uint8_t*)malloc(ggml_nbytes(lt.ggml_tensor));
            }

            load_data_for(lt);

            switch(lt.ggml_tensor->backend) {
                case GGML_BACKEND_CPU:
                    lt.ggml_tensor->data = lt.data;
                    if (use_mmap && lmlock) {
                        lock_size += lt.size;
                        lmlock->grow_to(lock_size);
                    }
                    break;
#if defined(GGML_USE_CUBLAS)
                case GGML_BACKEND_GPU:
                case GGML_BACKEND_GPU_SPLIT:
                    ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor);
                    if (!use_mmap) {
                        free(lt.data);
                    }
                    break;
#elif defined(GGML_USE_CLBLAST)
                case GGML_BACKEND_GPU:
                    ggml_cl_transform_tensor(lt.data, lt.ggml_tensor);
                    if (!use_mmap) {
                        free(lt.data);
                    }
                    break;
#endif
                default:
                    continue;
            }

            done_size += lt.size;
        }
    }

    void load_data_for(llama_load_tensor & lt) {
        if (use_mmap) {
            lt.data = (uint8_t *) mapping->addr + lt.file_off;
        } else {
            llama_file & file = file_loader->file;
            file.seek(lt.file_off, SEEK_SET);
            file.read_raw(lt.data, lt.size);
        }

        if (0) {
            print_checksum(lt);
        }
    }

    static void print_checksum(llama_load_tensor & lt) {
        uint32_t sum = 0;
        for (size_t i = 0; i < lt.size; i++) {
            uint8_t byte = lt.data[i];
            sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
        }
        LLAMA_LOG_INFO("%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
                llama_format_tensor_shape(lt.ne).c_str(), lt.size);
    }
};

//
// kv cache
//

static bool kv_cache_init(
        const struct llama_hparams & hparams,
        struct llama_kv_cache & cache,
        ggml_type wtype,
        int n_ctx,
        int n_gpu_layers) {
    const int n_embd  = hparams.n_embd_gqa();
    const int n_layer = hparams.n_layer;

    const int64_t n_mem      = n_layer*n_ctx;
    const int64_t n_elements = n_embd*n_mem;
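
    // one buffer holds both K and V (hence the 2u*), plus 2 MB of headroom, presumably for ggml object overhead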
    cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
    cache.n = 0;

    struct ggml_init_params params;
    params.mem_size   = cache.buf.size;
    params.mem_buffer = cache.buf.addr;
    params.no_alloc   = false;

    cache.ctx = ggml_init(params);

    if (!cache.ctx) {
        LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
        return false;
    }

    cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
    cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
    ggml_set_name(cache.k, "cache_k");
    ggml_set_name(cache.v, "cache_v");

    (void) n_gpu_layers;
#ifdef GGML_USE_CUBLAS
    if (n_gpu_layers > n_layer + 1) {
        ggml_cuda_assign_buffers_no_scratch(cache.v);
    }
    if (n_gpu_layers > n_layer + 2) {
        ggml_cuda_assign_buffers_no_scratch(cache.k);
    }
#endif // GGML_USE_CUBLAS

    return true;
}

struct llama_context_params llama_context_default_params() {
    struct llama_context_params result = {
        /*.seed                        =*/ LLAMA_DEFAULT_SEED,
        /*.n_ctx                       =*/ 512,
        /*.n_batch                     =*/ 512,
        /*.n_gqa                       =*/ 1,
        /*.rms_norm_eps                =*/ LLAMA_DEFAULT_RMS_EPS,
        /*.gpu_layers                  =*/ 0,
        /*.main_gpu                    =*/ 0,
        /*.tensor_split                =*/ nullptr,
        /*.rope_freq_base              =*/ 10000.0f,
        /*.rope_freq_scale             =*/ 1.0f,
        /*.progress_callback           =*/ nullptr,
        /*.progress_callback_user_data =*/ nullptr,
        /*.low_vram                    =*/ false,
        /*.mul_mat_q                   =*/ false,
        /*.f16_kv                      =*/ true,
        /*.logits_all                  =*/ false,
        /*.vocab_only                  =*/ false,
        /*.use_mmap                    =*/ true,
        /*.use_mlock                   =*/ false,
        /*.embedding                   =*/ false,
    };

    return result;
}

struct llama_model_quantize_params llama_model_quantize_default_params() {
    struct llama_model_quantize_params result = {
        /*.nthread                =*/ 0,
        /*.ftype                  =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
        /*.allow_requantize       =*/ false,
        /*.quantize_output_tensor =*/ true,
    };

    return result;
}

int llama_max_devices() {
    return LLAMA_MAX_DEVICES;
}

bool llama_mmap_supported() {
    return llama_mmap::SUPPORTED;
}

bool llama_mlock_supported() {
    return llama_mlock::SUPPORTED;
}

void llama_backend_init(bool numa) {
    ggml_time_init();

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    if (numa) {
        ggml_numa_init();
    }

#ifdef GGML_USE_MPI
    ggml_mpi_backend_init();
#endif
}

void llama_backend_free() {
#ifdef GGML_USE_MPI
    ggml_mpi_backend_free();
#endif
}

int64_t llama_time_us() {
    return ggml_time_us();
}

//
// model loading
//

static const char * llama_file_version_name(llama_file_version version) {
    switch (version) {
        case LLAMA_FILE_VERSION_GGML:    return "'ggml' (old version with low tokenizer quality and no mmap support)";
        case LLAMA_FILE_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)";
        case LLAMA_FILE_VERSION_GGJT_V1: return "ggjt v1 (pre #1405)";
        case LLAMA_FILE_VERSION_GGJT_V2: return "ggjt v2 (pre #1508)";
        case LLAMA_FILE_VERSION_GGJT_V3: return "ggjt v3 (latest)";
    }

    return "unknown";
}

const char * llama_ftype_name(enum llama_ftype ftype) {
    switch (ftype) {
        case LLAMA_FTYPE_ALL_F32:     return "all F32";
        case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16";
        case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
        case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
                                      return "mostly Q4_1, some F16";
        case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0";
        case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1";
        case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0";

        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K:   return "mostly Q2_K";
        case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large";
        case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q6_K:   return "mostly Q6_K";

        default: return "unknown, may not work";
    }
}

static const char * llama_model_type_name(e_model type) {
    switch (type) {
        case MODEL_3B:  return "3B";
        case MODEL_7B:  return "7B";
        case MODEL_13B: return "13B";
        case MODEL_30B: return "30B";
        case MODEL_65B: return "65B";
        case MODEL_70B: return "70B";
        default: LLAMA_ASSERT(false);
    }
}

static void llama_model_load_internal(
        const std::string & fname,
        llama_model & model,
        llama_vocab & vocab,
        int n_ctx,
        int n_batch,
        int n_gqa,
        float rms_norm_eps,
        int n_gpu_layers,
        int main_gpu,
        const float * tensor_split,
        const bool mul_mat_q,
        float rope_freq_base,
        float rope_freq_scale,
        bool low_vram,
        ggml_type memory_type,
        bool use_mmap,
        bool use_mlock,
        bool vocab_only,
        llama_progress_callback progress_callback,
        void * progress_callback_user_data) {
    model.t_start_us = ggml_time_us();

    std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));

    vocab = std::move(ml->file_loader->vocab);
    model.hparams = ml->file_loader->hparams;
    model.n_gpu_layers = n_gpu_layers;
    llama_file_version file_version = ml->file_loader->file_version;

    auto & hparams = model.hparams;

    // TODO: read from file
    hparams.f_rms_norm_eps = rms_norm_eps;

    {
        switch (hparams.n_layer) {
            case 26: model.type = e_model::MODEL_3B; break;
            case 32: model.type = e_model::MODEL_7B; break;
            case 40: model.type = e_model::MODEL_13B; break;
            case 60: model.type = e_model::MODEL_30B; break;
            case 80: model.type = e_model::MODEL_65B; break;
            default:
                {
                    if (hparams.n_layer < 32) {
                        model.type = e_model::MODEL_7B;
                    }
                } break;
        }

        hparams.n_ctx = n_ctx;

        // LLaMAv2
        // TODO: temporary until GGUF
        LLAMA_ASSERT(hparams.n_head % n_gqa == 0);
        hparams.n_head_kv = hparams.n_head / n_gqa;
        if (model.type == e_model::MODEL_65B && n_gqa == 8) {
            LLAMA_LOG_WARN("%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
            model.type = e_model::MODEL_70B;
            hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model
        }

        hparams.rope_freq_base  = rope_freq_base;
        hparams.rope_freq_scale = rope_freq_scale;
    }

    // ref: https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/model.py#L194-L199
    const uint32_t n_ff_raw  = 2*(4*hparams.n_embd)/3;
    const uint32_t n_ff_mult = hparams.f_ffn_mult*n_ff_raw;
    const uint32_t n_ff = ((n_ff_mult + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
    //const uint32_t n_ff = 28672;
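    // e.g. 7B: n_embd = 4096 -> n_ff_raw = 10922, rounded up to a multiple of n_mult = 256 -> n_ff = 11008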
    {
        LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(file_version));
        LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
        LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
        LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
        LLAMA_LOG_INFO("%s: n_mult = %u\n", __func__, hparams.n_mult);
        LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
        LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
        LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
        LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
        LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
        LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps);
        LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, n_ff);
        LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
        LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
        LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
        LLAMA_LOG_INFO("%s: model size = %s\n", __func__, llama_model_type_name(model.type));
    }

    if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
        if (hparams.ftype != LLAMA_FTYPE_ALL_F32 &&
            hparams.ftype != LLAMA_FTYPE_MOSTLY_F16 &&
            hparams.ftype != LLAMA_FTYPE_MOSTLY_Q8_0) {
            throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1405)"));
        }
    }

    if (file_version < LLAMA_FILE_VERSION_GGJT_V3) {
        if (hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
            hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 ||
            hparams.ftype == LLAMA_FTYPE_MOSTLY_Q8_0) {
            throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1508)"));
        }
    }

    if (vocab_only) {
        return;
    }

    auto & ctx = model.ctx;

    size_t ctx_size;
    size_t mmapped_size;
    ml->calc_sizes(&ctx_size, &mmapped_size);
    LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);

    // create the ggml context
    {
        model.buf.resize(ctx_size);
        if (use_mlock) {
            model.mlock_buf.init   (model.buf.addr);
            model.mlock_buf.grow_to(model.buf.size);
        }

        struct ggml_init_params params = {
            /*.mem_size   =*/ model.buf.size,
            /*.mem_buffer =*/ model.buf.addr,
            /*.no_alloc   =*/ ml->use_mmap,
        };

        model.ctx = ggml_init(params);
        if (!model.ctx) {
            throw std::runtime_error(format("ggml_init() failed"));
        }
    }

    (void) main_gpu;
    (void) mul_mat_q;
#if defined(GGML_USE_CUBLAS)
    LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration\n", __func__);
    ggml_cuda_set_main_device(main_gpu);
    ggml_cuda_set_mul_mat_q(mul_mat_q);
#define LLAMA_BACKEND_OFFLOAD       GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
#elif defined(GGML_USE_CLBLAST)
    LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
#define LLAMA_BACKEND_OFFLOAD       GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
#else
#define LLAMA_BACKEND_OFFLOAD       GGML_BACKEND_CPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU
#endif

    // prepare memory for the weights
    size_t vram_weights = 0;
    size_t vram_scratch = 0;
    {
        const uint32_t n_embd     = hparams.n_embd;
        const uint32_t n_embd_gqa = hparams.n_embd_gqa();
        const uint32_t n_layer    = hparams.n_layer;
        const uint32_t n_vocab    = hparams.n_vocab;

        ml->ggml_ctx = ctx;

        model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab}, GGML_BACKEND_CPU);

        // "output" tensor
        {
            ggml_backend backend_norm;
            ggml_backend backend_output;
            if (n_gpu_layers > int(n_layer)) { // NOLINT
                // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
                // on Windows however this is detrimental unless everything is on the GPU
#ifndef _WIN32
                backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#else
                backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#endif // _WIN32
                backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
            } else {
                backend_norm = GGML_BACKEND_CPU;
                backend_output = GGML_BACKEND_CPU;
            }

            model.norm   = ml->get_tensor("norm.weight",   {n_embd},          backend_norm);
            model.output = ml->get_tensor("output.weight", {n_embd, n_vocab}, backend_output);
            if (backend_norm == GGML_BACKEND_GPU) {
                vram_weights += ggml_nbytes(model.norm);
            }
            if (backend_output == GGML_BACKEND_GPU_SPLIT) {
                vram_weights += ggml_nbytes(model.output);
            }
        }

        const int i_gpu_start = n_layer - n_gpu_layers;

        model.layers.resize(n_layer);
        for (uint32_t i = 0; i < n_layer; ++i) {
            const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
            const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT

            auto & layer = model.layers[i];

            std::string layers_i = "layers." + std::to_string(i);

            layer.attention_norm = ml->get_tensor(layers_i + ".attention_norm.weight", {n_embd}, backend);

            layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd},     backend_split);
            layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd_gqa}, backend_split);
            layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd_gqa}, backend_split);
            layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd},     backend_split);

            layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend);

            layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd, n_ff}, backend_split);
            layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend_split);
            layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend_split);

            if (backend == GGML_BACKEND_GPU) {
                vram_weights +=
                    ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
                    ggml_nbytes(layer.wv)             + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
                    ggml_nbytes(layer.w1)             + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
            }
        }
    }

    ml->done_getting_tensors();

    // print memory requirements
    {
        const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;

        // this is the total memory required to run the inference
        size_t mem_required =
            ctx_size +
            mmapped_size - vram_weights; // weights in VRAM not in memory

#ifndef LLAMA_USE_ALLOCATOR
        mem_required +=
            MEM_REQ_SCRATCH0(hparams.n_ctx).at(model.type) +
            MEM_REQ_SCRATCH1().at(model.type) +
            MEM_REQ_EVAL().at(model.type);
#endif

        // this is the memory required by one llama_state
        const size_t mem_required_state =
            scale*hparams.kv_size();

        LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
                mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);

        (void) vram_scratch;
        (void) n_batch;
#ifdef GGML_USE_CUBLAS
        if (low_vram) {
            LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
            ggml_cuda_set_scratch_size(0); // disable scratch
        } else {
            const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type);
            const size_t vram_scratch_per_context = VRAM_REQ_SCRATCH_PER_CONTEXT().at(model.type);
            vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context);
  1106. ggml_cuda_set_scratch_size(vram_scratch);
  1107. if (n_gpu_layers > 0) {
  1108. LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
  1109. __func__, vram_scratch_base / kB, vram_scratch_per_context,
  1110. (vram_scratch + MB - 1) / MB); // round up
  1111. }
  1112. }
  1113. #endif // GGML_USE_CUBLAS
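// Rough illustration of the scratch formula above, with made-up constants (the real per-model
// values come from VRAM_REQ_SCRATCH_BASE / VRAM_REQ_SCRATCH_PER_CONTEXT): with n_batch = 512,
// a base of 512 kB and 128 B per context slot at n_ctx = 2048,
// vram_scratch = 512 * (512*1024 + 2048*128) bytes = 384 MB.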
  1114. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
  1115. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  1116. LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  1117. if (n_gpu_layers > (int) hparams.n_layer) {
  1118. LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
  1119. }
  1120. size_t vram_kv_cache = 0;
  1121. #ifdef GGML_USE_CUBLAS
  1122. const int max_backend_supported_layers = hparams.n_layer + 3;
  1123. const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
  1124. if (n_gpu_layers > (int) hparams.n_layer + 1) {
  1125. if (low_vram) {
  1126. LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
  1127. } else {
  1128. LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
  1129. vram_kv_cache += hparams.kv_size() / 2;
  1130. }
  1131. }
  1132. if (n_gpu_layers > (int) hparams.n_layer + 2) {
  1133. if (low_vram) {
  1134. LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
  1135. } else {
  1136. LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
  1137. vram_kv_cache += hparams.kv_size() / 2;
  1138. }
  1139. }
  1140. #elif defined(GGML_USE_CLBLAST)
  1141. const int max_backend_supported_layers = hparams.n_layer + 1;
  1142. const int max_offloadable_layers = hparams.n_layer + 1;
  1143. #endif // GGML_USE_CUBLAS
  1144. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n",
  1145. __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
  1146. LLAMA_LOG_INFO("%s: total VRAM used: %zu MB\n",
  1147. __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
  1148. #else
  1149. (void) n_gpu_layers;
  1150. #endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
  1151. }
  1152. // populate `tensors_by_name`
  1153. for (llama_load_tensor & lt : ml->tensors_map.tensors) {
  1154. model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor);
  1155. }
  1156. (void) tensor_split;
  1157. #if defined(GGML_USE_CUBLAS)
  1158. {
  1159. ggml_cuda_set_tensor_split(tensor_split);
  1160. }
  1161. #endif
  1162. ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);
  1163. if (progress_callback) {
  1164. progress_callback(1.0f, progress_callback_user_data);
  1165. }
  1166. model.mapping = std::move(ml->mapping);
1167. // loading time will be recalculated after the first eval, so
  1168. // we take page faults deferred by mmap() into consideration
  1169. model.t_load_us = ggml_time_us() - model.t_start_us;
  1170. }
  1171. static bool llama_model_load(
  1172. const std::string & fname,
  1173. llama_model & model,
  1174. llama_vocab & vocab,
  1175. int n_ctx,
  1176. int n_batch,
  1177. int n_gqa,
  1178. float rms_norm_eps,
  1179. int n_gpu_layers,
  1180. int main_gpu,
  1181. const float * tensor_split,
  1182. const bool mul_mat_q,
  1183. float rope_freq_base,
  1184. float rope_freq_scale,
  1185. bool low_vram,
  1186. ggml_type memory_type,
  1187. bool use_mmap,
  1188. bool use_mlock,
  1189. bool vocab_only,
  1190. llama_progress_callback progress_callback,
  1191. void *progress_callback_user_data) {
  1192. try {
  1193. llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers,
  1194. main_gpu, tensor_split, mul_mat_q, rope_freq_base, rope_freq_scale, low_vram, memory_type,
  1195. use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
  1196. return true;
  1197. } catch (const std::exception & err) {
  1198. LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
  1199. return false;
  1200. }
  1201. }
  1202. static struct ggml_cgraph * llama_build_graph(
  1203. llama_context & lctx,
  1204. const llama_token * tokens,
  1205. const float * embd,
  1206. int n_tokens,
  1207. int n_past) {
  1208. LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
  1209. const int N = n_tokens;
  1210. const auto & model = lctx.model;
  1211. const auto & hparams = model.hparams;
  1212. const auto & kv_self = lctx.kv_self;
  1213. LLAMA_ASSERT(!!kv_self.ctx);
  1214. const int64_t n_embd = hparams.n_embd;
  1215. const int64_t n_layer = hparams.n_layer;
  1216. const int64_t n_ctx = hparams.n_ctx;
  1217. const int64_t n_head = hparams.n_head;
  1218. const int64_t n_head_kv = hparams.n_head_kv;
  1219. const int64_t n_embd_head = hparams.n_embd_head();
  1220. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  1221. LLAMA_ASSERT(n_embd_head == hparams.n_rot);
  1222. const float freq_base = hparams.rope_freq_base;
  1223. const float freq_scale = hparams.rope_freq_scale;
  1224. const float rms_norm_eps = hparams.f_rms_norm_eps;
  1225. const int n_gpu_layers = model.n_gpu_layers;
  1226. auto & mem_per_token = lctx.mem_per_token;
  1227. auto & buf_compute = lctx.buf_compute;
  1228. struct ggml_init_params params = {
  1229. /*.mem_size =*/ buf_compute.size,
  1230. /*.mem_buffer =*/ buf_compute.addr,
  1231. /*.no_alloc =*/ false,
  1232. };
  1233. #ifdef LLAMA_USE_ALLOCATOR
  1234. params.no_alloc = true;
  1235. #endif
  1236. struct ggml_context * ctx0 = ggml_init(params);
  1237. ggml_cgraph * gf = ggml_new_graph(ctx0);
  1238. struct ggml_tensor * cur;
  1239. struct ggml_tensor * inpL;
  1240. if (tokens) {
  1241. struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
  1242. #ifdef LLAMA_USE_ALLOCATOR
  1243. ggml_allocr_alloc(lctx.alloc, inp_tokens);
  1244. if (!ggml_allocr_is_measure(lctx.alloc)) {
  1245. memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
  1246. }
  1247. #else
  1248. memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
  1249. #endif
  1250. ggml_set_name(inp_tokens, "inp_tokens");
  1251. inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
  1252. } else {
  1253. #ifdef GGML_USE_MPI
  1254. GGML_ASSERT(false && "not implemented");
  1255. #endif
  1256. inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
  1257. #ifdef LLAMA_USE_ALLOCATOR
  1258. ggml_allocr_alloc(lctx.alloc, inpL);
  1259. if (!ggml_allocr_is_measure(lctx.alloc)) {
  1260. memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
  1261. }
  1262. #else
  1263. memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
  1264. #endif
  1265. }
  1266. const int i_gpu_start = n_layer - n_gpu_layers;
  1267. (void) i_gpu_start;
  1268. // offload functions set the tensor output backend to GPU
  1269. // tensors are GPU-accelerated if any input or the output has been offloaded
  1270. //
  1271. // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
  1272. // in that case ggml_cuda_assign_buffers has no effect
  1273. offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
  1274. offload_func_t offload_func_kq = llama_nop;
  1275. offload_func_t offload_func_v = llama_nop;
  1276. #ifdef GGML_USE_CUBLAS
  1277. if (n_gpu_layers > n_layer) {
  1278. offload_func_nr = ggml_cuda_assign_buffers;
  1279. }
  1280. if (n_gpu_layers > n_layer + 1) {
  1281. offload_func_v = ggml_cuda_assign_buffers;
  1282. }
  1283. if (n_gpu_layers > n_layer + 2) {
  1284. offload_func_kq = ggml_cuda_assign_buffers;
  1285. }
  1286. #endif // GGML_USE_CUBLAS
  1287. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  1288. #ifdef LLAMA_USE_ALLOCATOR
  1289. ggml_allocr_alloc(lctx.alloc, KQ_scale);
  1290. if (!ggml_allocr_is_measure(lctx.alloc)) {
  1291. ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
  1292. }
  1293. #else
  1294. ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
  1295. #endif
  1296. ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
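// For reference: with typical 7B-scale hyperparameters (n_embd = 4096, n_head = 32) this gives
// n_embd_head = 128 and a scale of 1/sqrt(128) ~= 0.0884, applied to the raw K*Q scores below.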
  1297. for (int il = 0; il < n_layer; ++il) {
  1298. ggml_format_name(inpL, "layer_inp_%d", il);
  1299. offload_func_t offload_func = llama_nop;
  1300. #ifdef GGML_USE_CUBLAS
  1301. if (il >= i_gpu_start) {
  1302. offload_func = ggml_cuda_assign_buffers;
  1303. }
  1304. #endif // GGML_USE_CUBLAS
  1305. struct ggml_tensor * inpSA = inpL;
  1306. lctx.use_buf(ctx0, 0);
  1307. // norm
  1308. {
  1309. cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
  1310. offload_func(cur);
  1311. ggml_set_name(cur, "rms_norm_0");
  1312. // cur = cur*attention_norm(broadcasted)
  1313. cur = ggml_mul(ctx0, cur, model.layers[il].attention_norm);
  1314. offload_func(cur);
  1315. ggml_set_name(cur, "attention_norm_0");
  1316. }
  1317. // self-attention
  1318. {
  1319. // compute Q and K and RoPE them
  1320. struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  1321. offload_func_kq(tmpk);
  1322. ggml_set_name(tmpk, "tmpk");
  1323. struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  1324. offload_func_kq(tmpq);
  1325. ggml_set_name(tmpq, "tmpq");
  1326. struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
  1327. offload_func_kq(Kcur);
  1328. ggml_set_name(Kcur, "Kcur");
  1329. struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
  1330. offload_func_kq(Qcur);
  1331. ggml_set_name(Qcur, "Qcur");
  1332. // store key and value to memory
  1333. {
1334. // compute the transposed [N, n_embd_gqa] V matrix
  1335. struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  1336. offload_func_v(tmpv);
  1337. ggml_set_name(tmpv, "tmpv");
  1338. struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N));
  1339. offload_func_v(Vcur);
  1340. ggml_set_name(Vcur, "Vcur");
  1341. struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
  1342. offload_func_kq(k);
  1343. ggml_set_name(k, "k");
  1344. struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
  1345. ( n_ctx)*ggml_element_size(kv_self.v),
  1346. (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
  1347. offload_func_v(v);
  1348. ggml_set_name(v, "v");
  1349. // important: storing RoPE-ed version of K in the KV cache!
  1350. ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
  1351. ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
  1352. }
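// Cache layout note: k is addressed as a flat [n_layer][n_ctx][n_embd_gqa] array, so the 1d view
// above starts at element n_embd_gqa*(il*n_ctx + n_past) and spans N*n_embd_gqa elements.
// v is stored transposed, [n_layer][n_embd_gqa][n_ctx], hence the 2d view with a row stride of
// n_ctx elements and a per-layer offset of il*n_ctx*n_embd_gqa elements (plus n_past within each row).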
  1353. struct ggml_tensor * Q =
  1354. ggml_permute(ctx0,
  1355. Qcur,
  1356. 0, 2, 1, 3);
  1357. offload_func_kq(Q);
  1358. ggml_set_name(Q, "Q");
  1359. struct ggml_tensor * K =
  1360. ggml_view_3d(ctx0, kv_self.k,
  1361. n_embd_head, n_past + N, n_head_kv,
  1362. ggml_element_size(kv_self.k)*n_embd_gqa,
  1363. ggml_element_size(kv_self.k)*n_embd_head,
  1364. ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
  1365. offload_func_kq(K);
  1366. ggml_set_name(K, "K");
  1367. // K * Q
  1368. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  1369. offload_func_kq(KQ);
  1370. ggml_set_name(KQ, "KQ");
  1371. // KQ_scaled = KQ / sqrt(n_embd_head)
  1372. // KQ_scaled shape [n_past + N, N, n_head, 1]
  1373. struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
  1374. offload_func_kq(KQ_scaled);
  1375. ggml_set_name(KQ_scaled, "KQ_scaled");
  1376. // KQ_masked = mask_past(KQ_scaled)
  1377. struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
  1378. offload_func_kq(KQ_masked);
  1379. ggml_set_name(KQ_masked, "KQ_masked");
  1380. // KQ = soft_max(KQ_masked)
  1381. struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
  1382. offload_func_v(KQ_soft_max);
  1383. ggml_set_name(KQ_soft_max, "KQ_soft_max");
  1384. // split cached V into n_head heads
  1385. struct ggml_tensor * V =
  1386. ggml_view_3d(ctx0, kv_self.v,
  1387. n_past + N, n_embd_head, n_head_kv,
  1388. ggml_element_size(kv_self.v)*n_ctx,
  1389. ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
  1390. ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
  1391. offload_func_v(V);
  1392. ggml_set_name(V, "V");
  1393. #if 1
  1394. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
  1395. offload_func_v(KQV);
  1396. ggml_set_name(KQV, "KQV");
  1397. #else
1398. // make V contiguous in memory to speed up the matmul; however, we waste time on the copy
1399. // on M1 this is faster for the perplexity computation, but ~5% slower for single-token generation
  1400. // is there a better way?
  1401. struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head));
  1402. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
  1403. #endif
  1404. // KQV_merged = KQV.permute(0, 2, 1, 3)
  1405. struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  1406. offload_func_v(KQV_merged);
  1407. ggml_set_name(KQV_merged, "KQV_merged");
  1408. // cur = KQV_merged.contiguous().view(n_embd, N)
  1409. cur = ggml_cpy(ctx0,
  1410. KQV_merged,
  1411. ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
  1412. offload_func_v(cur);
  1413. ggml_set_name(cur, "KQV_merged_contiguous");
  1414. // projection (no bias)
  1415. cur = ggml_mul_mat(ctx0,
  1416. model.layers[il].wo,
  1417. cur);
  1418. offload_func(cur);
  1419. ggml_set_name(cur, "result_wo");
  1420. }
  1421. lctx.use_buf(ctx0, 1);
  1422. struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
  1423. offload_func(inpFF);
  1424. ggml_set_name(inpFF, "inpFF");
  1425. // feed-forward network
  1426. {
  1427. // norm
  1428. {
  1429. cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps);
  1430. offload_func(cur);
  1431. ggml_set_name(cur, "rms_norm_1");
  1432. // cur = cur*ffn_norm(broadcasted)
  1433. cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
  1434. offload_func(cur);
  1435. ggml_set_name(cur, "ffn_norm");
  1436. }
  1437. struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
  1438. model.layers[il].w3,
  1439. cur);
  1440. offload_func(tmp);
  1441. ggml_set_name(tmp, "result_w3");
  1442. cur = ggml_mul_mat(ctx0,
  1443. model.layers[il].w1,
  1444. cur);
  1445. offload_func(cur);
  1446. ggml_set_name(cur, "result_w1");
  1447. // SILU activation
  1448. cur = ggml_silu(ctx0, cur);
  1449. offload_func(cur);
  1450. ggml_set_name(cur, "silu");
  1451. cur = ggml_mul(ctx0, cur, tmp);
  1452. offload_func(cur);
  1453. ggml_set_name(cur, "silu_x_result_w3");
  1454. cur = ggml_mul_mat(ctx0,
  1455. model.layers[il].w2,
  1456. cur);
  1457. offload_func(cur);
  1458. ggml_set_name(cur, "result_w2");
  1459. }
  1460. cur = ggml_add(ctx0, cur, inpFF);
  1461. offload_func(cur);
  1462. ggml_set_name(cur, "inpFF_+_result_w2");
  1463. // input for next layer
  1464. inpL = cur;
  1465. }
  1466. lctx.use_buf(ctx0, 0);
  1467. // norm
  1468. {
  1469. cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
  1470. offload_func_nr(cur);
  1471. ggml_set_name(cur, "rms_norm_2");
  1472. // cur = cur*norm(broadcasted)
  1473. cur = ggml_mul(ctx0, cur, model.norm);
  1474. // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
  1475. ggml_set_name(cur, "result_norm");
  1476. }
  1477. // lm_head
  1478. cur = ggml_mul_mat(ctx0, model.output, cur);
  1479. ggml_set_name(cur, "result_output");
  1480. lctx.use_buf(ctx0, -1);
  1481. // logits -> probs
  1482. //cur = ggml_soft_max_inplace(ctx0, cur);
  1483. ggml_build_forward_expand(gf, cur);
  1484. if (mem_per_token == 0) {
  1485. mem_per_token = ggml_used_mem(ctx0)/N;
  1486. }
  1487. #if 0
  1488. LLAMA_LOG_INFO("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
  1489. ggml_used_mem(ctx0)/1024.0/1024.0,
  1490. lctx.get_buf_max_mem(0)/1024.0/1024.0,
  1491. lctx.get_buf_max_mem(1)/1024.0/1024.0,
  1492. lctx.work_buffer.size()/1024.0/1024.0,
  1493. n_past, N);
  1494. #endif
  1495. ggml_free(ctx0);
  1496. return gf;
  1497. }
  1498. // evaluate the transformer
  1499. //
  1500. // - lctx: llama context
  1501. // - tokens: new batch of tokens to process
1502. // - embd: embeddings input
1503. // - n_tokens: number of tokens
  1504. // - n_past: the context size so far
  1505. // - n_threads: number of threads to use
  1506. //
  1507. static bool llama_eval_internal(
  1508. llama_context & lctx,
  1509. const llama_token * tokens,
  1510. const float * embd,
  1511. int n_tokens,
  1512. int n_past,
  1513. int n_threads,
  1514. const char * cgraph_fname) {
  1515. LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
  1516. LLAMA_ASSERT(n_tokens > 0);
  1517. LLAMA_ASSERT(n_past >= 0);
  1518. LLAMA_ASSERT(n_threads > 0);
  1519. // TODO: keep the values of n_batch and n_ctx
  1520. // LLAMA_ASSERT(n_tokens <= n_batch);
  1521. // LLAMA_ASSERT(n_past + n_tokens <= n_ctx);
  1522. const int64_t t_start_us = ggml_time_us();
  1523. #ifdef GGML_USE_MPI
  1524. ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
  1525. #endif
  1526. const int N = n_tokens;
  1527. const auto & model = lctx.model;
  1528. const auto & hparams = model.hparams;
  1529. const auto & kv_self = lctx.kv_self;
  1530. LLAMA_ASSERT(!!kv_self.ctx);
  1531. const int64_t n_embd = hparams.n_embd;
  1532. const int64_t n_vocab = hparams.n_vocab;
  1533. #ifdef LLAMA_USE_ALLOCATOR
  1534. ggml_allocr_reset(lctx.alloc);
  1535. #endif
  1536. ggml_cgraph * gf = llama_build_graph(lctx, tokens, embd, n_tokens, n_past);
  1537. #ifdef LLAMA_USE_ALLOCATOR
  1538. ggml_allocr_alloc_graph(lctx.alloc, gf);
  1539. #endif
  1540. // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
  1541. // for big prompts, if BLAS is enabled, it is better to use only one thread
1542. // otherwise, the threads spin-lock while waiting for the BLAS calls, which degrades performance
  1543. n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
  1544. struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
  1545. struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
  1546. LLAMA_ASSERT(strcmp(res->name, "result_output") == 0);
  1547. LLAMA_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
  1548. #if GGML_USE_MPI
  1549. const int64_t n_layer = hparams.n_layer;
  1550. ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
  1551. #endif
  1552. #ifdef GGML_USE_METAL
  1553. if (lctx.ctx_metal) {
  1554. ggml_metal_set_n_cb (lctx.ctx_metal, n_threads);
  1555. ggml_metal_graph_compute(lctx.ctx_metal, gf);
  1556. ggml_metal_get_tensor (lctx.ctx_metal, res);
  1557. if (!lctx.embedding.empty()) {
  1558. ggml_metal_get_tensor(lctx.ctx_metal, embeddings);
  1559. }
  1560. } else {
  1561. ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
  1562. }
  1563. #else
  1564. ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
  1565. #endif
  1566. #if GGML_USE_MPI
  1567. ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
  1568. #endif
  1569. // update kv token count
  1570. lctx.kv_self.n = n_past + N;
  1571. if (cgraph_fname) {
  1572. ggml_graph_export(gf, cgraph_fname);
  1573. }
  1574. #ifdef GGML_PERF
  1575. // print timing information per ggml operation (for debugging purposes)
  1576. // requires GGML_PERF to be defined
  1577. ggml_graph_print(gf);
  1578. #endif
  1579. // plot the computation graph in dot format (for debugging purposes)
  1580. //if (n_past%100 == 0) {
  1581. // ggml_graph_dump_dot(gf, NULL, "llama.dot");
  1582. //}
  1583. // extract logits
  1584. {
  1585. auto & logits_out = lctx.logits;
  1586. if (lctx.logits_all) {
  1587. logits_out.resize(n_vocab * N);
  1588. memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*N);
  1589. } else {
  1590. // return result for just the last token
  1591. logits_out.resize(n_vocab);
  1592. memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
  1593. }
  1594. }
  1595. // extract embeddings
  1596. if (!lctx.embedding.empty()) {
  1597. auto & embedding_out = lctx.embedding;
  1598. embedding_out.resize(n_embd);
  1599. memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
  1600. }
  1601. // measure the performance only for the single-token evals
  1602. if (N == 1) {
  1603. lctx.t_eval_us += ggml_time_us() - t_start_us;
  1604. lctx.n_eval++;
  1605. }
  1606. else if (N > 1) {
  1607. lctx.t_p_eval_us += ggml_time_us() - t_start_us;
  1608. lctx.n_p_eval += N;
  1609. }
  1610. return true;
  1611. }
  1612. //
  1613. // tokenizer
  1614. //
  1615. static size_t utf8_len(char src) {
  1616. const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
  1617. uint8_t highbits = static_cast<uint8_t>(src) >> 4;
  1618. return lookup[highbits];
  1619. }
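// The table is indexed by the high nibble of the leading byte: e.g. 'a' (0x61) -> 1 byte,
// 0xC3 -> 2 bytes, 0xE2 -> 3 bytes, 0xF0 -> 4 bytes; continuation bytes (0x80-0xBF) map to 1
// so that a malformed leading byte still advances the scan.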
  1620. struct llama_sp_symbol {
  1621. using index = int;
  1622. index prev;
  1623. index next;
  1624. const char * text;
  1625. size_t n;
  1626. };
  1627. static_assert(std::is_trivially_copyable<llama_sp_symbol>::value, "llama_sp_symbol is not trivially copyable");
  1628. struct llama_sp_bigram {
  1629. struct comparator {
  1630. bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) {
  1631. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  1632. }
  1633. };
  1634. using queue_storage = std::vector<llama_sp_bigram>;
  1635. using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
  1636. llama_sp_symbol::index left;
  1637. llama_sp_symbol::index right;
  1638. float score;
  1639. size_t size;
  1640. };
  1641. // original implementation:
  1642. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  1643. struct llama_tokenizer {
  1644. llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}
  1645. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  1646. // split string into utf8 chars
  1647. int index = 0;
  1648. size_t offs = 0;
  1649. while (offs < text.size()) {
  1650. llama_sp_symbol sym;
  1651. size_t char_len = std::min(text.size() - offs, utf8_len(text[offs]));
  1652. sym.text = text.c_str() + offs;
  1653. sym.n = char_len;
  1654. offs += char_len;
  1655. sym.prev = index - 1;
  1656. sym.next = offs == text.size() ? -1 : index + 1;
  1657. index++;
  1658. symbols_.emplace_back(sym);
  1659. }
  1660. // seed the work queue with all possible 2-character tokens.
  1661. for (size_t i = 1; i < symbols_.size(); ++i) {
  1662. try_add_bigram(i - 1, i);
  1663. }
1664. // keep substituting the highest-scoring pairs for as long as we can.
  1665. while (!work_queue_.empty()) {
  1666. auto bigram = work_queue_.top();
  1667. work_queue_.pop();
  1668. auto & left_sym = symbols_[bigram.left];
  1669. auto & right_sym = symbols_[bigram.right];
  1670. // if one of the symbols already got merged, skip it.
  1671. if (left_sym.n == 0 || right_sym.n == 0 ||
  1672. left_sym.n + right_sym.n != bigram.size) {
  1673. continue;
  1674. }
  1675. // merge the right sym into the left one
  1676. left_sym.n += right_sym.n;
  1677. right_sym.n = 0;
  1678. //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  1679. // remove the right sym from the chain
  1680. left_sym.next = right_sym.next;
  1681. if (right_sym.next >= 0) {
  1682. symbols_[right_sym.next].prev = bigram.left;
  1683. }
  1684. // find more substitutions
  1685. try_add_bigram(left_sym.prev, bigram.left);
  1686. try_add_bigram(bigram.left, left_sym.next);
  1687. }
  1688. for (int i = 0; i != -1; i = symbols_[i].next) {
  1689. auto & symbol = symbols_[i];
  1690. auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));
  1691. if (token == vocab_.token_to_id.end()) {
  1692. // output any symbols that did not form tokens as bytes.
  1693. for (int j = 0; j < (int) symbol.n; ++j) {
1694. // NOTE: old version, before #2420 - not sure what the implications of this are
  1695. //llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
  1696. llama_vocab::id token_id = vocab_.token_to_id.at(std::string(1, symbol.text[j]));
  1697. output.push_back(token_id);
  1698. }
  1699. } else {
  1700. output.push_back((*token).second);
  1701. }
  1702. }
  1703. }
  1704. private:
  1705. void try_add_bigram(int left, int right) {
  1706. if (left == -1 || right == -1) {
  1707. return;
  1708. }
  1709. const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
  1710. auto token = vocab_.token_to_id.find(text);
  1711. if (token == vocab_.token_to_id.end()) {
  1712. return;
  1713. }
  1714. if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
  1715. return;
  1716. }
  1717. const auto &tok_score = vocab_.id_to_token[(*token).second];
  1718. llama_sp_bigram bigram;
  1719. bigram.left = left;
  1720. bigram.right = right;
  1721. bigram.score = tok_score.score;
  1722. bigram.size = text.size();
  1723. work_queue_.push(bigram);
  1724. }
  1725. const llama_vocab & vocab_;
  1726. std::vector<llama_sp_symbol> symbols_;
  1727. llama_sp_bigram::queue work_queue_;
  1728. };
  1729. static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) {
  1730. llama_tokenizer tokenizer(vocab);
  1731. std::vector<llama_vocab::id> output;
  1732. if (text.empty()) {
  1733. return output;
  1734. }
  1735. if (bos) {
  1736. output.push_back(llama_token_bos());
  1737. }
  1738. tokenizer.tokenize(text, output);
  1739. return output;
  1740. }
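// Usage sketch (internal helper; callers outside this file go through the public C API instead):
//   std::vector<llama_vocab::id> ids = llama_tokenize(vocab, "Hello world", /*bos=*/true);
// would produce the BOS token id followed by the sentencepiece pieces covering "Hello world".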
  1741. //
  1742. // grammar - internal
  1743. //
  1744. struct llama_partial_utf8 {
  1745. uint32_t value; // bit value so far (unshifted)
  1746. int n_remain; // num bytes remaining; -1 indicates invalid sequence
  1747. };
  1748. struct llama_grammar {
  1749. const std::vector<std::vector<llama_grammar_element>> rules;
  1750. std::vector<std::vector<const llama_grammar_element *>> stacks;
  1751. // buffer for partially generated UTF-8 sequence from accepted tokens
  1752. llama_partial_utf8 partial_utf8;
  1753. };
  1754. struct llama_grammar_candidate {
  1755. size_t index;
  1756. const uint32_t * code_points;
  1757. llama_partial_utf8 partial_utf8;
  1758. };
1759. // Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 so the result
1760. // can be scanned with a pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
  1761. std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
  1762. const char * src,
  1763. llama_partial_utf8 partial_start) {
  1764. static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
  1765. const char * pos = src;
  1766. std::vector<uint32_t> code_points;
  1767. uint32_t value = partial_start.value;
  1768. int n_remain = partial_start.n_remain;
  1769. // continue previous decode, if applicable
  1770. while (*pos != 0 && n_remain > 0) {
  1771. uint8_t next_byte = static_cast<uint8_t>(*pos);
  1772. if ((next_byte >> 6) != 2) {
  1773. // invalid sequence, abort
  1774. code_points.push_back(0);
  1775. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
  1776. }
  1777. value = (value << 6) + (next_byte & 0x3F);
  1778. ++pos;
  1779. --n_remain;
  1780. }
  1781. if (partial_start.n_remain > 0 && n_remain == 0) {
  1782. code_points.push_back(value);
  1783. }
  1784. // decode any subsequent utf-8 sequences, which may end in an incomplete one
  1785. while (*pos != 0) {
  1786. uint8_t first_byte = static_cast<uint8_t>(*pos);
  1787. uint8_t highbits = first_byte >> 4;
  1788. n_remain = lookup[highbits] - 1;
  1789. if (n_remain < 0) {
  1790. // invalid sequence, abort
  1791. code_points.clear();
  1792. code_points.push_back(0);
  1793. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
  1794. }
  1795. uint8_t mask = (1 << (7 - n_remain)) - 1;
  1796. value = first_byte & mask;
  1797. ++pos;
  1798. while (*pos != 0 && n_remain > 0) {
  1799. value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
  1800. ++pos;
  1801. --n_remain;
  1802. }
  1803. if (n_remain == 0) {
  1804. code_points.push_back(value);
  1805. }
  1806. }
  1807. code_points.push_back(0);
  1808. return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
  1809. }
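// Worked example: decode_utf8("\xC3\xA9", {0, 0}) yields code point 0xE9 ("é") plus the
// terminating 0. If the input stops after "\xC3", the returned partial_utf8 is
// { value = 0x03, n_remain = 1 }; passing it back in with the remaining "\xA9" completes the
// code point as (0x03 << 6) | (0xA9 & 0x3F) = 0xE9.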
  1810. // returns true iff pos points to the end of one of the definitions of a rule
  1811. static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
  1812. switch (pos->type) {
  1813. case LLAMA_GRETYPE_END: return true;
  1814. case LLAMA_GRETYPE_ALT: return true;
  1815. default: return false;
  1816. }
  1817. }
  1818. // returns true iff chr satisfies the char range at pos (regular or inverse range)
  1819. // asserts that pos is pointing to a char range element
  1820. static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
  1821. const llama_grammar_element * pos,
  1822. const uint32_t chr) {
  1823. bool found = false;
  1824. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  1825. LLAMA_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  1826. do {
  1827. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  1828. // inclusive range, e.g. [a-z]
  1829. found = found || (pos->value <= chr && chr <= pos[1].value);
  1830. pos += 2;
  1831. } else {
  1832. // exact char match, e.g. [a] or "a"
  1833. found = found || pos->value == chr;
  1834. pos += 1;
  1835. }
  1836. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  1837. return std::make_pair(found == is_positive_char, pos);
  1838. }
  1839. // returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
  1840. // range at pos (regular or inverse range)
  1841. // asserts that pos is pointing to a char range element
  1842. static bool llama_grammar_match_partial_char(
  1843. const llama_grammar_element * pos,
  1844. const llama_partial_utf8 partial_utf8) {
  1845. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  1846. LLAMA_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  1847. uint32_t partial_value = partial_utf8.value;
  1848. int n_remain = partial_utf8.n_remain;
  1849. // invalid sequence or 7-bit char split across 2 bytes (overlong)
  1850. if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
  1851. return false;
  1852. }
  1853. // range of possible code points this partial UTF-8 sequence could complete to
  1854. uint32_t low = partial_value << (n_remain * 6);
  1855. uint32_t high = low | ((1 << (n_remain * 6)) - 1);
  1856. if (low == 0) {
  1857. if (n_remain == 2) {
  1858. low = 1 << 11;
  1859. } else if (n_remain == 3) {
  1860. low = 1 << 16;
  1861. }
  1862. }
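// e.g. for the partial sequence "\xC3" (value = 0x03, n_remain = 1) this gives low = 0xC0 and
// high = 0xFF, i.e. the only code points it can still complete to lie in [U+00C0, U+00FF].
// The adjustment above lifts low to the smallest code point a 3- or 4-byte sequence may legally
// encode (U+0800 or U+10000) when the leading byte contributed no value bits.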
  1863. do {
  1864. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  1865. // inclusive range, e.g. [a-z]
  1866. if (pos->value <= high && low <= pos[1].value) {
  1867. return is_positive_char;
  1868. }
  1869. pos += 2;
  1870. } else {
  1871. // exact char match, e.g. [a] or "a"
  1872. if (low <= pos->value && pos->value <= high) {
  1873. return is_positive_char;
  1874. }
  1875. pos += 1;
  1876. }
  1877. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  1878. return !is_positive_char;
  1879. }
  1880. // transforms a grammar pushdown stack into N possible stacks, all ending
  1881. // at a character range (terminal element)
  1882. static void llama_grammar_advance_stack(
  1883. const std::vector<std::vector<llama_grammar_element>> & rules,
  1884. const std::vector<const llama_grammar_element *> & stack,
  1885. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  1886. if (stack.empty()) {
  1887. new_stacks.push_back(stack);
  1888. return;
  1889. }
  1890. const llama_grammar_element * pos = stack.back();
  1891. switch (pos->type) {
  1892. case LLAMA_GRETYPE_RULE_REF: {
  1893. const size_t rule_id = static_cast<size_t>(pos->value);
  1894. const llama_grammar_element * subpos = rules[rule_id].data();
  1895. do {
  1896. // init new stack without the top (pos)
  1897. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  1898. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  1899. // if this rule ref is followed by another element, add that to stack
  1900. new_stack.push_back(pos + 1);
  1901. }
  1902. if (!llama_grammar_is_end_of_sequence(subpos)) {
  1903. // if alternate is nonempty, add to stack
  1904. new_stack.push_back(subpos);
  1905. }
  1906. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  1907. while (!llama_grammar_is_end_of_sequence(subpos)) {
  1908. // scan to end of alternate def
  1909. subpos++;
  1910. }
  1911. if (subpos->type == LLAMA_GRETYPE_ALT) {
  1912. // there's another alternate def of this rule to process
  1913. subpos++;
  1914. } else {
  1915. break;
  1916. }
  1917. } while (true);
  1918. break;
  1919. }
  1920. case LLAMA_GRETYPE_CHAR:
  1921. case LLAMA_GRETYPE_CHAR_NOT:
  1922. new_stacks.push_back(stack);
  1923. break;
  1924. default:
  1925. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  1926. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  1927. // those
  1928. LLAMA_ASSERT(false);
  1929. }
  1930. }
  1931. // takes a set of possible pushdown stacks on a grammar, which are required to
  1932. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  1933. // produces the N possible stacks if the given char is accepted at those
  1934. // positions
  1935. static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
  1936. const std::vector<std::vector<llama_grammar_element>> & rules,
  1937. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  1938. const uint32_t chr) {
  1939. std::vector<std::vector<const llama_grammar_element *>> new_stacks;
  1940. for (const auto & stack : stacks) {
  1941. if (stack.empty()) {
  1942. continue;
  1943. }
  1944. auto match = llama_grammar_match_char(stack.back(), chr);
  1945. if (match.first) {
  1946. const llama_grammar_element * pos = match.second;
  1947. // update top of stack to next element, if any
  1948. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  1949. if (!llama_grammar_is_end_of_sequence(pos)) {
  1950. new_stack.push_back(pos);
  1951. }
  1952. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  1953. }
  1954. }
  1955. return new_stacks;
  1956. }
  1957. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  1958. const std::vector<std::vector<llama_grammar_element>> & rules,
  1959. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  1960. const std::vector<llama_grammar_candidate> & candidates);
  1961. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  1962. const std::vector<std::vector<llama_grammar_element>> & rules,
  1963. const std::vector<const llama_grammar_element *> & stack,
  1964. const std::vector<llama_grammar_candidate> & candidates) {
  1965. std::vector<llama_grammar_candidate> rejects;
  1966. if (stack.empty()) {
  1967. for (auto tok : candidates) {
  1968. if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
  1969. rejects.push_back(tok);
  1970. }
  1971. }
  1972. return rejects;
  1973. }
  1974. const llama_grammar_element * stack_pos = stack.back();
  1975. std::vector<llama_grammar_candidate> next_candidates;
  1976. for (auto tok : candidates) {
  1977. if (*tok.code_points == 0) {
  1978. // reached end of full codepoints in token, reject iff it ended in a partial sequence
  1979. // that cannot satisfy this position in grammar
  1980. if (tok.partial_utf8.n_remain != 0 &&
  1981. !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
  1982. rejects.push_back(tok);
  1983. }
  1984. } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
  1985. next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
  1986. } else {
  1987. rejects.push_back(tok);
  1988. }
  1989. }
  1990. auto stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  1991. // update top of stack to next element, if any
  1992. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  1993. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  1994. stack_after.push_back(stack_pos_after);
  1995. }
  1996. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  1997. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  1998. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  1999. for (auto tok : next_rejects) {
  2000. rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
  2001. }
  2002. return rejects;
  2003. }
  2004. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  2005. const std::vector<std::vector<llama_grammar_element>> & rules,
  2006. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  2007. const std::vector<llama_grammar_candidate> & candidates) {
  2008. LLAMA_ASSERT(!stacks.empty()); // REVIEW
  2009. if (candidates.empty()) {
  2010. return std::vector<llama_grammar_candidate>();
  2011. }
  2012. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  2013. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  2014. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  2015. }
  2016. return rejects;
  2017. }
  2018. //
  2019. // grammar - external
  2020. //
  2021. struct llama_grammar * llama_grammar_init(
  2022. const llama_grammar_element ** rules,
  2023. size_t n_rules,
  2024. size_t start_rule_index) {
  2025. const llama_grammar_element * pos;
  2026. // copy rule definitions into vectors
  2027. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  2028. for (size_t i = 0; i < n_rules; i++) {
  2029. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  2030. vec_rules[i].push_back(*pos);
  2031. }
  2032. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  2033. }
  2034. // loop over alternates of start rule to build initial stacks
  2035. std::vector<std::vector<const llama_grammar_element *>> stacks;
  2036. pos = rules[start_rule_index];
  2037. do {
  2038. std::vector<const llama_grammar_element *> stack;
  2039. if (!llama_grammar_is_end_of_sequence(pos)) {
  2040. // if alternate is nonempty, add to stack
  2041. stack.push_back(pos);
  2042. }
  2043. llama_grammar_advance_stack(vec_rules, stack, stacks);
  2044. while (!llama_grammar_is_end_of_sequence(pos)) {
  2045. // scan to end of alternate def
  2046. pos++;
  2047. }
  2048. if (pos->type == LLAMA_GRETYPE_ALT) {
  2049. // there's another alternate def of this rule to process
  2050. pos++;
  2051. } else {
  2052. break;
  2053. }
  2054. } while (true);
  2055. return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
  2056. }
  2057. void llama_grammar_free(struct llama_grammar * grammar) {
  2058. delete grammar;
  2059. }
  2060. //
  2061. // sampling
  2062. //
  2063. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  2064. assert(candidates->size > 0);
  2065. const int64_t t_start_sample_us = ggml_time_us();
  2066. // Sort the logits in descending order
  2067. if (!candidates->sorted) {
  2068. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  2069. return a.logit > b.logit;
  2070. });
  2071. candidates->sorted = true;
  2072. }
  2073. float max_l = candidates->data[0].logit;
  2074. float cum_sum = 0.0f;
  2075. for (size_t i = 0; i < candidates->size; ++i) {
  2076. float p = expf(candidates->data[i].logit - max_l);
  2077. candidates->data[i].p = p;
  2078. cum_sum += p;
  2079. }
  2080. for (size_t i = 0; i < candidates->size; ++i) {
  2081. candidates->data[i].p /= cum_sum;
  2082. }
  2083. if (ctx) {
  2084. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2085. }
  2086. }
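// This is a standard max-shifted softmax; e.g. logits {2, 1, 0} become probabilities
// of roughly {0.665, 0.245, 0.090} after normalization.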
  2087. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep) {
  2088. const int64_t t_start_sample_us = ggml_time_us();
  2089. k = std::max(k, (int) min_keep);
  2090. k = std::min(k, (int) candidates->size);
  2091. // Sort scores in descending order
  2092. if (!candidates->sorted) {
  2093. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  2094. return a.logit > b.logit;
  2095. };
  2096. if (k == (int) candidates->size) {
  2097. std::sort(candidates->data, candidates->data + candidates->size, comp);
  2098. } else {
  2099. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  2100. }
  2101. candidates->sorted = true;
  2102. }
  2103. candidates->size = k;
  2104. if (ctx) {
  2105. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2106. }
  2107. }
  2108. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  2109. if (p >= 1.0f) {
  2110. return;
  2111. }
  2112. llama_sample_softmax(ctx, candidates);
  2113. const int64_t t_start_sample_us = ggml_time_us();
  2114. // Compute the cumulative probabilities
  2115. float cum_sum = 0.0f;
  2116. size_t last_idx = candidates->size;
  2117. for (size_t i = 0; i < candidates->size; ++i) {
  2118. cum_sum += candidates->data[i].p;
2119. // Check if the running sum is at least p and we have kept at least min_keep tokens;
2120. // if so, set last_idx to i + 1 so that the current token is included in the kept set
  2121. if (cum_sum >= p && i + 1 >= min_keep) {
  2122. last_idx = i + 1;
  2123. break;
  2124. }
  2125. }
  2126. // Resize the output vector to keep only the top-p tokens
  2127. candidates->size = last_idx;
  2128. if (ctx) {
  2129. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2130. }
  2131. }
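// Example: with sorted probabilities {0.50, 0.30, 0.15, 0.05} and p = 0.9 the running sums are
// 0.50, 0.80, 0.95, so the first three tokens are kept (the cut falls on the first index where
// the running sum reaches p, subject to min_keep).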
  2132. void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
  2133. if (z >= 1.0f || candidates->size <= 2) {
  2134. return;
  2135. }
  2136. llama_sample_softmax(nullptr, candidates);
  2137. const int64_t t_start_sample_us = ggml_time_us();
  2138. // Compute the first and second derivatives
  2139. std::vector<float> first_derivatives(candidates->size - 1);
  2140. std::vector<float> second_derivatives(candidates->size - 2);
  2141. for (size_t i = 0; i < first_derivatives.size(); ++i) {
  2142. first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
  2143. }
  2144. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  2145. second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
  2146. }
  2147. // Calculate absolute value of second derivatives
  2148. for (size_t i = 0; i < second_derivatives.size(); ++i) {
2149. second_derivatives[i] = fabsf(second_derivatives[i]);
  2150. }
  2151. // Normalize the second derivatives
  2152. {
  2153. const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
  2154. if (second_derivatives_sum > 1e-6f) {
  2155. for (float & value : second_derivatives) {
  2156. value /= second_derivatives_sum;
  2157. }
  2158. } else {
  2159. for (float & value : second_derivatives) {
  2160. value = 1.0f / second_derivatives.size();
  2161. }
  2162. }
  2163. }
  2164. float cum_sum = 0.0f;
  2165. size_t last_idx = candidates->size;
  2166. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  2167. cum_sum += second_derivatives[i];
2168. // Check if the running sum is greater than z and we have kept at least min_keep tokens
  2169. if (cum_sum > z && i >= min_keep) {
  2170. last_idx = i;
  2171. break;
  2172. }
  2173. }
  2174. // Resize the output vector to keep only the tokens above the tail location
  2175. candidates->size = last_idx;
  2176. if (ctx) {
  2177. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2178. }
  2179. }
  2180. void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  2181. // Reference implementation:
  2182. // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
  2183. if (p >= 1.0f) {
  2184. return;
  2185. }
  2186. // Compute the softmax of logits and calculate entropy
  2187. llama_sample_softmax(nullptr, candidates);
  2188. const int64_t t_start_sample_us = ggml_time_us();
  2189. float entropy = 0.0f;
  2190. for (size_t i = 0; i < candidates->size; ++i) {
  2191. entropy += -candidates->data[i].p * logf(candidates->data[i].p);
  2192. }
  2193. // Compute the absolute difference between negative log probability and entropy for each candidate
  2194. std::vector<float> shifted_scores;
  2195. for (size_t i = 0; i < candidates->size; ++i) {
  2196. float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
  2197. shifted_scores.push_back(shifted_score);
  2198. }
  2199. // Sort tokens based on the shifted_scores and their corresponding indices
  2200. std::vector<size_t> indices(candidates->size);
  2201. std::iota(indices.begin(), indices.end(), 0);
  2202. std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
  2203. return shifted_scores[a] < shifted_scores[b];
  2204. });
  2205. // Compute the cumulative probabilities
  2206. float cum_sum = 0.0f;
  2207. size_t last_idx = indices.size();
  2208. for (size_t i = 0; i < indices.size(); ++i) {
  2209. size_t idx = indices[i];
  2210. cum_sum += candidates->data[idx].p;
2211. // Check if the running sum is greater than p and we have kept at least min_keep tokens
  2212. if (cum_sum > p && i >= min_keep - 1) {
  2213. last_idx = i + 1;
  2214. break;
  2215. }
  2216. }
  2217. // Resize the output vector to keep only the locally typical tokens
  2218. std::vector<llama_token_data> new_candidates;
  2219. for (size_t i = 0; i < last_idx; ++i) {
  2220. size_t idx = indices[i];
  2221. new_candidates.push_back(candidates->data[idx]);
  2222. }
  2223. // Replace the data in candidates with the new_candidates data
  2224. std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
  2225. candidates->size = new_candidates.size();
  2226. if (ctx) {
  2227. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2228. }
  2229. }
  2230. void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  2231. const int64_t t_start_sample_us = ggml_time_us();
  2232. for (size_t i = 0; i < candidates_p->size; ++i) {
  2233. candidates_p->data[i].logit /= temp;
  2234. }
  2235. if (ctx) {
  2236. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2237. }
  2238. }
  2239. void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty) {
  2240. if (last_tokens_size == 0 || penalty == 1.0f) {
  2241. return;
  2242. }
  2243. const int64_t t_start_sample_us = ggml_time_us();
  2244. for (size_t i = 0; i < candidates->size; ++i) {
  2245. const auto * token_iter = std::find(last_tokens, last_tokens + last_tokens_size, candidates->data[i].id);
  2246. if (token_iter == last_tokens + last_tokens_size) {
  2247. continue;
  2248. }
2249. // The academic publication that described this technique only divided by the penalty, but that would make tokens with negative logits more likely, which is obviously wrong.
2250. // The common fix for this problem is to multiply negative logits by the penalty instead of dividing.
  2251. if (candidates->data[i].logit <= 0) {
  2252. candidates->data[i].logit *= penalty;
  2253. } else {
  2254. candidates->data[i].logit /= penalty;
  2255. }
  2256. }
  2257. candidates->sorted = false;
  2258. if (ctx) {
  2259. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2260. }
  2261. }
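// Example with penalty = 1.3: a repeated token with logit +2.0 drops to 2.0/1.3 ~= 1.54, while one
// with logit -2.0 drops further to -2.0*1.3 = -2.6; both become less likely, which is why the sign
// determines whether the logit is divided or multiplied.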
  2262. void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) {
  2263. if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) {
  2264. return;
  2265. }
  2266. const int64_t t_start_sample_us = ggml_time_us();
  2267. // Create a frequency map to count occurrences of each token in last_tokens
  2268. std::unordered_map<llama_token, int> token_count;
  2269. for (size_t i = 0; i < last_tokens_size; ++i) {
  2270. token_count[last_tokens_p[i]]++;
  2271. }
  2272. // Apply frequency and presence penalties to the candidates
  2273. for (size_t i = 0; i < candidates->size; ++i) {
  2274. auto token_iter = token_count.find(candidates->data[i].id);
  2275. if (token_iter == token_count.end()) {
  2276. continue;
  2277. }
  2278. int count = token_iter->second;
  2279. candidates->data[i].logit -= float(count) * alpha_frequency + float(count > 0) * alpha_presence;
  2280. }
  2281. candidates->sorted = false;
  2282. if (ctx) {
  2283. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2284. }
  2285. }
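// Example: with alpha_frequency = 0.5 and alpha_presence = 0.2, a candidate that already appeared
// 3 times in last_tokens has its logit lowered by 3*0.5 + 0.2 = 1.7.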
  2286. void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
  2287. assert(ctx);
  2288. const int64_t t_start_sample_us = ggml_time_us();
  2289. bool allow_eos = false;
  2290. for (const auto & stack : grammar->stacks) {
  2291. if (stack.empty()) {
  2292. allow_eos = true;
  2293. break;
  2294. }
  2295. }
  2296. const llama_token eos = llama_token_eos();
  2297. std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
  2298. std::vector<llama_grammar_candidate> candidates_grammar;
  2299. for (size_t i = 0; i < candidates->size; ++i) {
  2300. const llama_token id = candidates->data[i].id;
  2301. const char * str = llama_token_to_str(ctx, id);
  2302. if (id == eos) {
  2303. if (!allow_eos) {
  2304. candidates->data[i].logit = -INFINITY;
  2305. }
  2306. } else if (*str == 0) {
  2307. candidates->data[i].logit = -INFINITY;
  2308. } else {
  2309. candidates_decoded.push_back(decode_utf8(str, grammar->partial_utf8));
  2310. candidates_grammar.push_back({
  2311. i, candidates_decoded.back().first.data(), candidates_decoded.back().second
  2312. });
  2313. }
  2314. }
  2315. const auto rejects =
  2316. llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
  2317. for (auto & reject : rejects) {
  2318. candidates->data[reject.index].logit = -INFINITY;
  2319. }
  2320. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2321. }
  2322. static void llama_log_softmax(float * array, size_t size) {
  2323. float max_l = *std::max_element(array, array + size);
  2324. float sum = 0.f;
  2325. for (size_t i = 0; i < size; ++i) {
  2326. float p = expf(array[i] - max_l);
  2327. sum += p;
  2328. array[i] = p;
  2329. }
  2330. for (size_t i = 0; i < size; ++i) {
  2331. array[i] = logf(array[i] / sum);
  2332. }
  2333. }
  2334. void llama_sample_classifier_free_guidance(
  2335. struct llama_context * ctx,
  2336. llama_token_data_array * candidates,
  2337. struct llama_context * guidance_ctx,
  2338. float scale) {
  2339. int64_t t_start_sample_us = ggml_time_us();
  2340. assert(ctx);
  2341. auto n_vocab = llama_n_vocab(ctx);
  2342. assert(n_vocab == (int)candidates->size);
  2343. assert(!candidates->sorted);
  2344. std::vector<float> logits_base;
  2345. logits_base.reserve(candidates->size);
  2346. for (size_t i = 0; i < candidates->size; ++i) {
  2347. logits_base.push_back(candidates->data[i].logit);
  2348. }
  2349. llama_log_softmax(logits_base.data(), candidates->size);
  2350. float* logits_guidance = llama_get_logits(guidance_ctx);
  2351. llama_log_softmax(logits_guidance, n_vocab);
  2352. for (int i = 0; i < n_vocab; ++i) {
  2353. float logit_guidance = logits_guidance[i];
  2354. float logit_base = logits_base[i];
  2355. candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance;
  2356. }
  2357. if (ctx) {
  2358. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2359. }
  2360. }
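// The blend computed above is logit_i = scale*(logp_base_i - logp_guidance_i) + logp_guidance_i:
// scale = 1 reproduces the base distribution unchanged, while scale > 1 amplifies whatever the
// base context adds relative to the guidance (negative prompt) context.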
  2361. llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
  2362. assert(ctx);
  2363. auto N = float(llama_n_vocab(ctx));
  2364. int64_t t_start_sample_us;
  2365. t_start_sample_us = ggml_time_us();
  2366. llama_sample_softmax(nullptr, candidates);
  2367. // Estimate s_hat using the most probable m tokens
  2368. float s_hat = 0.0;
  2369. float sum_ti_bi = 0.0;
  2370. float sum_ti_sq = 0.0;
  2371. for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
  2372. float t_i = logf(float(i + 2) / float(i + 1));
  2373. float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
  2374. sum_ti_bi += t_i * b_i;
  2375. sum_ti_sq += t_i * t_i;
  2376. }
  2377. s_hat = sum_ti_bi / sum_ti_sq;
  2378. // Compute k from the estimated s_hat and target surprise value
  2379. float epsilon_hat = s_hat - 1;
  2380. float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
  2381. // Sample the next word X using top-k sampling
  2382. llama_sample_top_k(nullptr, candidates, int(k), 1);
  2383. if (ctx) {
  2384. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2385. }
  2386. llama_token X = llama_sample_token(ctx, candidates);
  2387. t_start_sample_us = ggml_time_us();
  2388. // Compute error as the difference between observed surprise and target surprise value
  2389. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  2390. return candidate.id == X;
  2391. }));
  2392. float observed_surprise = -log2f(candidates->data[X_idx].p);
  2393. float e = observed_surprise - tau;
  2394. // Update mu using the learning rate and error
  2395. *mu = *mu - eta * e;
  2396. if (ctx) {
  2397. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2398. }
  2399. return X;
  2400. }
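//
// Mirostat v2: simpler variant that truncates the candidate list directly - tokens
// whose surprise -log2(p) exceeds the current `mu` are dropped, the remainder is
// renormalized and sampled from, and `mu` is updated with the same rule:
//   mu -= eta * (observed_surprise - tau)
//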
  2401. llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
  2402. int64_t t_start_sample_us;
  2403. t_start_sample_us = ggml_time_us();
  2404. llama_sample_softmax(ctx, candidates);
  2405. // Truncate the words with surprise values greater than mu
  2406. candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  2407. return -log2f(candidate.p) > *mu;
  2408. }));
  2409. if (candidates->size == 0) {
  2410. candidates->size = 1;
  2411. }
  2412. if (ctx) {
  2413. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2414. }
  2415. // Normalize the probabilities of the remaining words
  2416. llama_sample_softmax(ctx, candidates);
  2417. // Sample the next word X from the remaining words
  2418. llama_token X = llama_sample_token(ctx, candidates);
  2419. t_start_sample_us = ggml_time_us();
  2420. // Compute error as the difference between observed surprise and target surprise value
  2421. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  2422. return candidate.id == X;
  2423. }));
  2424. float observed_surprise = -log2f(candidates->data[X_idx].p);
  2425. float e = observed_surprise - tau;
  2426. // Update mu using the learning rate and error
  2427. *mu = *mu - eta * e;
  2428. if (ctx) {
  2429. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2430. }
  2431. return X;
  2432. }
  2433. llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
  2434. const int64_t t_start_sample_us = ggml_time_us();
  2435. // Find max element
  2436. auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  2437. return a.logit < b.logit;
  2438. });
  2439. llama_token result = max_iter->id;
  2440. if (ctx) {
  2441. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2442. ctx->n_sample++;
  2443. }
  2444. return result;
  2445. }
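//
// draw a token id from the candidates' probabilities (softmax is applied first),
// using the context's mt19937 so the result is reproducible for a given seed.
//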
  2446. llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
  2447. assert(ctx);
  2448. const int64_t t_start_sample_us = ggml_time_us();
  2449. llama_sample_softmax(nullptr, candidates);
  2450. std::vector<float> probs;
  2451. probs.reserve(candidates->size);
  2452. for (size_t i = 0; i < candidates->size; ++i) {
  2453. probs.push_back(candidates->data[i].p);
  2454. }
  2455. std::discrete_distribution<> dist(probs.begin(), probs.end());
  2456. auto & rng = ctx->rng;
  2457. int idx = dist(rng);
  2458. llama_token result = candidates->data[idx].id;
  2459. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2460. ctx->n_sample++;
  2461. return result;
  2462. }
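//
// advance the grammar state after a token has been accepted: EOS is only legal if
// at least one parse stack is already empty; otherwise every code point of the
// token's text is pushed through llama_grammar_accept, carrying incomplete UTF-8
// sequences over to the next call via `partial_utf8`.
//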
  2463. void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
  2464. const int64_t t_start_sample_us = ggml_time_us();
  2465. if (token == llama_token_eos()) {
  2466. for (const auto & stack : grammar->stacks) {
  2467. if (stack.empty()) {
  2468. return;
  2469. }
  2470. }
  2471. LLAMA_ASSERT(false);
  2472. }
  2473. const char * str = llama_token_to_str(ctx, token);
  2474. // Note terminating 0 in decoded string
  2475. const auto decoded = decode_utf8(str, grammar->partial_utf8);
  2476. const auto & code_points = decoded.first;
  2477. for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
  2478. grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
  2479. }
  2480. grammar->partial_utf8 = decoded.second;
  2481. LLAMA_ASSERT(!grammar->stacks.empty());
  2482. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2483. }
  2484. //
  2485. // quantization
  2486. //
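// dequantize (or convert from F16) a tensor into a temporary F32 buffer so that it
// can be requantized to the requested output type; with nthread >= 2 the work is
// split into whole quantization blocks, one contiguous range per worker thread.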
  2487. static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llama_buffer & output, const int nelements, const int nthread) {
  2488. if (output.size < nelements * sizeof(float)) {
  2489. output.resize(nelements * sizeof(float));
  2490. }
  2491. float * f32_output = (float *) output.addr;
  2492. ggml_type_traits_t qtype;
  2493. if (ggml_is_quantized(tensor.type)) {
  2494. qtype = ggml_internal_get_type_traits(tensor.type);
  2495. if (qtype.to_float == NULL) {
  2496. throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type)));
  2497. }
  2498. } else if (tensor.type != GGML_TYPE_F16) {
  2499. throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor.type)));
  2500. }
  2501. if (nthread < 2) {
  2502. if (tensor.type == GGML_TYPE_F16) {
  2503. ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements);
  2504. } else if (ggml_is_quantized(tensor.type)) {
  2505. qtype.to_float(tensor.data, f32_output, nelements);
  2506. } else {
  2507. LLAMA_ASSERT(false); // unreachable
  2508. }
  2509. return;
  2510. }
  2511. auto block_size = tensor.type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor.type);
  2512. auto block_size_bytes = ggml_type_size(tensor.type);
  2513. LLAMA_ASSERT(nelements % block_size == 0);
  2514. auto nblocks = nelements / block_size;
  2515. auto blocks_per_thread = nblocks / nthread;
  2516. auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
  2517. std::vector<std::thread> workers;
  2518. for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) {
  2519. auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
  2520. auto thr_elems = thr_blocks * block_size; // number of elements for this thread
  2521. auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
  2522. auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
  2523. if (typ == GGML_TYPE_F16) {
  2524. ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
  2525. } else {
  2526. qtype.to_float(inbuf, outbuf, nels);
  2527. }
  2528. };
  2529. workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems));
  2530. in_buff_offs += thr_block_bytes;
  2531. out_buff_offs += thr_elems;
  2532. }
  2533. for (auto & worker : workers) {
  2534. worker.join();
  2535. }
  2536. }
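// core of llama_model_quantize(): maps the requested llama_ftype to a ggml type,
// streams every tensor through an optional F32 conversion, quantizes the 2D
// "weight" tensors (with per-tensor type overrides for k-quants) and writes the
// result via llama_file_saver; `hist_all` accumulates a 16-bucket histogram of
// the quantized values.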
  2537. static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
  2538. ggml_type quantized_type;
  2539. llama_ftype ftype = params->ftype;
  2540. int nthread = params->nthread;
  2541. switch (params->ftype) {
  2542. case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
  2543. case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
  2544. case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
  2545. case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
  2546. case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
  2547. case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
  2548. case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
  2549. #ifdef GGML_USE_K_QUANTS
  2550. // K-quants
  2551. case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
  2552. case LLAMA_FTYPE_MOSTLY_Q3_K_S:
  2553. case LLAMA_FTYPE_MOSTLY_Q3_K_M:
  2554. case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
  2555. case LLAMA_FTYPE_MOSTLY_Q4_K_S:
  2556. case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
  2557. case LLAMA_FTYPE_MOSTLY_Q5_K_S:
  2558. case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
  2559. case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
  2560. #endif
  2561. default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
  2562. }
  2563. if (nthread <= 0) {
  2564. nthread = std::thread::hardware_concurrency();
  2565. }
  2566. std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false));
  2567. llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get(), params->ftype);
  2568. #ifdef GGML_USE_K_QUANTS
  2569. int n_attention_wv = 0;
  2570. int n_feed_forward_w2 = 0;
  2571. for (auto& tensor : model_loader->tensors_map.tensors) {
  2572. if (tensor.name.find("attention.wv.weight") != std::string::npos) {
  2573. ++n_attention_wv;
  2574. }
  2575. else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
  2576. ++n_feed_forward_w2;
  2577. }
  2578. }
  2579. int i_attention_wv = 0;
  2580. int i_feed_forward_w2 = 0;
  2581. #endif
  2582. size_t total_size_org = 0;
  2583. size_t total_size_new = 0;
  2584. std::vector<int64_t> hist_all(1 << 4, 0);
  2585. std::vector<std::thread> workers;
  2586. std::mutex mutex;
  2587. auto use_more_bits = [] (int i_layer, int num_layers) -> bool {
  2588. return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
  2589. };
  2590. size_t idx = 0;
  2591. for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) {
  2592. llama_buffer read_data;
  2593. read_data.resize(tensor.size);
  2594. tensor.data = read_data.addr;
  2595. model_loader->load_data_for(tensor);
  2596. LLAMA_LOG_INFO("[%4zu/%4zu] %36s - %16s, type = %6s, ",
  2597. ++idx, model_loader->tensors_map.tensors.size(),
  2598. tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
  2599. ggml_type_name(tensor.type));
  2600. // This used to be a regex, but <regex> has an extreme cost to compile times.
  2601. bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'?
  2602. // quantize only 2D tensors
  2603. quantize &= (tensor.ne.size() == 2);
  2604. quantize &= params->quantize_output_tensor || tensor.name != "output.weight";
  2605. quantize &= quantized_type != tensor.type;
  2606. enum ggml_type new_type;
  2607. void * new_data;
  2608. size_t new_size;
  2609. llama_buffer work;
  2610. if (!quantize) {
  2611. new_type = tensor.type;
  2612. new_data = tensor.data;
  2613. new_size = tensor.size;
  2614. LLAMA_LOG_INFO("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
  2615. } else {
  2616. new_type = quantized_type;
  2617. #ifdef GGML_USE_K_QUANTS
  2618. if (tensor.name == "output.weight") {
  2619. int nx = tensor.ne.at(0);
  2620. int ny = tensor.ne.at(1);
  2621. if (nx % QK_K == 0 && ny % QK_K == 0) {
  2622. new_type = GGML_TYPE_Q6_K;
  2623. }
  2624. } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
  2625. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
  2626. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  2627. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  2628. use_more_bits(i_attention_wv, n_attention_wv)) new_type = GGML_TYPE_Q6_K;
  2629. else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
  2630. (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
  2631. ++i_attention_wv;
  2632. } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
  2633. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
  2634. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  2635. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  2636. use_more_bits(i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
  2637. //else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && i_feed_forward_w2 < n_feed_forward_w2/8) new_type = GGML_TYPE_Q6_K;
  2638. ++i_feed_forward_w2;
  2639. } else if (tensor.name.find("attention.wo.weight") != std::string::npos) {
  2640. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
  2641. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  2642. }
  2643. bool convert_incompatible_tensor = false;
  2644. if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
  2645. new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
  2646. int nx = tensor.ne.at(0);
  2647. int ny = tensor.ne.at(1);
  2648. if (nx % QK_K != 0 || ny % QK_K != 0) {
2649. LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n", nx, ny, QK_K);
  2650. convert_incompatible_tensor = true;
  2651. }
  2652. }
  2653. if (convert_incompatible_tensor) {
  2654. if (tensor.name == "output.weight") {
  2655. new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing.
  2656. LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n");
  2657. } else if (tensor.name == "tok_embeddings.weight") {
  2658. new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing.
  2659. LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n");
  2660. } else {
  2661. throw std::runtime_error("Unsupported tensor size encountered\n");
  2662. }
  2663. }
  2664. #endif
  2665. float * f32_data;
  2666. size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
  2667. llama_buffer f32_conv_buf;
  2668. if (tensor.type == GGML_TYPE_F32) {
  2669. f32_data = (float *) tensor.data;
  2670. } else if (ggml_is_quantized(tensor.type) && !params->allow_requantize) {
  2671. throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor.type)));
  2672. } else {
  2673. llama_convert_tensor_internal(tensor, f32_conv_buf, nelements, nthread);
  2674. f32_data = (float *) f32_conv_buf.addr;
  2675. }
  2676. LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
  2677. fflush(stdout);
  2678. work.resize(nelements * 4); // upper bound on size
  2679. new_data = work.addr;
  2680. std::vector<int64_t> hist_cur(1 << 4, 0);
  2681. int chunk_size = 32 * 512;
  2682. const int nchunk = (nelements + chunk_size - 1)/chunk_size;
  2683. const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
  2684. if (nthread_use < 2) {
  2685. new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
  2686. } else {
  2687. size_t counter = 0;
  2688. new_size = 0;
  2689. auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () {
  2690. std::vector<int64_t> local_hist;
  2691. size_t local_size = 0;
  2692. while (true) {
  2693. std::unique_lock<std::mutex> lock(mutex);
  2694. size_t first = counter; counter += chunk_size;
  2695. if (first >= nelements) {
  2696. if (!local_hist.empty()) {
  2697. for (int j=0; j<int(local_hist.size()); ++j) {
  2698. hist_cur[j] += local_hist[j];
  2699. }
  2700. new_size += local_size;
  2701. }
  2702. break;
  2703. }
  2704. lock.unlock();
  2705. size_t last = std::min(nelements, first + chunk_size);
  2706. if (local_hist.empty()) {
  2707. local_hist.resize(hist_cur.size(), 0);
  2708. }
  2709. local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
  2710. }
  2711. };
  2712. if ((int) workers.size() < nthread_use - 1) {
  2713. workers.resize(nthread_use - 1);
  2714. }
  2715. for (int it = 0; it < nthread_use - 1; ++it) {
  2716. workers[it] = std::thread(compute);
  2717. }
  2718. compute();
  2719. for (int it = 0; it < nthread_use - 1; ++it) {
  2720. workers[it].join();
  2721. }
  2722. }
  2723. LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
  2724. int64_t tot_count = 0;
  2725. for (size_t i = 0; i < hist_cur.size(); i++) {
  2726. hist_all[i] += hist_cur[i];
  2727. tot_count += hist_cur[i];
  2728. }
  2729. if (tot_count > 0) {
  2730. for (size_t i = 0; i < hist_cur.size(); i++) {
  2731. LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
  2732. }
  2733. }
  2734. LLAMA_LOG_INFO("\n");
  2735. }
  2736. total_size_org += tensor.size;
  2737. total_size_new += new_size;
  2738. file_saver.write_tensor(tensor, new_type, new_data, new_size);
  2739. }
  2740. LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
  2741. LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
  2742. {
  2743. int64_t sum_all = 0;
  2744. for (size_t i = 0; i < hist_all.size(); i++) {
  2745. sum_all += hist_all[i];
  2746. }
  2747. if (sum_all > 0) {
  2748. LLAMA_LOG_INFO("%s: hist: ", __func__);
  2749. for (size_t i = 0; i < hist_all.size(); i++) {
  2750. LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
  2751. }
  2752. LLAMA_LOG_INFO("\n");
  2753. }
  2754. }
  2755. }
  2756. //
  2757. // interface implementation
  2758. //
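// minimal lifecycle sketch (assumes `params` is a filled llama_context_params and
// that the model path is valid):
//
//   struct llama_model   * model = llama_load_model_from_file("./model.bin", params);
//   struct llama_context * lctx  = llama_new_context_with_model(model, params);
//   // ... evaluate / sample ...
//   llama_free(lctx);
//   llama_free_model(model);
//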
  2759. struct llama_model * llama_load_model_from_file(
  2760. const char * path_model,
  2761. struct llama_context_params params) {
  2762. ggml_time_init();
  2763. llama_model * model = new llama_model;
  2764. ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
  2765. if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers,
  2766. params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
  2767. memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
  2768. params.progress_callback_user_data)) {
  2769. LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
  2770. delete model;
  2771. return nullptr;
  2772. }
  2773. return model;
  2774. }
  2775. void llama_free_model(struct llama_model * model) {
  2776. delete model;
  2777. }
  2778. struct llama_context * llama_new_context_with_model(
  2779. struct llama_model * model,
  2780. struct llama_context_params params) {
  2781. if (!model) {
  2782. return nullptr;
  2783. }
  2784. llama_context * ctx = new llama_context(*model);
  2785. if (params.seed == LLAMA_DEFAULT_SEED) {
  2786. params.seed = time(NULL);
  2787. }
  2788. unsigned cur_percentage = 0;
  2789. if (params.progress_callback == NULL) {
  2790. params.progress_callback_user_data = &cur_percentage;
  2791. params.progress_callback = [](float progress, void * ctx) {
  2792. unsigned * cur_percentage_p = (unsigned *) ctx;
  2793. unsigned percentage = (unsigned) (100 * progress);
  2794. while (percentage > *cur_percentage_p) {
  2795. *cur_percentage_p = percentage;
  2796. LLAMA_LOG_INFO(".");
  2797. if (percentage >= 100) {
  2798. LLAMA_LOG_INFO("\n");
  2799. }
  2800. }
  2801. };
  2802. }
  2803. ctx->rng = std::mt19937(params.seed);
  2804. ctx->logits_all = params.logits_all;
  2805. ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
  2806. // reserve memory for context buffers
  2807. if (!params.vocab_only) {
  2808. if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
  2809. LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
  2810. llama_free(ctx);
  2811. return nullptr;
  2812. }
  2813. {
  2814. const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
  2815. LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
  2816. }
  2817. const auto & hparams = ctx->model.hparams;
  2818. // resized during inference
  2819. if (params.logits_all) {
  2820. ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
  2821. } else {
  2822. ctx->logits.reserve(hparams.n_vocab);
  2823. }
  2824. if (params.embedding){
  2825. ctx->embedding.resize(hparams.n_embd);
  2826. }
  2827. #ifdef LLAMA_USE_ALLOCATOR
  2828. {
  2829. static const size_t tensor_alignment = 32;
  2830. // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
  2831. ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead());
  2832. // create measure allocator
  2833. ctx->alloc = ggml_allocr_new_measure(tensor_alignment);
  2834. // build worst-case graph
  2835. int n_tokens = std::min((int)hparams.n_ctx, params.n_batch);
  2836. int n_past = hparams.n_ctx - n_tokens;
  2837. llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
  2838. ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past);
  2839. #ifdef GGML_USE_METAL
  2840. if (params.n_gpu_layers > 0) {
  2841. ctx->ctx_metal = ggml_metal_init(1);
  2842. if (!ctx->ctx_metal) {
  2843. LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
  2844. llama_free(ctx);
  2845. return NULL;
  2846. }
  2847. ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false);
  2848. ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
  2849. }
  2850. #endif
  2851. // measure memory requirements for the graph
  2852. size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;
  2853. LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
  2854. // debug - for comparison with scratch buffer
  2855. //size_t prev_req =
  2856. // MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) +
  2857. // MEM_REQ_SCRATCH1().at(ctx->model.type) +
  2858. // MEM_REQ_EVAL().at(ctx->model.type);
  2859. //LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0);
  2860. // recreate allocator with exact memory requirements
  2861. ggml_allocr_free(ctx->alloc);
  2862. ctx->buf_alloc.resize(alloc_size);
  2863. ctx->alloc = ggml_allocr_new(ctx->buf_alloc.addr, ctx->buf_alloc.size, tensor_alignment);
  2864. #ifdef GGML_USE_METAL
  2865. if (ctx->ctx_metal) {
  2866. ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
  2867. }
  2868. #endif
  2869. }
  2870. #else
  2871. ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead());
  2872. #endif
  2873. #ifdef LLAMA_USE_SCRATCH
  2874. ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type));
  2875. ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
  2876. #endif
  2877. }
  2878. #ifdef GGML_USE_METAL
  2879. if (params.n_gpu_layers > 0) {
  2880. // this allocates all Metal resources and memory buffers
  2881. void * data_ptr = NULL;
  2882. size_t data_size = 0;
  2883. if (params.use_mmap) {
  2884. data_ptr = ctx->model.mapping->addr;
  2885. data_size = ctx->model.mapping->size;
  2886. } else {
  2887. data_ptr = ggml_get_mem_buffer(ctx->model.ctx);
  2888. data_size = ggml_get_mem_size (ctx->model.ctx);
  2889. }
  2890. const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);
  2891. LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
  2892. #define LLAMA_METAL_CHECK_BUF(result) \
  2893. if (!(result)) { \
  2894. LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
  2895. llama_free(ctx); \
  2896. return NULL; \
  2897. }
  2898. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
  2899. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size, 0));
  2900. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.addr, ctx->kv_self.buf.size, 0));
  2901. LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.addr, ctx->buf_alloc.size, 0));
  2902. #undef LLAMA_METAL_CHECK_BUF
  2903. }
  2904. #endif
  2905. #ifdef GGML_USE_MPI
  2906. ctx->ctx_mpi = ggml_mpi_init();
  2907. if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
  2908. // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
  2909. const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos());
  2910. while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
  2911. llama_backend_free();
  2912. exit(1);
  2913. }
  2914. #endif
  2915. return ctx;
  2916. }
  2917. struct llama_context * llama_init_from_file(
  2918. const char * path_model,
  2919. struct llama_context_params params) {
  2920. struct llama_model * model = llama_load_model_from_file(path_model, params);
  2921. if (!model) {
  2922. return nullptr;
  2923. }
  2924. struct llama_context * ctx = llama_new_context_with_model(model, params);
  2925. ctx->model_owner = true;
  2926. return ctx;
  2927. }
  2928. void llama_free(struct llama_context * ctx) {
  2929. delete ctx;
  2930. }
  2931. int llama_model_quantize(
  2932. const char * fname_inp,
  2933. const char * fname_out,
  2934. const llama_model_quantize_params *params) {
  2935. try {
  2936. llama_model_quantize_internal(fname_inp, fname_out, params);
  2937. return 0;
  2938. } catch (const std::exception & err) {
  2939. LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
  2940. return 1;
  2941. }
  2942. }
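//
// LoRA application: for every adapter tensor pair `<name>.loraA` / `<name>.loraB`
// the corresponding base weight W is updated in place as
//   W' = W + scaling * (B * A),   scaling = lora_alpha / lora_r
// if a --lora-base model is given, W is read from it (ideally f16/f32) instead of
// the possibly quantized weights of the loaded model.
//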
  2943. int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
  2944. LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
  2945. const int64_t t_start_lora_us = ggml_time_us();
  2946. auto fin = std::ifstream(path_lora, std::ios::binary);
  2947. if (!fin) {
  2948. LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
  2949. return 1;
  2950. }
  2951. // verify magic and version
  2952. {
  2953. uint32_t magic;
  2954. fin.read((char *) &magic, sizeof(magic));
  2955. if (magic != LLAMA_FILE_MAGIC_GGLA) {
  2956. LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
  2957. return 1;
  2958. }
  2959. uint32_t format_version;
  2960. fin.read((char *) &format_version, sizeof(format_version));
  2961. if (format_version != 1) {
  2962. LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
  2963. return 1;
  2964. }
  2965. }
  2966. int32_t lora_r;
  2967. int32_t lora_alpha;
  2968. fin.read((char *) &lora_r, sizeof(lora_r));
  2969. fin.read((char *) &lora_alpha, sizeof(lora_alpha));
  2970. float scaling = (float)lora_alpha / (float)lora_r;
  2971. LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
  2972. // create a temporary ggml context to store the lora tensors
  2973. // todo: calculate size from biggest possible tensor
  2974. std::vector<uint8_t> lora_buf(1024ull * 1024ull * 1024ull);
  2975. struct ggml_init_params params;
  2976. params.mem_size = lora_buf.size();
  2977. params.mem_buffer = lora_buf.data();
  2978. params.no_alloc = false;
  2979. ggml_context * lora_ctx = ggml_init(params);
  2980. std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
  2981. // create a name -> tensor map of the model to accelerate lookups
  2982. std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
  2983. for (const auto & kv: model.tensors_by_name) {
  2984. model_tensors.insert(kv);
  2985. }
  2986. // load base model
  2987. std::unique_ptr<llama_model_loader> model_loader;
  2988. ggml_context * base_ctx = NULL;
  2989. llama_buffer base_buf;
  2990. if (path_base_model) {
  2991. LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
  2992. model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
  2993. size_t ctx_size;
  2994. size_t mmapped_size;
  2995. model_loader->calc_sizes(&ctx_size, &mmapped_size);
  2996. base_buf.resize(ctx_size);
  2997. ggml_init_params base_params;
  2998. base_params.mem_size = base_buf.size;
  2999. base_params.mem_buffer = base_buf.addr;
  3000. base_params.no_alloc = model_loader->use_mmap;
  3001. base_ctx = ggml_init(base_params);
  3002. model_loader->ggml_ctx = base_ctx;
3003. // maybe this should be in llama_model_loader
  3004. if (model_loader->use_mmap) {
  3005. model_loader->mapping.reset(new llama_mmap(&model_loader->file_loader->file, /* prefetch */ 0, ggml_is_numa()));
  3006. }
  3007. }
  3008. // read tensors and apply
  3009. bool warned = false;
  3010. int n_tensors = 0;
  3011. std::vector<uint8_t> work_buffer;
  3012. while (true) {
  3013. int32_t n_dims;
  3014. int32_t length;
  3015. int32_t ftype;
  3016. fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
  3017. fin.read(reinterpret_cast<char *>(&length), sizeof(length));
  3018. fin.read(reinterpret_cast<char *>(&ftype), sizeof(ftype));
  3019. if (fin.eof()) {
  3020. break;
  3021. }
  3022. int32_t ne[2] = { 1, 1 };
  3023. for (int i = 0; i < n_dims; ++i) {
  3024. fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
  3025. }
  3026. std::string name;
  3027. {
  3028. char buf[1024];
  3029. fin.read(buf, length);
  3030. name = std::string(buf, length);
  3031. }
  3032. // check for lora suffix and get the type of tensor
  3033. const std::string lora_suffix = ".lora";
  3034. size_t pos = name.rfind(lora_suffix);
  3035. if (pos == std::string::npos) {
  3036. LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
  3037. return 1;
  3038. }
  3039. std::string lora_type = name.substr(pos + lora_suffix.length());
  3040. std::string base_name = name;
  3041. base_name.erase(pos);
  3042. // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
  3043. if (model_tensors.find(base_name) == model_tensors.end()) {
  3044. LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
  3045. return 1;
  3046. }
  3047. // create ggml tensor
  3048. ggml_type wtype;
  3049. switch (ftype) {
  3050. case 0: wtype = GGML_TYPE_F32; break;
  3051. case 1: wtype = GGML_TYPE_F16; break;
  3052. default:
  3053. {
  3054. LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
  3055. __func__, ftype);
  3056. return false;
  3057. }
  3058. }
  3059. ggml_tensor * lora_tensor;
  3060. if (n_dims == 2) {
  3061. lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
  3062. }
  3063. else {
  3064. LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
  3065. return 1;
  3066. }
  3067. ggml_set_name(lora_tensor, "lora_tensor");
  3068. // load tensor data
  3069. size_t offset = fin.tellg();
  3070. size_t tensor_data_size = ggml_nbytes(lora_tensor);
  3071. offset = (offset + 31) & -32;
  3072. fin.seekg(offset);
  3073. fin.read((char*)lora_tensor->data, tensor_data_size);
  3074. lora_tensors[name] = lora_tensor;
  3075. // check if we have both A and B tensors and apply
  3076. if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
  3077. lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
  3078. ggml_tensor * dest_t = model_tensors[base_name];
  3079. offload_func_t offload_func = llama_nop;
  3080. offload_func_t offload_func_force_inplace = llama_nop;
  3081. #ifdef GGML_USE_CUBLAS
  3082. if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
  3083. if (dest_t->type != GGML_TYPE_F16) {
  3084. throw std::runtime_error(format(
  3085. "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models", __func__));
  3086. }
  3087. offload_func = ggml_cuda_assign_buffers;
  3088. offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
  3089. }
  3090. #endif // GGML_USE_CUBLAS
  3091. ggml_tensor * base_t;
  3092. if (model_loader) {
  3093. // load from base model
  3094. if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
  3095. LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
  3096. return 1;
  3097. }
  3098. size_t idx = model_loader->tensors_map.name_to_idx[base_name];
  3099. llama_load_tensor & lt = model_loader->tensors_map.tensors[idx];
  3100. base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
  3101. lt.data = (uint8_t *) lt.ggml_tensor->data;
  3102. model_loader->load_data_for(lt);
  3103. lt.ggml_tensor->data = lt.data;
  3104. }
  3105. else {
  3106. base_t = dest_t;
  3107. }
  3108. if (ggml_is_quantized(base_t->type)) {
  3109. if (!warned) {
  3110. LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
  3111. "use a f16 or f32 base model with --lora-base\n", __func__);
  3112. warned = true;
  3113. }
  3114. }
  3115. ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
  3116. GGML_ASSERT(loraA->type == GGML_TYPE_F32);
  3117. ggml_set_name(loraA, "loraA");
  3118. ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
  3119. GGML_ASSERT(loraB->type == GGML_TYPE_F32);
  3120. ggml_set_name(loraB, "loraB");
  3121. if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
  3122. LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
  3123. " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
  3124. return 1;
  3125. }
  3126. // w = w + BA*s
  3127. ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
  3128. offload_func(BA);
  3129. ggml_set_name(BA, "BA");
  3130. if (scaling != 1.0f) {
  3131. ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
  3132. ggml_set_name(scale_tensor, "scale_tensor");
  3133. BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor);
  3134. offload_func(BA);
  3135. ggml_set_name(BA, "BA_scaled");
  3136. }
  3137. ggml_tensor * r;
  3138. if (base_t == dest_t) {
  3139. r = ggml_add_inplace(lora_ctx, dest_t, BA);
  3140. offload_func_force_inplace(r);
  3141. ggml_set_name(r, "r_add_inplace");
  3142. }
  3143. else {
  3144. r = ggml_add(lora_ctx, base_t, BA);
  3145. offload_func(r);
  3146. ggml_set_name(r, "r_add");
  3147. r = ggml_cpy(lora_ctx, r, dest_t);
  3148. offload_func(r);
  3149. ggml_set_name(r, "r_cpy");
  3150. }
  3151. struct ggml_cgraph gf = ggml_build_forward(r);
  3152. ggml_graph_compute_helper(work_buffer, &gf, n_threads);
  3153. // we won't need these tensors again, reset the context to save memory
  3154. ggml_free(lora_ctx);
  3155. lora_ctx = ggml_init(params);
  3156. lora_tensors.clear();
  3157. n_tensors++;
  3158. if (n_tensors % 4 == 0) {
  3159. LLAMA_LOG_INFO(".");
  3160. }
  3161. }
  3162. }
3163. // TODO: this should be in a destructor; it will leak on failure
  3164. ggml_free(lora_ctx);
  3165. if (base_ctx) {
  3166. ggml_free(base_ctx);
  3167. }
  3168. const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
  3169. LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
  3170. return 0;
  3171. }
  3172. int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
  3173. try {
  3174. return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
  3175. } catch (const std::exception & err) {
  3176. LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
  3177. return 1;
  3178. }
  3179. }
  3180. int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, const char * path_base_model, int n_threads) {
  3181. try {
  3182. return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
  3183. } catch (const std::exception & err) {
  3184. LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
  3185. return 1;
  3186. }
  3187. }
  3188. int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
  3189. return ctx->kv_self.n;
  3190. }
  3191. #define LLAMA_MAX_RNG_STATE (64*1024)
  3192. void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
  3193. if (seed == LLAMA_DEFAULT_SEED) {
  3194. seed = time(NULL);
  3195. }
  3196. ctx->rng.seed(seed);
  3197. }
  3198. // Returns the *maximum* size of the state
  3199. size_t llama_get_state_size(const struct llama_context * ctx) {
3200. // we don't know the size of the rng until we actually serialize it, so reserve more than enough memory for its serialized state.
  3201. // for reference, std::mt19937(1337) serializes to 6701 bytes.
  3202. const size_t s_rng_size = sizeof(size_t);
  3203. const size_t s_rng = LLAMA_MAX_RNG_STATE;
  3204. const size_t s_logits_capacity = sizeof(size_t);
  3205. const size_t s_logits_size = sizeof(size_t);
  3206. const size_t s_logits = ctx->logits.capacity() * sizeof(float);
  3207. const size_t s_embedding_size = sizeof(size_t);
  3208. const size_t s_embedding = ctx->embedding.size() * sizeof(float);
  3209. const size_t s_kv_size = sizeof(size_t);
  3210. const size_t s_kv_ntok = sizeof(int);
  3211. const size_t s_kv = ctx->kv_self.buf.size;
  3212. const size_t s_total = (
  3213. + s_rng_size
  3214. + s_rng
  3215. + s_logits_capacity
  3216. + s_logits_size
  3217. + s_logits
  3218. + s_embedding_size
  3219. + s_embedding
  3220. + s_kv_size
  3221. + s_kv_ntok
  3222. + s_kv
  3223. );
  3224. return s_total;
  3225. }
  3226. /** copy state data into either a buffer or file depending on the passed in context
  3227. *
  3228. * file context:
  3229. * llama_file file("/path", "wb");
  3230. * llama_data_file_context data_ctx(&file);
  3231. * llama_copy_state_data(ctx, &data_ctx);
  3232. *
  3233. * buffer context:
  3234. * std::vector<uint8_t> buf(max_size, 0);
3235. * llama_data_buffer_context data_ctx(buf.data());
  3236. * llama_copy_state_data(ctx, &data_ctx);
  3237. *
  3238. */
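// serialized layout, in order: rng size + rng state (padded to LLAMA_MAX_RNG_STATE),
// logits capacity / size / data (padded to capacity), embedding size / data,
// kv cache size / token count / k and v tensor data.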
  3239. void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
  3240. // copy rng
  3241. {
  3242. std::stringstream rng_ss;
  3243. rng_ss << ctx->rng;
  3244. const size_t rng_size = rng_ss.str().size();
  3245. char rng_buf[LLAMA_MAX_RNG_STATE];
  3246. memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
  3247. memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());
  3248. data_ctx->write(&rng_size, sizeof(rng_size));
  3249. data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE);
  3250. }
  3251. // copy logits
  3252. {
  3253. const size_t logits_cap = ctx->logits.capacity();
  3254. const size_t logits_size = ctx->logits.size();
  3255. data_ctx->write(&logits_cap, sizeof(logits_cap));
  3256. data_ctx->write(&logits_size, sizeof(logits_size));
  3257. if (logits_size) {
  3258. data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
  3259. }
  3260. // If there is a gap between the size and the capacity, write padding
  3261. size_t padding_size = (logits_cap - logits_size) * sizeof(float);
  3262. if (padding_size > 0) {
  3263. std::vector<uint8_t> padding(padding_size, 0); // Create a buffer filled with zeros
  3264. data_ctx->write(padding.data(), padding_size);
  3265. }
  3266. }
  3267. // copy embeddings
  3268. {
  3269. const size_t embedding_size = ctx->embedding.size();
  3270. data_ctx->write(&embedding_size, sizeof(embedding_size));
  3271. if (embedding_size) {
  3272. data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
  3273. }
  3274. }
  3275. // copy kv cache
  3276. {
  3277. const auto & kv_self = ctx->kv_self;
  3278. const auto & hparams = ctx->model.hparams;
  3279. const int n_layer = hparams.n_layer;
  3280. const int n_embd = hparams.n_embd_gqa();
  3281. const int n_ctx = hparams.n_ctx;
  3282. const size_t kv_size = kv_self.buf.size;
  3283. const int kv_ntok = llama_get_kv_cache_token_count(ctx);
  3284. data_ctx->write(&kv_size, sizeof(kv_size));
  3285. data_ctx->write(&kv_ntok, sizeof(kv_ntok));
  3286. if (kv_size) {
  3287. const size_t elt_size = ggml_element_size(kv_self.k);
  3288. ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
  3289. ggml_cgraph gf{};
  3290. ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
  3291. std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
  3292. kout3d->data = kout3d_data.data();
  3293. ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
  3294. std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
  3295. vout3d->data = vout3d_data.data();
  3296. ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
  3297. n_embd, kv_ntok, n_layer,
  3298. elt_size*n_embd, elt_size*n_embd*n_ctx, 0);
  3299. ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
  3300. kv_ntok, n_embd, n_layer,
  3301. elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
  3302. ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
  3303. ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d));
  3304. ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
  3305. ggml_free(cpy_ctx);
  3306. // our data is now in the kout3d_data and vout3d_data buffers
  3307. // write them to file
  3308. data_ctx->write(kout3d_data.data(), kout3d_data.size());
  3309. data_ctx->write(vout3d_data.data(), vout3d_data.size());
  3310. }
  3311. }
  3312. }
  3313. size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
  3314. llama_data_buffer_context data_ctx(dst);
  3315. llama_copy_state_data_internal(ctx, &data_ctx);
  3316. return data_ctx.get_size_written();
  3317. }
  3318. // Sets the state reading from the specified source address
  3319. size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
  3320. uint8_t * inp = src;
  3321. // set rng
  3322. {
  3323. size_t rng_size;
  3324. char rng_buf[LLAMA_MAX_RNG_STATE];
  3325. memcpy(&rng_size, inp, sizeof(rng_size)); inp += sizeof(rng_size);
  3326. memcpy(&rng_buf[0], inp, LLAMA_MAX_RNG_STATE); inp += LLAMA_MAX_RNG_STATE;
  3327. std::stringstream rng_ss;
  3328. rng_ss.str(std::string(&rng_buf[0], rng_size));
  3329. rng_ss >> ctx->rng;
  3330. LLAMA_ASSERT(rng_ss.fail() == false);
  3331. }
  3332. // set logits
  3333. {
  3334. size_t logits_cap;
  3335. size_t logits_size;
  3336. memcpy(&logits_cap, inp, sizeof(logits_cap)); inp += sizeof(logits_cap);
  3337. memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);
  3338. LLAMA_ASSERT(ctx->logits.capacity() == logits_cap);
  3339. if (logits_size) {
  3340. ctx->logits.resize(logits_size);
  3341. memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
  3342. }
  3343. inp += logits_cap * sizeof(float);
  3344. }
  3345. // set embeddings
  3346. {
  3347. size_t embedding_size;
  3348. memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);
  3349. LLAMA_ASSERT(ctx->embedding.capacity() == embedding_size);
  3350. if (embedding_size) {
  3351. memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
  3352. inp += embedding_size * sizeof(float);
  3353. }
  3354. }
  3355. // set kv cache
  3356. {
  3357. const auto & kv_self = ctx->kv_self;
  3358. const auto & hparams = ctx->model.hparams;
  3359. const int n_layer = hparams.n_layer;
  3360. const int n_embd = hparams.n_embd_gqa();
  3361. const int n_ctx = hparams.n_ctx;
  3362. size_t kv_size;
  3363. int kv_ntok;
  3364. memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
  3365. memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok);
  3366. if (kv_size) {
  3367. LLAMA_ASSERT(kv_self.buf.size == kv_size);
  3368. const size_t elt_size = ggml_element_size(kv_self.k);
  3369. ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
  3370. ggml_cgraph gf{};
  3371. ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
  3372. kin3d->data = (void *) inp;
  3373. inp += ggml_nbytes(kin3d);
  3374. ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
  3375. vin3d->data = (void *) inp;
  3376. inp += ggml_nbytes(vin3d);
  3377. ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
  3378. n_embd, kv_ntok, n_layer,
  3379. elt_size*n_embd, elt_size*n_embd*n_ctx, 0);
  3380. ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
  3381. kv_ntok, n_embd, n_layer,
  3382. elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
  3383. ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
  3384. ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d));
  3385. ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
  3386. ggml_free(cpy_ctx);
  3387. }
  3388. ctx->kv_self.n = kv_ntok;
  3389. }
  3390. const size_t nread = inp - src;
  3391. const size_t max_size = llama_get_state_size(ctx);
  3392. LLAMA_ASSERT(nread <= max_size);
  3393. return nread;
  3394. }
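//
// session file layout: u32 magic, u32 version, raw llama_hparams, u32 token count,
// the prompt tokens, then the serialized context state (same format as
// llama_copy_state_data).
//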
  3395. static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  3396. llama_file file(path_session, "rb");
  3397. // sanity checks
  3398. {
  3399. const uint32_t magic = file.read_u32();
  3400. const uint32_t version = file.read_u32();
  3401. if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
  3402. LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
  3403. return false;
  3404. }
  3405. llama_hparams session_hparams;
  3406. file.read_raw(&session_hparams, sizeof(llama_hparams));
  3407. if (session_hparams != ctx->model.hparams) {
  3408. LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
  3409. return false;
  3410. }
  3411. }
  3412. // load the prompt
  3413. {
  3414. const uint32_t n_token_count = file.read_u32();
  3415. if (n_token_count > n_token_capacity) {
  3416. LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
  3417. return false;
  3418. }
  3419. file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
  3420. *n_token_count_out = n_token_count;
  3421. }
  3422. // restore the context state
  3423. {
  3424. const size_t n_state_size_cur = file.size - file.tell();
  3425. const size_t n_state_size_max = llama_get_state_size(ctx);
  3426. if (n_state_size_cur > n_state_size_max) {
  3427. LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
  3428. return false;
  3429. }
  3430. std::vector<uint8_t> state_data(n_state_size_max);
  3431. file.read_raw(state_data.data(), n_state_size_cur);
  3432. llama_set_state_data(ctx, state_data.data());
  3433. }
  3434. return true;
  3435. }
  3436. bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  3437. try {
  3438. return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
  3439. } catch (const std::exception & err) {
  3440. LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
  3441. return false;
  3442. }
  3443. }
  3444. bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
  3445. llama_file file(path_session, "wb");
  3446. file.write_u32(LLAMA_SESSION_MAGIC);
  3447. file.write_u32(LLAMA_SESSION_VERSION);
  3448. file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));
  3449. // save the prompt
  3450. file.write_u32((uint32_t) n_token_count);
  3451. file.write_raw(tokens, sizeof(llama_token) * n_token_count);
  3452. // save the context state using stream saving
  3453. llama_data_file_context data_ctx(&file);
  3454. llama_copy_state_data_internal(ctx, &data_ctx);
  3455. return true;
  3456. }
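// minimal generation sketch (assumes the prompt has already been evaluated and that
// `candidates` is refilled from llama_get_logits(lctx) before every sampling call):
//
//   llama_token id = llama_sample_token_greedy(lctx, &candidates);
//   while (id != llama_token_eos()) {
//       llama_eval(lctx, &id, 1, n_past++, /*n_threads*/ 4);
//       // refill candidates from the new logits, sample the next id ...
//   }
//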
  3457. int llama_eval(
  3458. struct llama_context * ctx,
  3459. const llama_token * tokens,
  3460. int n_tokens,
  3461. int n_past,
  3462. int n_threads) {
  3463. if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
  3464. LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
  3465. return 1;
  3466. }
  3467. // get a more accurate load time, upon first eval
  3468. // TODO: fix this
  3469. if (!ctx->has_evaluated_once) {
  3470. ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
  3471. ctx->has_evaluated_once = true;
  3472. }
  3473. return 0;
  3474. }
  3475. int llama_eval_embd(
  3476. struct llama_context * ctx,
  3477. const float * embd,
  3478. int n_tokens,
  3479. int n_past,
  3480. int n_threads) {
  3481. if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
  3482. LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
  3483. return 1;
  3484. }
  3485. // get a more accurate load time, upon first eval
  3486. // TODO: fix this
  3487. if (!ctx->has_evaluated_once) {
  3488. ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
  3489. ctx->has_evaluated_once = true;
  3490. }
  3491. return 0;
  3492. }
  3493. int llama_eval_export(struct llama_context * ctx, const char * fname) {
  3494. const int n_batch = 1;
  3495. const int n_ctx = 512 - n_batch;
  3496. const std::vector<llama_token> tmp(n_batch, llama_token_bos());
  3497. if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
  3498. LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
  3499. return 1;
  3500. }
  3501. return 0;
  3502. }
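//
// both tokenize entry points return the number of tokens written, or the negated
// number of required tokens when `n_max_tokens` is too small, so a two-pass call
// can be used to size the output buffer (sketch):
//
//   int n = -llama_tokenize(lctx, text, nullptr, 0, /*add_bos*/ true);
//   std::vector<llama_token> toks(std::max(n, 0));
//   llama_tokenize(lctx, text, toks.data(), (int) toks.size(), true);
//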
  3503. int llama_tokenize_with_model(
  3504. const struct llama_model * model,
  3505. const char * text,
  3506. llama_token * tokens,
  3507. int n_max_tokens,
  3508. bool add_bos) {
  3509. auto res = llama_tokenize(model->vocab, text, add_bos);
  3510. if (n_max_tokens < (int) res.size()) {
  3511. LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
  3512. return -((int) res.size());
  3513. }
  3514. for (size_t i = 0; i < res.size(); i++) {
  3515. tokens[i] = res[i];
  3516. }
  3517. return res.size();
  3518. }
  3519. int llama_tokenize(
  3520. struct llama_context * ctx,
  3521. const char * text,
  3522. llama_token * tokens,
  3523. int n_max_tokens,
  3524. bool add_bos) {
  3525. return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
  3526. }
  3527. int llama_n_vocab_from_model(const struct llama_model * model) {
  3528. return model->vocab.id_to_token.size();
  3529. }
  3530. int llama_n_ctx_from_model(const struct llama_model * model) {
  3531. return model->hparams.n_ctx;
  3532. }
  3533. int llama_n_embd_from_model(const struct llama_model * model) {
  3534. return model->hparams.n_embd;
  3535. }
  3536. int llama_n_vocab(const struct llama_context * ctx) {
  3537. return ctx->model.vocab.id_to_token.size();
  3538. }
  3539. int llama_n_ctx(const struct llama_context * ctx) {
  3540. return ctx->model.hparams.n_ctx;
  3541. }
  3542. int llama_n_embd(const struct llama_context * ctx) {
  3543. return ctx->model.hparams.n_embd;
  3544. }
  3545. int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size) {
  3546. return snprintf(buf, buf_size, "LLaMA %s %s", llama_model_type_name(model->type), llama_ftype_name(model->hparams.ftype));
  3547. }
  3548. int llama_get_vocab_from_model(
  3549. const struct llama_model * model,
  3550. const char * * strings,
  3551. float * scores,
  3552. int capacity) {
  3553. int n = std::min(capacity, (int) model->vocab.id_to_token.size());
  3554. for (int i = 0; i<n; ++i) {
  3555. strings[i] = model->vocab.id_to_token[i].tok.c_str();
  3556. scores[i] = model->vocab.id_to_token[i].score;
  3557. }
  3558. return n;
  3559. }
  3560. int llama_get_vocab(
  3561. const struct llama_context * ctx,
  3562. const char * * strings,
  3563. float * scores,
  3564. int capacity) {
  3565. return llama_get_vocab_from_model(&ctx->model, strings, scores, capacity);
  3566. }
  3567. float * llama_get_logits(struct llama_context * ctx) {
  3568. return ctx->logits.data();
  3569. }
  3570. float * llama_get_embeddings(struct llama_context * ctx) {
  3571. return ctx->embedding.data();
  3572. }
  3573. const char * llama_token_to_str_with_model(const struct llama_model * model, llama_token token) {
  3574. if (token >= llama_n_vocab_from_model(model)) {
  3575. return nullptr;
  3576. }
  3577. return model->vocab.id_to_token[token].tok.c_str();
  3578. }
  3579. const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) {
  3580. return llama_token_to_str_with_model(&ctx->model, token);
  3581. }
  3582. llama_token llama_token_bos() {
  3583. return 1;
  3584. }
  3585. llama_token llama_token_eos() {
  3586. return 2;
  3587. }
  3588. llama_token llama_token_nl() {
  3589. return 13;
  3590. }
  3591. struct llama_timings llama_get_timings(struct llama_context * ctx) {
  3592. struct llama_timings result = {
  3593. /*.t_start_ms =*/ 1e-3 * ctx->t_start_us,
  3594. /*.t_end_ms =*/ 1.00 * ggml_time_ms(),
  3595. /*.t_load_ms =*/ 1e-3 * ctx->t_load_us,
  3596. /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
  3597. /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
  3598. /*.t_eval_ms =*/ 1e-3 * ctx->t_eval_us,
  3599. /*.n_sample =*/ std::max(1, ctx->n_sample),
  3600. /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
  3601. /*.n_eval =*/ std::max(1, ctx->n_eval),
  3602. };
  3603. return result;
  3604. }
  3605. void llama_print_timings(struct llama_context * ctx) {
  3606. const llama_timings timings = llama_get_timings(ctx);
  3607. LLAMA_LOG_INFO("\n");
  3608. LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
  3609. LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
  3610. __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
  3611. LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
  3612. __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
  3613. LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
  3614. __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
  3615. LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
  3616. }
  3617. void llama_reset_timings(struct llama_context * ctx) {
  3618. ctx->t_start_us = ggml_time_us();
  3619. ctx->t_sample_us = ctx->n_sample = 0;
  3620. ctx->t_eval_us = ctx->n_eval = 0;
  3621. ctx->t_p_eval_us = ctx->n_p_eval = 0;
  3622. }
  3623. const char * llama_print_system_info(void) {
  3624. static std::string s;
  3625. s = "";
  3626. s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | ";
  3627. s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | ";
  3628. s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | ";
  3629. s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
  3630. s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
  3631. s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | ";
  3632. s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | ";
  3633. s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | ";
  3634. s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | ";
  3635. s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | ";
  3636. s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
  3637. s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | ";
  3638. s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | ";
  3639. s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
  3640. return s.c_str();
  3641. }
  3642. // For internal test use
  3643. const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
  3644. return ctx->model.tensors_by_name;
  3645. }
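//
// install a custom sink for all LLAMA_LOG_* output; passing NULL restores the
// default stderr logger. Sketch (route log output to stdout instead):
//
//   llama_log_set([](llama_log_level /*level*/, const char * text, void * /*user*/) {
//       fputs(text, stdout);
//   }, nullptr);
//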
  3646. void llama_log_set(llama_log_callback log_callback, void * user_data) {
  3647. g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
  3648. g_state.log_callback_user_data = user_data;
  3649. }
  3650. #if defined(_MSC_VER) && !defined(vsnprintf)
  3651. #define vsnprintf _vsnprintf
  3652. #endif
  3653. static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
  3654. va_list args_copy;
  3655. va_copy(args_copy, args);
  3656. char buffer[128];
  3657. int len = vsnprintf(buffer, 128, format, args);
  3658. if (len < 128) {
  3659. g_state.log_callback(level, buffer, g_state.log_callback_user_data);
  3660. } else {
  3661. char* buffer2 = new char[len+1];
  3662. vsnprintf(buffer2, len+1, format, args_copy);
  3663. buffer2[len] = 0;
  3664. g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
  3665. delete[] buffer2;
  3666. }
  3667. va_end(args_copy);
  3668. }
  3669. static void llama_log_internal(llama_log_level level, const char * format, ...) {
  3670. va_list args;
  3671. va_start(args, format);
  3672. llama_log_internal_v(level, format, args);
  3673. va_end(args);
  3674. }
  3675. static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
  3676. (void) level;
  3677. (void) user_data;
  3678. fputs(text, stderr);
  3679. fflush(stderr);
  3680. }