// llama.cpp
#include "llama.h"

#include "ggml.h"

#include <cinttypes>
#include <fstream>
#include <random>
#include <unordered_map>
#include <queue>
#include <regex>
#include <cassert>
#include <cstring>
// determine number of model parts based on the dimension
static const std::unordered_map<int, int> LLAMA_N_PARTS = {
    { 4096, 1 },
    { 5120, 2 },
    { 6656, 4 },
    { 8192, 8 },
};
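// Note: these embedding dimensions correspond to the published LLaMA variants
// (4096 -> 7B, 5120 -> 13B, 6656 -> 30B, 8192 -> 65B), so the part count here mirrors
// the number of checkpoint shards the original releases were split into.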
// default hparams (LLaMA 7B)
struct llama_hparams {
    int32_t n_vocab = 32000;
    int32_t n_ctx   = 512;   // this is provided as user input?
    int32_t n_embd  = 4096;
    int32_t n_mult  = 256;
    int32_t n_head  = 32;
    int32_t n_layer = 32;
    int32_t n_rot   = 64;
    int32_t f16     = 1;
};
struct llama_layer {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * w1;
    struct ggml_tensor * w2;
    struct ggml_tensor * w3;
};
struct llama_model {
    llama_hparams hparams;

    struct ggml_tensor * tok_embeddings;

    struct ggml_tensor * norm;
    struct ggml_tensor * output;

    std::vector<llama_layer> layers;

    // key + value memory
    struct ggml_tensor * memory_k;
    struct ggml_tensor * memory_v;

    //
    struct ggml_context * ctx;
    std::unordered_map<std::string, struct ggml_tensor *> tensors;
};
struct llama_vocab {
    using id    = int32_t;
    using token = std::string;

    struct token_score {
        token tok;
        float score;
    };

    std::unordered_map<token, id> token_to_id;
    std::vector<token_score> id_to_token;
};
struct llama_context {
    std::mt19937 rng;

    int64_t t_load_us   = 0;
    int64_t t_start_us  = 0;

    int64_t t_sample_us = 0;
    int64_t t_eval_us   = 0;

    int32_t n_sample = 0; // number of tokens sampled
    int32_t n_eval   = 0; // number of eval calls

    llama_model model;
    llama_vocab vocab;

    size_t mem_per_token = 0;

    // decode output (2-dimensional array: [n_tokens][n_vocab])
    std::vector<float> logits;
    bool logits_all = false;
};
struct llama_context_params llama_context_default_params() {
    struct llama_context_params result = {
        /*.n_ctx      =*/ 512,
        /*.n_parts    =*/ -1,
        /*.seed       =*/ 0,
        /*.f16_kv     =*/ false,
        /*.logits_all =*/ false,
        /*.vocab_only =*/ false,
    };

    return result;
}
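// Minimal usage sketch of the public API implemented further below (error handling omitted;
// the model path, thread count and sampling parameters are placeholders, not recommended values):
//
//     struct llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin",
//                                                        llama_context_default_params());
//     std::vector<llama_token> toks(llama_n_ctx(ctx));
//     const int n = llama_tokenize(ctx, "Hello", toks.data(), (int) toks.size(), /*add_bos=*/true);
//     llama_eval(ctx, toks.data(), n, /*n_past=*/0, /*n_threads=*/4);
//     const llama_token id = llama_sample_top_p_top_k(ctx, toks.data(), n, 40, 0.95, 0.80, 1.30);
//     printf("%s", llama_token_to_str(ctx, id));
//     llama_free(ctx);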
//
// model loading
//

static bool llama_model_load(
        const std::string & fname,
        llama_context & lctx,
        int n_ctx,
        int n_parts,
        ggml_type memory_type,
        bool vocab_only) {
    fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());

    const int64_t t_start_us = ggml_time_us();

    lctx.t_start_us = t_start_us;

    std::vector<char> f_buf(1024*1024);

    auto & model = lctx.model;
    auto & vocab = lctx.vocab;

    auto fin = std::ifstream(fname, std::ios::binary);
    fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
    if (!fin) {
        fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
        return false;
    }

    // verify magic
    {
        uint32_t magic;
        fin.read((char *) &magic, sizeof(magic));
        if (magic == LLAMA_FILE_MAGIC_UNVERSIONED) {
            fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
                    __func__, fname.c_str());
            return false;
        }
        if (magic != LLAMA_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
            return false;
        }

        uint32_t format_version;
        fin.read((char *) &format_version, sizeof(format_version));

        if (format_version != LLAMA_FILE_VERSION) {
            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
                    __func__, fname.c_str(), format_version, LLAMA_FILE_VERSION);
            return false;
        }
    }
    int n_ff = 0;

    // load hparams
    {
        auto & hparams = model.hparams;

        fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
        //fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
        fin.read((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
        fin.read((char *) &hparams.n_mult,  sizeof(hparams.n_mult));
        fin.read((char *) &hparams.n_head,  sizeof(hparams.n_head));
        fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
        fin.read((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
        fin.read((char *) &hparams.f16,     sizeof(hparams.f16));

        hparams.n_ctx = n_ctx;

        n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;

        if (n_parts < 1) {
            n_parts = LLAMA_N_PARTS.at(hparams.n_embd);
        }

        // temp warning to tell the user to use "--n_parts"
        if (hparams.f16 == 4 && n_parts != 1) {
            fprintf(stderr, "%s: GPTQ model detected - are you sure n_parts should be %d? we normally expect it to be 1\n", __func__, n_parts);
            fprintf(stderr, "%s: use '--n_parts 1' if necessary\n", __func__);
        }

        fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab);
        fprintf(stderr, "%s: n_ctx   = %d\n", __func__, hparams.n_ctx);
        fprintf(stderr, "%s: n_embd  = %d\n", __func__, hparams.n_embd);
        fprintf(stderr, "%s: n_mult  = %d\n", __func__, hparams.n_mult);
        fprintf(stderr, "%s: n_head  = %d\n", __func__, hparams.n_head);
        fprintf(stderr, "%s: n_layer = %d\n", __func__, hparams.n_layer);
        fprintf(stderr, "%s: n_rot   = %d\n", __func__, hparams.n_rot);
        fprintf(stderr, "%s: f16     = %d\n", __func__, hparams.f16);
        fprintf(stderr, "%s: n_ff    = %d\n", __func__, n_ff);
        fprintf(stderr, "%s: n_parts = %d\n", __func__, n_parts);
    }
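    // Note on the n_ff formula above: the SwiGLU feed-forward width is 2/3 of 4*n_embd,
    // rounded up to a multiple of n_mult. For the 7B defaults (n_embd = 4096, n_mult = 256)
    // this gives ((2*16384/3 + 255)/256)*256 = 11008, matching the published LLaMA-7B FFN size.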
    // load vocab
    {
        std::string word;
        vocab.id_to_token.resize(model.hparams.n_vocab);
        std::vector<char> tmp(64);

        for (int i = 0; i < model.hparams.n_vocab; i++) {
            uint32_t len;
            fin.read((char *) &len, sizeof(len));

            word.resize(len);
            if (len > 0) {
                tmp.resize(len);
                fin.read(tmp.data(), len);
                word.assign(tmp.data(), len);
            } else {
                word.clear();
            }

            float score;
            fin.read((char *) &score, sizeof(score));

            vocab.token_to_id[word] = i;

            auto & tok_score = vocab.id_to_token[i];
            tok_score.tok   = word;
            tok_score.score = score;
        }
    }

    if (vocab_only) {
        return true;
    }
    // for the big tensors, we have the option to store the data in 16-bit floats or quantized
    // in order to save memory and also to speed up the computation
    // wtype is for per-layer weights, while vtype is for other weights
    ggml_type wtype, vtype;
    switch (model.hparams.f16) {
        case 0: wtype = vtype = GGML_TYPE_F32;  break;
        case 1: wtype = vtype = GGML_TYPE_F16;  break;
        case 2: wtype = vtype = GGML_TYPE_Q4_0; break;
        case 3: wtype = vtype = GGML_TYPE_Q4_1; break;
        case 4: wtype = GGML_TYPE_Q4_1; vtype = GGML_TYPE_F16; break;
        default:
            {
                fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
                        __func__, fname.c_str(), model.hparams.f16);
                return false;
            }
    }
    auto & ctx = model.ctx;

    size_t ctx_size = 0;

    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;
        const int n_vocab = hparams.n_vocab;

        ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings

        ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm

        ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // output

        ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // attention_norm

        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wq
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wk
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wv
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wo

        ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ffn_norm

        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1
        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3

        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v

        ctx_size += (5 + 10*n_layer)*256; // object overhead

        fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
    }
    // create the ggml context
    {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx_size,
            /*.mem_buffer =*/ NULL,
        };

        model.ctx = ggml_init(params);
        if (!model.ctx) {
            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
            return false;
        }
    }
    // prepare memory for the weights
    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_vocab = hparams.n_vocab;

        model.layers.resize(n_layer);

        model.tok_embeddings = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);

        model.norm   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
        model.output = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);

        // map by name
        model.tensors["tok_embeddings.weight"] = model.tok_embeddings;

        model.tensors["norm.weight"]   = model.norm;
        model.tensors["output.weight"] = model.output;

        for (int i = 0; i < n_layer; ++i) {
            auto & layer = model.layers[i];

            layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

            layer.wq = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wk = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wv = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wo = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);

            layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

            layer.w1 = ggml_new_tensor_2d(ctx, wtype, n_embd,   n_ff);
            layer.w2 = ggml_new_tensor_2d(ctx, wtype,   n_ff, n_embd);
            layer.w3 = ggml_new_tensor_2d(ctx, wtype, n_embd,   n_ff);

            // map by name
            model.tensors["layers." + std::to_string(i) + ".attention_norm.weight"] = layer.attention_norm;

            model.tensors["layers." + std::to_string(i) + ".attention.wq.weight"] = layer.wq;
            model.tensors["layers." + std::to_string(i) + ".attention.wk.weight"] = layer.wk;
            model.tensors["layers." + std::to_string(i) + ".attention.wv.weight"] = layer.wv;
            model.tensors["layers." + std::to_string(i) + ".attention.wo.weight"] = layer.wo;

            model.tensors["layers." + std::to_string(i) + ".ffn_norm.weight"] = layer.ffn_norm;

            model.tensors["layers." + std::to_string(i) + ".feed_forward.w1.weight"] = layer.w1;
            model.tensors["layers." + std::to_string(i) + ".feed_forward.w2.weight"] = layer.w2;
            model.tensors["layers." + std::to_string(i) + ".feed_forward.w3.weight"] = layer.w3;
        }
    }
    // key + value memory
    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;

        const int n_mem      = n_layer*n_ctx;
        const int n_elements = n_embd*n_mem;

        model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
        model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);

        const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);

        fprintf(stderr, "%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
    }
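    // For the 7B defaults (n_layer = 32, n_ctx = 512, n_embd = 4096) each cache tensor holds
    // 32*512*4096 = 67,108,864 elements, i.e. 256 MB for K + V together at f16 and 512 MB at f32.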
    const size_t file_offset = fin.tellg();

    fin.close();

    std::vector<uint8_t> tmp;

    for (int i = 0; i < n_parts; ++i) {
        const int part_id = i;
        //const int part_id = n_parts - i - 1;

        std::string fname_part = fname;
        if (i > 0) {
            fname_part += "." + std::to_string(i);
        }

        fprintf(stderr, "%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str());

        fin = std::ifstream(fname_part, std::ios::binary);
        fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
        fin.seekg(file_offset);
        // load weights
        {
            int n_tensors = 0;
            size_t total_size = 0;

            fprintf(stderr, "%s: ", __func__);

            while (true) {
                int32_t n_dims;
                int32_t length;
                int32_t ftype;

                fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
                fin.read(reinterpret_cast<char *>(&length), sizeof(length));
                fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));

                if (fin.eof()) {
                    break;
                }

                int32_t nelements = 1;
                int32_t ne[2] = { 1, 1 };
                for (int i = 0; i < n_dims; ++i) {
                    fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
                    nelements *= ne[i];
                }

                std::string name(length, 0);
                fin.read(&name[0], length);

                if (model.tensors.find(name.data()) == model.tensors.end()) {
                    fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
                    return false;
                }

                // split_type = 0: split by columns
                // split_type = 1: split by rows
                int split_type = 0;

                // split_type = 0:
                // regex:
                //   - tok_embeddings.*
                //   - layers.*.attention.wo.weight
                //   - layers.*.feed_forward.w2.weight

                // split_type = 1:
                // regex:
                //   - output.*
                //   - layers.*.attention.wq.weight
                //   - layers.*.attention.wk.weight
                //   - layers.*.attention.wv.weight
                //   - layers.*.feed_forward.w1.weight
                //   - layers.*.feed_forward.w3.weight
                if (name.find("tok_embeddings") != std::string::npos) {
                    split_type = 0;
                } else if (name.find("layers") != std::string::npos) {
                    if (name.find("attention.wo.weight") != std::string::npos) {
                        split_type = 0;
                    } else if (name.find("feed_forward.w2.weight") != std::string::npos) {
                        split_type = 0;
                    } else {
                        split_type = 1;
                    }
                } else if (name.find("output") != std::string::npos) {
                    split_type = 1;
                }

                auto tensor = model.tensors[name.data()];

                if (n_dims == 1) {
                    if (ggml_nelements(tensor) != nelements) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                        return false;
                    }
                } else {
                    if (ggml_nelements(tensor)/n_parts != nelements) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                        return false;
                    }
                }

                if (n_dims == 1) {
                    if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
                        fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
                        return false;
                    }
                } else {
                    if (split_type == 0) {
                        if (tensor->ne[0]/n_parts != ne[0] || tensor->ne[1] != ne[1]) {
                            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                    __func__, name.data(), tensor->ne[0]/n_parts, tensor->ne[1], ne[0], ne[1]);
                            return false;
                        }
                    } else {
                        if (tensor->ne[0] != ne[0] || tensor->ne[1]/n_parts != ne[1]) {
                            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                    __func__, name.data(), tensor->ne[0], tensor->ne[1]/n_parts, ne[0], ne[1]);
                            return false;
                        }
                    }
                }

                if (0) {
                    static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
                    fprintf(stderr, "%24s - [%5d, %5d], type = %6s, split = %d\n", name.data(), ne[0], ne[1], ftype_str[ftype], split_type);
                }
                size_t bpe = 0;

                switch (ftype) {
                    case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
                    case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
                    case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
                    case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
                    default:
                        {
                            fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
                            return false;
                        }
                };

                if (n_dims == 1 || n_parts == 1) {
                    if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                                __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
                        return false;
                    }

                    if (part_id == 0) {
                        fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
                    } else {
                        fin.seekg(ggml_nbytes(tensor), std::ios::cur);
                    }

                    total_size += ggml_nbytes(tensor);
                } else {
                    if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)/n_parts) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                                __func__, name.data(), ggml_nbytes(tensor)/n_parts, nelements*bpe);
                        return false;
                    }

                    if (split_type == 0) {
                        const int np0 = ne[0];

                        const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
                        assert(row_size == tensor->nb[1]);

                        for (int i1 = 0; i1 < ne[1]; ++i1) {
                            const size_t offset_row = i1*row_size;
                            const size_t offset = offset_row + ((part_id*np0)/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
                            fin.read(reinterpret_cast<char *>(tensor->data) + offset, row_size/n_parts);
                        }
                    } else {
                        const int np1 = ne[1];

                        const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);

                        for (int i1 = 0; i1 < ne[1]; ++i1) {
                            const size_t offset_row = (i1 + part_id*np1)*row_size;
                            fin.read(reinterpret_cast<char *>(tensor->data) + offset_row, row_size);
                        }
                    }

                    total_size += ggml_nbytes(tensor)/n_parts;
                }

                //fprintf(stderr, "%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
                if (++n_tensors % 8 == 0) {
                    fprintf(stderr, ".");
                    fflush(stderr);
                }
            }

            fprintf(stderr, " done\n");

            fprintf(stderr, "%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
        }

        fin.close();
    }

    lctx.logits.reserve(lctx.model.hparams.n_ctx);

    lctx.t_load_us = ggml_time_us() - t_start_us;

    return true;
}
// evaluate the transformer
//
//   - lctx:      llama context
//   - tokens:    new batch of tokens to process
//   - n_past:    the context size so far
//   - n_threads: number of threads to use
//
static bool llama_eval_internal(
        llama_context & lctx,
        const llama_token * tokens,
        const int n_tokens,
        const int n_past,
        const int n_threads) {
    const int64_t t_start_us = ggml_time_us();

    const int N = n_tokens;

    const auto & model   = lctx.model;
    const auto & hparams = model.hparams;

    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_ctx   = hparams.n_ctx;
    const int n_head  = hparams.n_head;
    const int n_vocab = hparams.n_vocab;
    const int n_rot   = hparams.n_embd/hparams.n_head;

    auto & mem_per_token = lctx.mem_per_token;

    // TODO: fix this hardcoded size
    static size_t buf_size = 512u*1024*1024;
    static void * buf = malloc(buf_size);

    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
        const size_t buf_size_new = 1.3*(mem_per_token*N); // add 30% to account for ggml object overhead
        //fprintf(stderr, "\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);

        // reallocate
        buf_size = buf_size_new;
        buf = realloc(buf, buf_size);
        if (buf == nullptr) {
            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
            return false;
        }
    }

    struct ggml_init_params params = {
        /*.mem_size   =*/ buf_size,
        /*.mem_buffer =*/ buf,
    };

    struct ggml_context * ctx0 = ggml_init(params);

    ggml_cgraph gf = {};
    gf.n_threads = n_threads;

    struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
    memcpy(embd->data, tokens, N*ggml_element_size(embd));

    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        struct ggml_tensor * cur;

        // norm
        {
            cur = ggml_rms_norm(ctx0, inpL);

            // cur = attention_norm*cur
            cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model.layers[il].attention_norm, cur),
                        cur);
        }

        // self-attention
        {
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);

            // store key and value to memory
            if (N >= 1) {
                struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
                struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));

                ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
                ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
            }

            // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
            struct ggml_tensor * Q =
                ggml_permute(ctx0,
                        ggml_rope(ctx0,
                            ggml_cpy(ctx0,
                                Qcur,
                                ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)),
                            n_past, n_rot, 0),
                        0, 2, 1, 3);

            // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
            struct ggml_tensor * K =
                ggml_permute(ctx0,
                        ggml_rope(ctx0,
                            ggml_reshape_3d(ctx0,
                                ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
                                n_embd/n_head, n_head, n_past + N),
                            n_past, n_rot, 1),
                        0, 2, 1, 3);

            // K * Q
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);

            // KQ_scaled = KQ / sqrt(n_embd/n_head)
            struct ggml_tensor * KQ_scaled =
                ggml_scale(ctx0,
                        KQ,
                        ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)));

            // KQ_masked = mask_past(KQ_scaled)
            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);

            // KQ = soft_max(KQ_masked)
            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);

            // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
            struct ggml_tensor * V_trans =
                ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0,
                            ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
                            n_embd/n_head, n_head, n_past + N),
                        1, 2, 0, 3);

            // KQV = transpose(V) * KQ_soft_max
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);

            // KQV_merged = KQV.permute(0, 2, 1, 3)
            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);

            // cur = KQV_merged.contiguous().view(n_embd, N)
            cur = ggml_cpy(ctx0,
                    KQV_merged,
                    ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));

            // projection (no bias)
            cur = ggml_mul_mat(ctx0,
                    model.layers[il].wo,
                    cur);
        }

        struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);

        // feed-forward network
        {
            // norm
            {
                cur = ggml_rms_norm(ctx0, inpFF);

                // cur = ffn_norm*cur
                cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model.layers[il].ffn_norm, cur),
                        cur);
            }

            struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
                    model.layers[il].w3,
                    cur);

            cur = ggml_mul_mat(ctx0,
                    model.layers[il].w1,
                    cur);

            // SILU activation
            cur = ggml_silu(ctx0, cur);

            cur = ggml_mul(ctx0, cur, tmp);

            cur = ggml_mul_mat(ctx0,
                    model.layers[il].w2,
                    cur);
        }

        cur = ggml_add(ctx0, cur, inpFF);

        // input for next layer
        inpL = cur;
    }
    // norm
    {
        inpL = ggml_rms_norm(ctx0, inpL);

        // inpL = norm*inpL
        inpL = ggml_mul(ctx0,
                    ggml_repeat(ctx0, model.norm, inpL),
                    inpL);
    }

    // lm_head
    {
        inpL = ggml_mul_mat(ctx0, model.output, inpL);
    }

    // logits -> probs
    //inpL = ggml_soft_max(ctx0, inpL);

    // run the computation
    ggml_build_forward_expand(&gf, inpL);
    ggml_graph_compute       (ctx0, &gf);

    //if (n_past%100 == 0) {
    //    ggml_graph_print   (&gf);
    //    ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
    //}

    //embd_w.resize(n_vocab*N);
    //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);

    auto & logits_out = lctx.logits;

    if (lctx.logits_all) {
        logits_out.resize(n_vocab * N);
        memcpy(logits_out.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
    } else {
        // return result for just the last token
        logits_out.resize(n_vocab);
        memcpy(logits_out.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
    }

    if (mem_per_token == 0) {
        mem_per_token = ggml_used_mem(ctx0)/N;
    }
    //fprintf(stderr, "used_mem = %zu\n", ggml_used_mem(ctx0));

    ggml_free(ctx0);

    // measure the performance only for the single-token evals
    if (N == 1) {
        lctx.t_eval_us += ggml_time_us() - t_start_us;
        lctx.n_eval++;
    }

    return true;
}
//
// tokenizer
//

static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
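// The lookup table above keys on the top 4 bits of the lead byte: 0xxx -> 1 byte (ASCII),
// 110x -> 2 bytes, 1110 -> 3 bytes, 11110 -> 4 bytes. Continuation bytes (10xx) also map to 1,
// so malformed input degrades to being split byte-by-byte instead of being rejected.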
struct llama_sp_symbol {
    using index = int;
    index prev;
    index next;
    const char * text;
    size_t n;
};

struct llama_sp_bigram {
    struct comparator {
        bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) {
            return (l.score < r.score) || (l.score == r.score && l.left > r.left);
        }
    };
    using queue_storage = std::vector<llama_sp_bigram>;
    using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
    llama_sp_symbol::index left;
    llama_sp_symbol::index right;
    float score;
    size_t size;
};
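// With this comparator the priority queue pops the bigram with the highest vocabulary score
// first, breaking ties in favor of the leftmost pair - the usual greedy SentencePiece-style
// merge order.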
// original implementation:
// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
struct llama_tokenizer {
    llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}

    void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
        // split string into utf8 chars
        int index = 0;
        size_t offs = 0;
        while (offs < text.size()) {
            llama_sp_symbol sym;
            size_t char_len = std::min(text.size() - offs, utf8_len(text[offs]));
            sym.text = text.c_str() + offs;
            sym.n = char_len;
            offs += char_len;
            sym.prev = index - 1;
            sym.next = offs == text.size() ? -1 : index + 1;
            index++;
            symbols_.emplace_back(std::move(sym));
        }

        // seed the work queue with all possible 2-character tokens.
        for (size_t i = 1; i < symbols_.size(); ++i) {
            try_add_bigram(i - 1, i);
        }

        // keep substituting the highest frequency pairs for as long as we can.
        while (!work_queue_.empty()) {
            auto bigram = work_queue_.top();
            work_queue_.pop();

            auto & left_sym = symbols_[bigram.left];
            auto & right_sym = symbols_[bigram.right];

            // if one of the symbols already got merged, skip it.
            if (left_sym.n == 0 || right_sym.n == 0 ||
                left_sym.n + right_sym.n != bigram.size) {
                continue;
            }

            // merge the right sym into the left one
            left_sym.n += right_sym.n;
            right_sym.n = 0;

            //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);

            // remove the right sym from the chain
            left_sym.next = right_sym.next;
            if (right_sym.next >= 0) {
                symbols_[right_sym.next].prev = bigram.left;
            }

            // find more substitutions
            try_add_bigram(left_sym.prev, bigram.left);
            try_add_bigram(bigram.left, left_sym.next);
        }

        for (int i = 0; i != -1; i = symbols_[i].next) {
            auto & symbol = symbols_[i];
            auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));

            if (token == vocab_.token_to_id.end()) {
                // output any symbols that did not form tokens as bytes.
                for (int j = 0; j < (int) symbol.n; ++j) {
                    llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
                    output.push_back(token_id);
                }
            } else {
                output.push_back((*token).second);
            }
        }
    }

private:
    void try_add_bigram(int left, int right) {
        if (left == -1 || right == -1) {
            return;
        }

        const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
        auto token = vocab_.token_to_id.find(text);

        if (token == vocab_.token_to_id.end()) {
            return;
        }

        if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
            return;
        }

        const auto & tok_score = vocab_.id_to_token[(*token).second];

        llama_sp_bigram bigram;
        bigram.left = left;
        bigram.right = right;
        bigram.score = tok_score.score;
        bigram.size = text.size();
        work_queue_.push(bigram);
    }

    const llama_vocab & vocab_;
    std::vector<llama_sp_symbol> symbols_;
    llama_sp_bigram::queue work_queue_;
};
static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) {
    llama_tokenizer tokenizer(vocab);
    std::vector<llama_vocab::id> output;

    if (text.size() == 0) {
        return output;
    }

    if (bos) {
        output.push_back(1);
    }

    tokenizer.tokenize(text, output);
    return output;
}
//
// sampling
//

static void sample_top_k(std::vector<std::pair<double, llama_vocab::id>> & logits_id, int top_k) {
    // find the top k tokens
    std::partial_sort(
            logits_id.begin(),
            logits_id.begin() + top_k, logits_id.end(),
            [](const std::pair<double, llama_vocab::id> & a, const std::pair<double, llama_vocab::id> & b) {
        return a.first > b.first;
    });

    logits_id.resize(top_k);
}
static llama_vocab::id llama_sample_top_p_top_k(
        llama_context & lctx,
        const std::vector<llama_vocab::id> & last_n_tokens,
        int top_k,
        double top_p,
        double temp,
        double repeat_penalty) {
    auto & rng = lctx.rng;

    const auto & vocab  = lctx.vocab;
    const auto & logits = lctx.logits;

    int n_logits = vocab.id_to_token.size();

    std::vector<std::pair<double, llama_vocab::id>> logits_id;
    logits_id.reserve(n_logits);

    {
        const double scale = 1.0/temp;
        for (int i = 0; i < n_logits; ++i) {
            // repetition penalty from ctrl paper (https://arxiv.org/abs/1909.05858)
            // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main
            if (std::find(last_n_tokens.begin(), last_n_tokens.end(), i) != last_n_tokens.end()) {
                // if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
                if (logits[i] < 0.0) {
                    logits_id.push_back(std::make_pair(logits[i]*scale*repeat_penalty, i));
                } else {
                    logits_id.push_back(std::make_pair(logits[i]*scale/repeat_penalty, i));
                }
            } else {
                logits_id.push_back(std::make_pair(logits[i]*scale, i));
            }
        }
    }

    sample_top_k(logits_id, top_k);

    double maxl = -std::numeric_limits<double>::infinity();
    for (const auto & kv : logits_id) {
        maxl = std::max(maxl, kv.first);
    }

    // compute probs for the top k tokens
    std::vector<double> probs;
    probs.reserve(logits_id.size());

    double sum = 0.0;
    for (const auto & kv : logits_id) {
        double p = exp(kv.first - maxl);
        probs.push_back(p);
        sum += p;
    }

    // normalize the probs
    for (auto & p : probs) {
        p /= sum;
    }

    if (top_p < 1.0f) {
        double cumsum = 0.0f;
        for (int i = 0; i < (int) probs.size(); i++) {
            cumsum += probs[i];
            if (cumsum >= top_p) {
                probs.resize(i + 1);
                logits_id.resize(i + 1);
                break;
            }
        }

        cumsum = 1.0/cumsum;
        for (int i = 0; i < (int) probs.size(); i++) {
            probs[i] *= cumsum;
        }
    }

    //printf("\n");
    //for (int i = 0; i < (int) 10; i++) {
    //    printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
    //}
    //printf("\n\n");
    //exit(0);

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    int idx = dist(rng);

    return logits_id[idx].second;
}
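// Worked example of the scaling in llama_sample_top_p_top_k above: with temp = 0.8 a raw logit
// of 2.0 enters the softmax as 2.0 * 1.25 = 2.5; if that token also appears in last_n_tokens
// with repeat_penalty = 1.3, the scaled value is further divided by 1.3 (to about 1.92), while
// negative logits are multiplied by 1.3 instead, so repeated tokens always become less likely.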
//
// quantization
//

// TODO: reuse code from the llama_model_load() somehow
bool llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, int itype, int qk) {
    ggml_type type = GGML_TYPE_Q4_1;

    switch (itype) {
        case 2: type = GGML_TYPE_Q4_0; break;
        case 3: type = GGML_TYPE_Q4_1; break;
        default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return false;
    };

    if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) {
        fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type);
        return false;
    }

    llama_vocab vocab;

    printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());

    auto finp = std::ifstream(fname_inp, std::ios::binary);
    if (!finp) {
        fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str());
        return false;
    }

    auto fout = std::ofstream(fname_out, std::ios::binary);
    if (!fout) {
        fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str());
        return false;
    }

    // verify magic
    {
        uint32_t magic;
        finp.read((char *) &magic, sizeof(magic));
        if (magic == LLAMA_FILE_MAGIC_UNVERSIONED) {
            fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
                    __func__, fname_inp.c_str());
            return false;
        }
        if (magic != LLAMA_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str());
            return false;
        }

        fout.write((char *) &magic, sizeof(magic));

        uint32_t format_version;
        finp.read((char *) &format_version, sizeof(format_version));

        if (format_version != LLAMA_FILE_VERSION) {
            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
                    __func__, fname_inp.c_str(), format_version, LLAMA_FILE_VERSION);
            return false;
        }

        fout.write((char *) &format_version, sizeof(format_version));
    }
    llama_hparams hparams;

    // load hparams
    {
        finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
        //finp.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
        finp.read((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
        finp.read((char *) &hparams.n_mult,  sizeof(hparams.n_mult));
        finp.read((char *) &hparams.n_head,  sizeof(hparams.n_head));
        finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
        finp.read((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
        finp.read((char *) &hparams.f16,     sizeof(hparams.f16));

        printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
        printf("%s: n_ctx   = %d\n", __func__, hparams.n_ctx);
        printf("%s: n_embd  = %d\n", __func__, hparams.n_embd);
        printf("%s: n_mult  = %d\n", __func__, hparams.n_mult);
        printf("%s: n_head  = %d\n", __func__, hparams.n_head);
        printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
        printf("%s: f16     = %d\n", __func__, hparams.f16);

        fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
        //fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
        fout.write((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
        fout.write((char *) &hparams.n_mult,  sizeof(hparams.n_mult));
        fout.write((char *) &hparams.n_head,  sizeof(hparams.n_head));
        fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer));
        fout.write((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
        fout.write((char *) &itype,           sizeof(hparams.f16));
    }

    // load vocab
    {
        const int32_t n_vocab = hparams.n_vocab;

        if (n_vocab != hparams.n_vocab) {
            fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
                    __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab);
            return false;
        }

        std::string word;
        vocab.id_to_token.resize(n_vocab);
        for (int i = 0; i < n_vocab; i++) {
            uint32_t len;
            finp.read ((char *) &len, sizeof(len));
            fout.write((char *) &len, sizeof(len));

            word.resize(len);
            finp.read ((char *) word.data(), len);
            fout.write((char *) word.data(), len);

            float score;
            finp.read ((char *) &score, sizeof(score));
            fout.write((char *) &score, sizeof(score));

            vocab.token_to_id[word] = i;

            auto & tok_score = vocab.id_to_token[i];
            tok_score.tok   = word;
            tok_score.score = score;
        }
    }
    // load weights
    {
        size_t total_size_org = 0;
        size_t total_size_new = 0;

        std::vector<float> work;

        std::vector<uint8_t>     data_u8;
        std::vector<ggml_fp16_t> data_f16;
        std::vector<float>       data_f32;

        std::vector<int64_t> hist_all(1 << 4, 0);

        while (true) {
            int32_t n_dims;
            int32_t length;
            int32_t ftype;

            finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
            finp.read(reinterpret_cast<char *>(&length), sizeof(length));
            finp.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));

            if (finp.eof()) {
                break;
            }

            int32_t nelements = 1;
            int32_t ne[2] = { 1, 1 };
            for (int i = 0; i < n_dims; ++i) {
                finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
                nelements *= ne[i];
            }

            std::string name(length, 0);
            finp.read (&name[0], length);

            {
                static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
                printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]);
            }

            // regexes of tensor names to be quantized
            const std::vector<std::string> k_names = {
                ".*weight",
            };

            bool quantize = false;
            for (const auto & s : k_names) {
                if (std::regex_match(name, std::regex(s))) {
                    quantize = true;
                    break;
                }
            }

            // quantize only 2D tensors
            quantize &= (n_dims == 2);

            if (quantize) {
                if (ftype != 0 && ftype != 1) {
                    fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype);
                    return false;
                }

                if (ftype == 1) {
                    data_f16.resize(nelements);
                    finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
                    data_f32.resize(nelements);
                    for (int i = 0; i < nelements; ++i) {
                        data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
                    }
                } else {
                    data_f32.resize(nelements);
                    finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
                }

                ftype = itype;
            } else {
                const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t);

                data_u8.resize(nelements*bpe);
                finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
            }

            fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
            fout.write(reinterpret_cast<char *>(&length), sizeof(length));
            fout.write(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
            for (int i = 0; i < n_dims; ++i) {
                fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
            }
            fout.write(&name[0], length);

            if (quantize) {
                printf("quantizing .. ");
                work.resize(nelements); // for quantization

                size_t cur_size = 0;
                std::vector<int64_t> hist_cur(1 << 4, 0);

                switch (type) {
                    case GGML_TYPE_Q4_0:
                        {
                            cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], qk, hist_cur.data());
                        } break;
                    case GGML_TYPE_Q4_1:
                        {
                            cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], qk, hist_cur.data());
                        } break;
                    default:
                        {
                            fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
                            return false;
                        }
                }

                fout.write(reinterpret_cast<char *>(work.data()), cur_size);
                total_size_new += cur_size;

                printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
                for (int i = 0; i < (int) hist_cur.size(); ++i) {
                    hist_all[i] += hist_cur[i];
                }

                for (int i = 0; i < (int) hist_cur.size(); ++i) {
                    printf("%5.3f ", hist_cur[i] / (float)nelements);
                }
                printf("\n");
            } else {
                printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
                fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
                total_size_new += data_u8.size();
            }

            total_size_org += nelements * sizeof(float);
        }

        printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
        printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);

        {
            int64_t sum_all = 0;
            for (int i = 0; i < (int) hist_all.size(); ++i) {
                sum_all += hist_all[i];
            }

            printf("%s: hist: ", __func__);
            for (int i = 0; i < (int) hist_all.size(); ++i) {
                printf("%5.3f ", hist_all[i] / (float)sum_all);
            }
            printf("\n");
        }
    }

    finp.close();
    fout.close();

    return true;
}
//
// interface implementation
//

struct llama_context * llama_init_from_file(
        const char * path_model,
        struct llama_context_params params) {
    ggml_time_init();

    llama_context * ctx = new llama_context;

    if (params.seed <= 0) {
        params.seed = time(NULL);
    }

    ctx->rng = std::mt19937(params.seed);
    ctx->logits_all = params.logits_all;

    ggml_type type_memory = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

    if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory, params.vocab_only)) {
        fprintf(stderr, "%s: failed to load model\n", __func__);
        delete ctx;
        return nullptr;
    }

    return ctx;
}

void llama_free(struct llama_context * ctx) {
    ggml_free(ctx->model.ctx);

    delete ctx;
}
int llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        int itype,
        int qk) {
    if (!llama_model_quantize_internal(fname_inp, fname_out, itype, qk)) {
        fprintf(stderr, "%s: failed to quantize\n", __func__);
        return 1;
    }

    return 0;
}

int llama_eval(
        struct llama_context * ctx,
        const llama_token * tokens,
        int n_tokens,
        int n_past,
        int n_threads) {
    if (!llama_eval_internal(*ctx, tokens, n_tokens, n_past, n_threads)) {
        fprintf(stderr, "%s: failed to eval\n", __func__);
        return 1;
    }

    return 0;
}

int llama_tokenize(
        struct llama_context * ctx,
        const char * text,
        llama_token * tokens,
        int n_max_tokens,
        bool add_bos) {
    auto res = llama_tokenize(ctx->vocab, text, add_bos);

    if (n_max_tokens < (int) res.size()) {
        fprintf(stderr, "%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}
int llama_n_vocab(struct llama_context * ctx) {
    return ctx->vocab.id_to_token.size();
}

int llama_n_ctx(struct llama_context * ctx) {
    return ctx->model.hparams.n_ctx;
}

float * llama_get_logits(struct llama_context * ctx) {
    return ctx->logits.data();
}

const char * llama_token_to_str(struct llama_context * ctx, llama_token token) {
    if (token >= llama_n_vocab(ctx)) {
        return nullptr;
    }

    return ctx->vocab.id_to_token[token].tok.c_str();
}

llama_token llama_token_bos() {
    return 1;
}

llama_token llama_token_eos() {
    return 2;
}

llama_token llama_sample_top_p_top_k(
        llama_context * ctx,
        const llama_token * last_n_tokens_data,
        int last_n_tokens_size,
        int top_k,
        double top_p,
        double temp,
        double repeat_penalty) {
    const int64_t t_start_sample_us = ggml_time_us();

    llama_token result = 0;

    // TODO: avoid this ...
    const auto last_n_tokens = std::vector<llama_token>(last_n_tokens_data, last_n_tokens_data + last_n_tokens_size);

    result = llama_sample_top_p_top_k(
            *ctx,
            last_n_tokens,
            top_k,
            top_p,
            temp,
            repeat_penalty);

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    ctx->n_sample++;

    return result;
}

void llama_print_timings(struct llama_context * ctx) {
    const int64_t t_end_us = ggml_time_us();

    const int32_t n_sample = std::max(1, ctx->n_sample);
    const int32_t n_eval   = std::max(1, ctx->n_eval);

    fprintf(stderr, "\n");
    fprintf(stderr, "%s:   load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0f);
    fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->t_sample_us, n_sample, 1e-3f * ctx->t_sample_us / n_sample);
    fprintf(stderr, "%s:   eval time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->t_eval_us,   n_eval,   1e-3f * ctx->t_eval_us   / n_eval);
    fprintf(stderr, "%s:  total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0f);
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us = ggml_time_us();

    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
}

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "       + std::to_string(ggml_cpu_has_avx())       + " | ";
    s += "AVX2 = "      + std::to_string(ggml_cpu_has_avx2())      + " | ";
    s += "AVX512 = "    + std::to_string(ggml_cpu_has_avx512())    + " | ";
    s += "FMA = "       + std::to_string(ggml_cpu_has_fma())       + " | ";
    s += "NEON = "      + std::to_string(ggml_cpu_has_neon())      + " | ";
    s += "ARM_FMA = "   + std::to_string(ggml_cpu_has_arm_fma())   + " | ";
    s += "F16C = "      + std::to_string(ggml_cpu_has_f16c())      + " | ";
    s += "FP16_VA = "   + std::to_string(ggml_cpu_has_fp16_va())   + " | ";
    s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
    s += "BLAS = "      + std::to_string(ggml_cpu_has_blas())      + " | ";
    s += "SSE3 = "      + std::to_string(ggml_cpu_has_sse3())      + " | ";
    s += "VSX = "       + std::to_string(ggml_cpu_has_vsx())       + " | ";

    return s.c_str();
}