// baby-llama.cpp
#include "ggml.h"
#include <vector>
#include <cassert>
#include <random>
#include <cstring>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <algorithm>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#ifdef LLAMA_DEFAULT_RMS_EPS
static const float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS;
#else
static const float rms_norm_eps = 5e-6f;
#endif

float frand() {
    return (float)rand()/(float)RAND_MAX;
}

struct random_normal_distribution {
    std::mt19937 gen;
    std::normal_distribution<float> nd;
    float min;
    float max;
};

void init_random_normal_distribution(struct random_normal_distribution * rnd, int seed, float mean, float std, float min, float max) {
    rnd->gen = std::mt19937(seed);
    rnd->nd  = std::normal_distribution<float>{mean, std};
    rnd->min = min;
    rnd->max = max;
}

float frand_normal(struct random_normal_distribution * rnd) {
    const float r = rnd->nd(rnd->gen);
    return ((r < rnd->min) ? (rnd->min) : (r > rnd->max) ? (rnd->max) : r);
}
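// The helper below wraps the two-step ggml compute pattern: ggml_graph_plan()
// reports how much scratch memory the worker threads need, the caller owns that
// buffer and wires it into the plan, and only then does ggml_graph_compute() run.
// A minimal usage sketch (the graph `gf` is a placeholder built elsewhere):
//
//     std::vector<uint8_t> work_buffer;
//     ggml_graph_compute_helper(work_buffer, &gf, /*n_threads=*/1);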
void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
    struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);

    if (plan.work_size > 0) {
        buf.resize(plan.work_size);
        plan.work_data = buf.data();
    }

    ggml_graph_compute(graph, &plan);
}
struct ggml_tensor * randomize_tensor(
        struct ggml_tensor * tensor,
        int ndims,
        const int64_t ne[],
        float fmin,
        float fmax) {
    switch (ndims) {
        case 1:
            for (int i0 = 0; i0 < ne[0]; i0++) {
                ((float *)tensor->data)[i0] = frand()*(fmax - fmin) + fmin;
            }
            break;
        case 2:
            for (int i1 = 0; i1 < ne[1]; i1++) {
                for (int i0 = 0; i0 < ne[0]; i0++) {
                    ((float *)tensor->data)[i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin;
                }
            }
            break;
        case 3:
            for (int i2 = 0; i2 < ne[2]; i2++) {
                for (int i1 = 0; i1 < ne[1]; i1++) {
                    for (int i0 = 0; i0 < ne[0]; i0++) {
                        ((float *)tensor->data)[i2*ne[1]*ne[0] + i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin;
                    }
                }
            }
            break;
        case 4:
            for (int i3 = 0; i3 < ne[3]; i3++) {
                for (int i2 = 0; i2 < ne[2]; i2++) {
                    for (int i1 = 0; i1 < ne[1]; i1++) {
                        for (int i0 = 0; i0 < ne[0]; i0++) {
                            ((float *)tensor->data)[i3*ne[2]*ne[1]*ne[0] + i2*ne[1]*ne[0] + i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin;
                        }
                    }
                }
            }
            break;
        default:
            assert(false);
    };

    return tensor;
}
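// Normal-weight initializer. The scale factor implements the Xavier-style rule
// noted below: 1/sqrt(ne[0]) for vectors and 1/sqrt(ne[0]+ne[1]) (fan-in plus
// fan-out) for matrices and higher-rank tensors.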
struct ggml_tensor * randomize_tensor_normal(
        struct ggml_tensor * tensor,
        int ndims,
        const int64_t ne[],
        struct random_normal_distribution * rnd) {
    float scale = 1.0; // xavier
    switch (ndims) {
        case 1:
            scale /= sqrtf(ne[0]);
            for (int i0 = 0; i0 < ne[0]; i0++) {
                ((float *)tensor->data)[i0] = scale * frand_normal(rnd);
            }
            break;
        case 2:
            scale /= sqrtf(ne[0]+ne[1]);
            for (int i1 = 0; i1 < ne[1]; i1++) {
                for (int i0 = 0; i0 < ne[0]; i0++) {
                    ((float *)tensor->data)[i1*ne[0] + i0] = scale * frand_normal(rnd);
                }
            }
            break;
        case 3:
            scale /= sqrtf(ne[0]+ne[1]);
            for (int i2 = 0; i2 < ne[2]; i2++) {
                for (int i1 = 0; i1 < ne[1]; i1++) {
                    for (int i0 = 0; i0 < ne[0]; i0++) {
                        ((float *)tensor->data)[i2*ne[1]*ne[0] + i1*ne[0] + i0] = scale * frand_normal(rnd);
                    }
                }
            }
            break;
        case 4:
            scale /= sqrtf(ne[0]+ne[1]);
            for (int i3 = 0; i3 < ne[3]; i3++) {
                for (int i2 = 0; i2 < ne[2]; i2++) {
                    for (int i1 = 0; i1 < ne[1]; i1++) {
                        for (int i0 = 0; i0 < ne[0]; i0++) {
                            ((float *)tensor->data)[i3*ne[2]*ne[1]*ne[0] + i2*ne[1]*ne[0] + i1*ne[0] + i0] = scale * frand_normal(rnd);
                        }
                    }
                }
            }
            break;
        default:
            assert(false);
    };

    return tensor;
}
struct llama_hparams {
    uint32_t n_vocab = 32000;
    uint32_t n_ctx   = 512;   // this is provided as user input?
    uint32_t n_embd  = 4096;
    uint32_t n_mult  = 4;
    uint32_t n_head  = 32;
    uint32_t n_layer = 32;
    uint32_t n_rot   = 64;

    bool operator!=(const llama_hparams & other) const {
        return memcmp(this, &other, sizeof(llama_hparams)) != 0;
    }
};
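// Feed-forward width of the SwiGLU MLP: take 2/3 of 4*n_embd, then round up to
// a multiple of n_mult. Worked example with the defaults above (n_embd = 4096,
// n_mult = 4): 2*(4*4096)/3 = 10922, rounded up to 10924.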
uint32_t get_n_ff(const struct llama_hparams* hparams) {
    const uint32_t n_ff = ((2*(4*hparams->n_embd)/3 + hparams->n_mult - 1)/hparams->n_mult)*hparams->n_mult;
    return n_ff;
}
struct llama_hparams_lora {
    uint32_t n_vocab = 32000;
    uint32_t n_ctx   = 512;   // this is provided as user input?
    uint32_t n_embd  = 4096;
    uint32_t n_mult  = 4;
    uint32_t n_head  = 32;
    uint32_t n_layer = 32;
    uint32_t n_rot   = 64;
    uint32_t n_lora  = 64;

    bool operator!=(const llama_hparams_lora & other) const {
        return memcmp(this, &other, sizeof(llama_hparams_lora)) != 0;
    }
};

struct llama_layer {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * w1;
    struct ggml_tensor * w2;
    struct ggml_tensor * w3;
};
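// Same layer as above, but with each attention projection factored into a
// low-rank pair: a full n_embd x n_embd weight W becomes A*B with inner
// dimension n_lora, shrinking it from n_embd^2 parameters to 2*n_embd*n_lora
// (with the defaults: 4096*4096 ~ 16.8M vs 2*4096*64 ~ 0.5M per projection).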
struct llama_layer_lora {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wqa;
    struct ggml_tensor * wqb;
    struct ggml_tensor * wka;
    struct ggml_tensor * wkb;
    struct ggml_tensor * wva;
    struct ggml_tensor * wvb;
    struct ggml_tensor * woa;
    struct ggml_tensor * wob;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * w1;
    struct ggml_tensor * w2;
    struct ggml_tensor * w3;
};
struct llama_kv_cache {
    struct ggml_context * ctx = NULL;

    struct ggml_tensor * k;
    struct ggml_tensor * v;

    // llama_ctx_buffer buf;

    int n; // number of tokens currently in the cache
};

struct llama_model {
    struct ggml_context * ctx = NULL;

    llama_hparams hparams;

    struct ggml_tensor * tok_embeddings;

    struct ggml_tensor * norm;
    struct ggml_tensor * output;

    std::vector<llama_layer> layers;
};

struct llama_model_lora {
    struct ggml_context * ctx = NULL;

    llama_hparams_lora hparams;

    struct ggml_tensor * tok_embeddings;

    struct ggml_tensor * norm;
    struct ggml_tensor * outputa;
    struct ggml_tensor * outputb;

    std::vector<llama_layer_lora> layers;
};

void init_model(struct llama_model * model) {
    const auto & hparams = model->hparams;

    const uint32_t n_embd  = hparams.n_embd;
    const uint32_t n_layer = hparams.n_layer;
    const uint32_t n_vocab = hparams.n_vocab;

    const uint32_t n_ff = get_n_ff(&hparams);

    struct ggml_context * ctx = model->ctx;

    model->tok_embeddings = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab); // ("tok_embeddings.weight", {n_embd, n_vocab});
    model->norm           = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);          // ("norm.weight",           {n_embd});
    model->output         = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab); // ("output.weight",         {n_embd, n_vocab});

    model->layers.resize(n_layer);
    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        // std::string layers_i = "layers." + std::to_string(i);

        layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);     // (layers_i + ".attention_norm.weight", {n_embd});

        layer.wq = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);         // (layers_i + ".attention.wq.weight", {n_embd, n_embd});
        layer.wk = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);         // (layers_i + ".attention.wk.weight", {n_embd, n_embd});
        layer.wv = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);         // (layers_i + ".attention.wv.weight", {n_embd, n_embd});
        layer.wo = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);         // (layers_i + ".attention.wo.weight", {n_embd, n_embd});

        layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);           // (layers_i + ".ffn_norm.weight", {n_embd});

        layer.w1 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);           // (layers_i + ".feed_forward.w1.weight", {n_embd, n_ff});
        layer.w2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_ff,   n_embd);         // (layers_i + ".feed_forward.w2.weight", { n_ff, n_embd});
        layer.w3 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);           // (layers_i + ".feed_forward.w3.weight", {n_embd, n_ff});
    }
}

void init_model_lora(struct llama_model_lora * model) {
    const auto & hparams = model->hparams;

    const uint32_t n_embd  = hparams.n_embd;
    const uint32_t n_mult  = hparams.n_mult;
    const uint32_t n_layer = hparams.n_layer;
    const uint32_t n_vocab = hparams.n_vocab;
    const uint32_t n_lora  = hparams.n_lora;

    const uint32_t n_ff = ((2*(4*n_embd)/3 + n_mult - 1)/n_mult)*n_mult;

    struct ggml_context * ctx = model->ctx;

    model->tok_embeddings = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab); // ("tok_embeddings.weight", {n_embd, n_vocab});
    model->norm           = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);          // ("norm.weight",           {n_embd});
    model->outputa        = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_vocab); // ("output.weight",         {n_embd, n_vocab});
    model->outputb        = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora);  // ("output.weight",         {n_embd, n_vocab});

    model->layers.resize(n_layer);
    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        // std::string layers_i = "layers." + std::to_string(i);

        layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);     // (layers_i + ".attention_norm.weight", {n_embd});

        layer.wqa = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_embd);        // (layers_i + ".attention.wq.weight", {n_embd, n_embd});
        layer.wqb = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora);        // (layers_i + ".attention.wq.weight", {n_embd, n_embd});
        layer.wka = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_embd);        // (layers_i + ".attention.wk.weight", {n_embd, n_embd});
        layer.wkb = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora);        // (layers_i + ".attention.wk.weight", {n_embd, n_embd});
        layer.wva = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_embd);        // (layers_i + ".attention.wv.weight", {n_embd, n_embd});
        layer.wvb = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora);        // (layers_i + ".attention.wv.weight", {n_embd, n_embd});
        layer.woa = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_embd);        // (layers_i + ".attention.wo.weight", {n_embd, n_embd});
        layer.wob = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora);        // (layers_i + ".attention.wo.weight", {n_embd, n_embd});

        layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);           // (layers_i + ".ffn_norm.weight", {n_embd});

        layer.w1 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);           // (layers_i + ".feed_forward.w1.weight", {n_embd, n_ff});
        layer.w2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_ff,   n_embd);         // (layers_i + ".feed_forward.w2.weight", { n_ff, n_embd});
        layer.w3 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);           // (layers_i + ".feed_forward.w3.weight", {n_embd, n_ff});
    }
}
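// ggml_set_param marks a tensor as a trainable parameter: the backward pass
// allocates a gradient for it and propagates into it.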
void set_param_model(struct llama_model * model) {
    const auto& hparams = model->hparams;

    const uint32_t n_layer = hparams.n_layer;

    struct ggml_context* ctx = model->ctx;

    ggml_set_param(ctx, model->tok_embeddings);
    ggml_set_param(ctx, model->norm);
    ggml_set_param(ctx, model->output);

    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        ggml_set_param(ctx, layer.attention_norm);
        ggml_set_param(ctx, layer.wq);
        ggml_set_param(ctx, layer.wk);
        ggml_set_param(ctx, layer.wv);
        ggml_set_param(ctx, layer.wo);
        ggml_set_param(ctx, layer.ffn_norm);
        ggml_set_param(ctx, layer.w1);
        ggml_set_param(ctx, layer.w2);
        ggml_set_param(ctx, layer.w3);
    }
}

void set_param_model_lora(struct llama_model_lora * model) {
    const auto& hparams = model->hparams;

    const uint32_t n_layer = hparams.n_layer;

    struct ggml_context* ctx = model->ctx;

    ggml_set_param(ctx, model->tok_embeddings);
    ggml_set_param(ctx, model->norm);
    ggml_set_param(ctx, model->outputa);
    ggml_set_param(ctx, model->outputb);

    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        ggml_set_param(ctx, layer.attention_norm);
        ggml_set_param(ctx, layer.wqa);
        ggml_set_param(ctx, layer.wqb);
        ggml_set_param(ctx, layer.wka);
        ggml_set_param(ctx, layer.wkb);
        ggml_set_param(ctx, layer.wva);
        ggml_set_param(ctx, layer.wvb);
        ggml_set_param(ctx, layer.woa);
        ggml_set_param(ctx, layer.wob);
        ggml_set_param(ctx, layer.ffn_norm);
        ggml_set_param(ctx, layer.w1);
        ggml_set_param(ctx, layer.w2);
        ggml_set_param(ctx, layer.w3);
    }
}
void randomize_model(struct llama_model * model, int seed, float mean, float std, float min, float max) {
    const auto & hparams = model->hparams;

    const uint32_t n_layer = hparams.n_layer;

    struct random_normal_distribution rnd;
    init_random_normal_distribution(&rnd, seed, mean, std, min, max);

    randomize_tensor_normal(model->tok_embeddings, model->tok_embeddings->n_dims, model->tok_embeddings->ne, &rnd);
    randomize_tensor_normal(model->norm,           model->norm->n_dims,           model->norm->ne,           &rnd);
    randomize_tensor_normal(model->output,         model->output->n_dims,         model->output->ne,         &rnd);

    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        randomize_tensor_normal(layer.attention_norm, layer.attention_norm->n_dims, layer.attention_norm->ne, &rnd);

        randomize_tensor_normal(layer.wq, layer.wq->n_dims, layer.wq->ne, &rnd);
        randomize_tensor_normal(layer.wk, layer.wk->n_dims, layer.wk->ne, &rnd);
        randomize_tensor_normal(layer.wv, layer.wv->n_dims, layer.wv->ne, &rnd);
        randomize_tensor_normal(layer.wo, layer.wo->n_dims, layer.wo->ne, &rnd);

        randomize_tensor_normal(layer.ffn_norm, layer.ffn_norm->n_dims, layer.ffn_norm->ne, &rnd);

        randomize_tensor_normal(layer.w1, layer.w1->n_dims, layer.w1->ne, &rnd);
        randomize_tensor_normal(layer.w2, layer.w2->n_dims, layer.w2->ne, &rnd);
        randomize_tensor_normal(layer.w3, layer.w3->n_dims, layer.w3->ne, &rnd);
    }
}

void randomize_model_lora(struct llama_model_lora * model, int seed, float mean, float std, float min, float max) {
    const auto & hparams = model->hparams;

    const uint32_t n_layer = hparams.n_layer;

    struct random_normal_distribution rnd;
    init_random_normal_distribution(&rnd, seed, mean, std, min, max);

    randomize_tensor_normal(model->tok_embeddings, model->tok_embeddings->n_dims, model->tok_embeddings->ne, &rnd);
    randomize_tensor_normal(model->norm,           model->norm->n_dims,           model->norm->ne,           &rnd);
    randomize_tensor_normal(model->outputa,        model->outputa->n_dims,        model->outputa->ne,        &rnd);
    randomize_tensor_normal(model->outputb,        model->outputb->n_dims,        model->outputb->ne,        &rnd);

    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        randomize_tensor_normal(layer.attention_norm, layer.attention_norm->n_dims, layer.attention_norm->ne, &rnd);

        randomize_tensor_normal(layer.wqa, layer.wqa->n_dims, layer.wqa->ne, &rnd);
        randomize_tensor_normal(layer.wqb, layer.wqb->n_dims, layer.wqb->ne, &rnd);
        randomize_tensor_normal(layer.wka, layer.wka->n_dims, layer.wka->ne, &rnd);
        randomize_tensor_normal(layer.wkb, layer.wkb->n_dims, layer.wkb->ne, &rnd);
        randomize_tensor_normal(layer.wva, layer.wva->n_dims, layer.wva->ne, &rnd);
        randomize_tensor_normal(layer.wvb, layer.wvb->n_dims, layer.wvb->ne, &rnd);
        randomize_tensor_normal(layer.woa, layer.woa->n_dims, layer.woa->ne, &rnd);
        randomize_tensor_normal(layer.wob, layer.wob->n_dims, layer.wob->ne, &rnd);

        randomize_tensor_normal(layer.ffn_norm, layer.ffn_norm->n_dims, layer.ffn_norm->ne, &rnd);

        randomize_tensor_normal(layer.w1, layer.w1->n_dims, layer.w1->ne, &rnd);
        randomize_tensor_normal(layer.w2, layer.w2->n_dims, layer.w2->ne, &rnd);
        randomize_tensor_normal(layer.w3, layer.w3->n_dims, layer.w3->ne, &rnd);
    }
}
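// KV cache sizing: one K slot and one V slot per layer, context position and
// batch row, each holding n_embd floats, i.e. n_elements = n_embd * n_layer *
// n_ctx * n_batch values apiece, plus ~2 MB of slack for context bookkeeping.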
bool init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) {
    const auto & hparams = model->hparams;

    const uint32_t n_ctx   = hparams.n_ctx;
    const uint32_t n_embd  = hparams.n_embd;
    const uint32_t n_layer = hparams.n_layer;

    const int64_t n_mem      = n_layer*n_ctx*n_batch;
    const int64_t n_elements = n_embd*n_mem;

    // cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);

    // struct ggml_init_params params;
    // params.mem_size   = cache.buf.size;
    // params.mem_buffer = cache.buf.addr;
    // params.no_alloc   = false;
    if (!cache->ctx) {
        struct ggml_init_params params;
        params.mem_size   = 2u*n_elements*ggml_type_size(GGML_TYPE_F32) + 2u*1024*1024;
        params.mem_buffer = NULL;
        params.no_alloc   = false;

        cache->ctx = ggml_init(params);

        if (!cache->ctx) {
            fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
            return false;
        }
    }

    cache->k = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
    cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);

    return true;
}

bool init_kv_cache_lora(struct llama_kv_cache* cache, struct llama_model_lora * model, int n_batch) {
    const auto & hparams = model->hparams;

    const uint32_t n_ctx   = hparams.n_ctx;
    const uint32_t n_embd  = hparams.n_embd;
    const uint32_t n_layer = hparams.n_layer;

    const int64_t n_mem      = n_layer*n_ctx*n_batch;
    const int64_t n_elements = n_embd*n_mem;

    // cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);

    // struct ggml_init_params params;
    // params.mem_size   = cache.buf.size;
    // params.mem_buffer = cache.buf.addr;
    // params.no_alloc   = false;
    if (!cache->ctx) {
        struct ggml_init_params params;
        params.mem_size   = 2u*n_elements*ggml_type_size(GGML_TYPE_F32) + 2u*1024*1024;
        params.mem_buffer = NULL;
        params.no_alloc   = false;

        cache->ctx = ggml_init(params);

        if (!cache->ctx) {
            fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
            return false;
        }
    }

    cache->k = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
    cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);

    return true;
}
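// Single-sequence forward pass. Per layer: RMSNorm -> self-attention (RoPE on
// Q/K, the new K/V written into the cache via ggml_set_1d/ggml_set_2d, causal
// mask, softmax) -> residual add -> RMSNorm -> SwiGLU feed-forward -> residual
// add. Finishes with the final norm and the lm_head matmul that produces the
// [n_vocab, N] logits.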
struct ggml_tensor * forward(
        struct llama_model    * model,
        struct llama_kv_cache * cache,
        struct ggml_context   * ctx0,
        struct ggml_cgraph    * gf,
        struct ggml_tensor    * tokens_input,
        const int               n_tokens,
        const int               n_past) {
    const int N = n_tokens;

    struct llama_kv_cache& kv_self = *cache;
    const auto & hparams = model->hparams;
    const int n_ctx   = hparams.n_ctx;
    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_head  = hparams.n_head;
    const int n_rot   = hparams.n_rot;

    struct ggml_tensor * tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
    memcpy(tokens->data, tokens_input->data, N*ggml_element_size(tokens));

    struct ggml_tensor * kc = kv_self.k;
    struct ggml_tensor * vc = kv_self.v;

    // inpL shape [n_embd,N,1,1]
    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model->tok_embeddings, tokens);
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        struct ggml_tensor * cur;

        // lctx.use_buf(ctx0, 0);

        // norm
        {
            // cur shape [n_embd,N,1,1]
            cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);

            // cur = attention_norm*cur
            cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model->layers[il].attention_norm, cur),
                        cur);
        }

        // self-attention
        {
            // compute Q and K and RoPE them
            // wq   shape [n_embd, n_embd, 1, 1]
            // wk   shape [n_embd, n_embd, 1, 1]
            // Qcur shape [n_embd/n_head, n_head, N, 1]
            // Kcur shape [n_embd/n_head, n_head, N, 1]
            struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
            struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);

            // store key and value to memory
            {
                // compute the transposed [N, n_embd] V matrix
                // wv   shape [n_embd, n_embd, 1, 1]
                // Vcur shape [n_embd, N, 1, 1]
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wv, cur), n_embd, N)));

                // kv_self.k shape [n_embd * n_ctx * n_layer, 1]
                // kv_self.v shape [n_embd * n_ctx * n_layer, 1]
                // k         shape [n_embd * N, 1]   == kv_self.k[:,n_past:n_past+N,il,0]
                // v         shape [N, n_embd, 1, 1] == kv_self.v[:,n_past:n_past+N,il,0]

                /* {
                    struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
                    struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
                            (   n_ctx)*ggml_element_size(kv_self.v),
                            (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));

                    // important: storing RoPE-ed version of K in the KV cache!
                    ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
                    ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
                } //*/

                kc = ggml_set_1d(ctx0, kc, ggml_reshape_1d(ctx0, Kcur, n_embd*N), (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
                vc = ggml_set_2d(ctx0, vc, Vcur, (   n_ctx)*ggml_element_size(kv_self.v),
                        (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
            }

            // Qcur shape [n_embd/n_head, n_head, N, 1]
            // Q    shape [n_embd/n_head, N, n_head, 1]
            struct ggml_tensor * Q =
                ggml_permute(ctx0,
                        Qcur,
                        0, 2, 1, 3);

            // kv_self.k shape [n_embd * n_ctx * n_layer, 1]
            // K         shape [n_embd/n_head, n_past + N, n_head, 1]
            struct ggml_tensor * K =
                ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0,
                            ggml_view_1d(ctx0, kc, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kc)*n_embd),
                            n_embd/n_head, n_head, n_past + N),
                        0, 2, 1, 3);

            // K * Q
            // KQ shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);

            // KQ_scaled = KQ / sqrt(n_embd/n_head)
            // KQ_scaled shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ_scaled =
                ggml_scale(ctx0,
                        KQ,
                        ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));

            // KQ_masked = mask_past(KQ_scaled)
            // KQ_masked shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);

            // KQ = soft_max(KQ_masked)
            // KQ_soft_max shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);

            // split cached V into n_head heads
            //// V shape [n_past + N, n_embd/n_head, n_head, 1]
            // V shape [n_past + N, n_embd/n_head, n_head, 1] == kv_self.v[:,:(n_past+N),il,1]
            struct ggml_tensor * V =
                ggml_view_3d(ctx0, vc,
                        n_past + N, n_embd/n_head, n_head,
                        n_ctx*ggml_element_size(vc),
                        n_ctx*ggml_element_size(vc)*n_embd/n_head,
                        il*n_ctx*ggml_element_size(vc)*n_embd);

            // KQV shape [n_embd/n_head, N, n_head, 1]
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);

            // KQV_merged = KQV.permute(0, 2, 1, 3)
            // KQV_merged shape [n_embd/n_head, n_head, N, 1]
            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
            // KQV_merged shape

            // cur = KQV_merged.contiguous().view(n_embd, N)
            // cur shape [n_embd,N,1,1]
            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, KQV_merged), n_embd, N);
            // cur = ggml_cpy(ctx0,
            //         KQV_merged,
            //         ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));

            // projection (no bias)
            // cur shape [n_embd,N,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].wo,
                    cur);
        }

        // lctx.use_buf(ctx0, 1);

        // inpFF shape [n_embd,N,1,1]
        struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);

        // feed-forward network
        {
            // norm
            {
                // cur shape [n_embd,N,1,1]
                cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps);

                // cur = ffn_norm*cur
                // cur shape [n_embd,N,1,1]
                cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model->layers[il].ffn_norm, cur),
                        cur);
            }

            // tmp shape [n_ff,N,1,1]
            struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
                    model->layers[il].w3,
                    cur);

            // cur shape [n_ff,N,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].w1,
                    cur);

            // SILU activation
            // cur shape [n_ff,N,1,1]
            cur = ggml_silu(ctx0, cur);

            // cur shape [n_ff,N,1,1]
            cur = ggml_mul(ctx0, cur, tmp);

            // cur shape [n_embd,N,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].w2,
                    cur);
        }

        // cur shape [n_embd,N,1,1]
        cur = ggml_add(ctx0, cur, inpFF);

        // input for next layer
        // inpL shape [n_embd,N,1,1]
        inpL = cur;
    }

    // norm
    {
        // inpL shape [n_embd,N,1,1]
        inpL = ggml_rms_norm(ctx0, inpL, rms_norm_eps);

        // inpL = norm*inpL
        // inpL shape [n_embd,N,1,1]
        inpL = ggml_mul(ctx0,
                    ggml_repeat(ctx0, model->norm, inpL),
                    inpL);

        //embeddings = inpL;
    }

    // lm_head
    // inpL shape [n_vocab,N,1,1]
    inpL = ggml_mul_mat(ctx0, model->output, inpL);

    // run the computation
    ggml_build_forward_expand(gf, inpL);

    return inpL;
}
void assert_shape_1d(struct ggml_tensor * tensor, int64_t ne0) {
    GGML_ASSERT(tensor->n_dims == 1);
    GGML_ASSERT(tensor->ne[0] == ne0);
}

void assert_shape_2d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1) {
    GGML_ASSERT(tensor->n_dims == 2);
    GGML_ASSERT(tensor->ne[0] == ne0);
    GGML_ASSERT(tensor->ne[1] == ne1);
}

void assert_shape_3d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int64_t ne2) {
    GGML_ASSERT(tensor->n_dims == 3);
    GGML_ASSERT(tensor->ne[0] == ne0);
    GGML_ASSERT(tensor->ne[1] == ne1);
    GGML_ASSERT(tensor->ne[2] == ne2);
}

void assert_shape_4d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
    GGML_ASSERT(tensor->n_dims == 4);
    GGML_ASSERT(tensor->ne[0] == ne0);
    GGML_ASSERT(tensor->ne[1] == ne1);
    GGML_ASSERT(tensor->ne[2] == ne2);
    GGML_ASSERT(tensor->ne[3] == ne3);
}
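// Batched variant of forward(). The math is identical, but every activation
// carries an extra n_batch dimension, the flat caches are addressed as
// k: [n_embd, n_ctx, n_batch, n_layer] and v: [n_ctx, n_embd, n_batch, n_layer],
// and the assert_shape_* helpers above check every intermediate shape.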
struct ggml_tensor * forward_batch(
        struct llama_model    * model,
        struct llama_kv_cache * cache,
        struct ggml_context   * ctx0,
        struct ggml_cgraph    * gf,
        struct ggml_tensor    * tokens_input,
        const int               n_tokens,
        const int               n_past,
        const int               n_batch) {
    const int N = n_tokens;

    struct llama_kv_cache& kv_self = *cache;
    const auto & hparams = model->hparams;
    const int n_ctx   = hparams.n_ctx;
    const int n_vocab = hparams.n_vocab;
    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_head  = hparams.n_head;
    const int n_rot   = hparams.n_rot;
    const int n_ff    = get_n_ff(&hparams);

    struct ggml_tensor * tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N*n_batch);
    memcpy(tokens->data, tokens_input->data, ggml_element_size(tokens)*N*n_batch);

    struct ggml_tensor * kc = kv_self.k;
    struct ggml_tensor * vc = kv_self.v;

    // inpL shape [n_embd,N*n_batch,1]
    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model->tok_embeddings, tokens);
    assert_shape_2d(inpL, n_embd, N*n_batch);
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        struct ggml_tensor * cur;

        // lctx.use_buf(ctx0, 0);

        // norm
        {
            // cur shape [n_embd,N*n_batch,1,1]
            cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
            assert_shape_2d(cur, n_embd, N*n_batch);

            // cur = attention_norm*cur
            cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model->layers[il].attention_norm, cur),
                        cur);
            assert_shape_2d(cur, n_embd, N*n_batch);
        }

        // self-attention
        {
            // compute Q and K and RoPE them
            // wq   shape [n_embd, n_embd, 1, 1]
            // wk   shape [n_embd, n_embd, 1, 1]
            // Qcur shape [n_embd/n_head, n_head, N, n_batch]
            // Kcur shape [n_embd/n_head, n_head, N, n_batch]
            struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N, n_batch), n_past, n_rot, 0, 0);
            struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N, n_batch), n_past, n_rot, 0, 0);
            assert_shape_4d(Qcur, n_embd/n_head, n_head, N, n_batch);
            assert_shape_4d(Kcur, n_embd/n_head, n_head, N, n_batch);

            // store key and value to memory
            {
                // compute the transposed [N, n_embd] V matrix
                // wv   shape [n_embd, n_embd, 1, 1]
                // Vcur shape [N, n_embd, n_batch, 1]
                struct ggml_tensor * Vcur = ggml_cont(ctx0,
                    ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0,
                            ggml_mul_mat(ctx0,
                                model->layers[il].wv,
                                cur),
                            n_embd, N, n_batch),
                        1, 0, 2, 3));
                assert_shape_3d(Vcur, N, n_embd, n_batch);

                // kv_self.k shape [n_embd * n_ctx * n_batch * n_layer]
                // kv_self.v shape [n_ctx * n_embd * n_batch * n_layer]
                // k         shape [n_embd * N, n_batch]    == kv_self.k[:,n_past:n_past+N,:,il]
                // v         shape [N, n_embd, n_batch, 1]  == kv_self.v[:,n_past:n_past+N,:,il]

                /* {
                    struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
                    struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
                            (   n_ctx)*ggml_element_size(kv_self.v),
                            (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));

                    // important: storing RoPE-ed version of K in the KV cache!
                    ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
                    ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
                } //*/

                kc = ggml_set_2d(ctx0, kc,
                        ggml_reshape_2d(ctx0, Kcur, n_embd*N, n_batch),
                        ggml_element_size(kc)*n_embd*n_ctx,
                        (ggml_element_size(kc)*n_embd)*(il*n_batch*n_ctx + n_past));
                vc = ggml_set_2d(ctx0, vc,
                        ggml_reshape_2d(ctx0, Vcur, N*n_embd, n_batch),
                        ggml_element_size(vc)*n_ctx*n_embd,
                        ggml_element_size(vc)*(n_past + il*n_embd*n_batch*n_ctx));

                assert_shape_1d(kc, n_embd * n_ctx * n_batch * n_layer);
                assert_shape_1d(vc, n_embd * n_ctx * n_batch * n_layer);
            }

            // Qcur shape [n_embd/n_head, n_head, N, n_batch]
            // Q    shape [n_embd/n_head, N, n_head, n_batch]
            struct ggml_tensor * Q =
                ggml_permute(ctx0,
                        Qcur,
                        0, 2, 1, 3);
            assert_shape_4d(Q, n_embd/n_head, N, n_head, n_batch);

            // kv_self.k shape [n_embd * n_ctx * n_batch * n_layer]
            // K         shape [n_embd/n_head, n_past + N, n_head, n_batch]
            struct ggml_tensor * K =
                ggml_permute(ctx0,
                        ggml_reshape_4d(ctx0,
                            ggml_view_3d(ctx0,
                                kc,
                                n_embd,
                                (n_past + N),
                                n_batch,
                                n_embd*ggml_element_size(kc),
                                n_ctx*n_embd*ggml_element_size(kc),
                                il*n_batch*n_ctx*n_embd*ggml_element_size(kc)),
                            n_embd/n_head, n_head, n_past + N, n_batch),
                        0, 2, 1, 3);
            assert_shape_4d(K, n_embd/n_head, n_past + N, n_head, n_batch);

            // K * Q
            // KQ shape [n_past + N, N, n_head, n_batch]
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
            assert_shape_4d(KQ, n_past + N, N, n_head, n_batch);

            // KQ_scaled = KQ / sqrt(n_embd/n_head)
            // KQ_scaled shape [n_past + N, N, n_head, n_batch]
            struct ggml_tensor * KQ_scaled =
                ggml_scale(ctx0,
                        KQ,
                        ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
            assert_shape_4d(KQ_scaled, n_past + N, N, n_head, n_batch);

            // KQ_masked = mask_past(KQ_scaled)
            // KQ_masked shape [n_past + N, N, n_head, n_batch]
            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
            assert_shape_4d(KQ_masked, n_past + N, N, n_head, n_batch);

            // KQ = soft_max(KQ_masked)
            // KQ_soft_max shape [n_past + N, N, n_head, n_batch]
            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
            assert_shape_4d(KQ_soft_max, n_past + N, N, n_head, n_batch);

            // split cached V into n_head heads
            // kv_self.v shape [n_ctx * n_embd * n_batch * n_layer]
            // V shape [n_past + N, n_embd/n_head, n_head, n_batch] == kv_self.v[:(n_past+N),:,:,il]
            struct ggml_tensor * V =
                ggml_view_4d(ctx0, vc,
                        n_past + N, n_embd/n_head, n_head, n_batch,
                        ggml_element_size(vc)*n_ctx,
                        ggml_element_size(vc)*n_ctx*n_embd/n_head,
                        ggml_element_size(vc)*n_ctx*n_embd,
                        il*n_batch*n_ctx*n_embd*ggml_element_size(vc));
            assert_shape_4d(V, n_past + N, n_embd/n_head, n_head, n_batch);

            // KQV shape [n_embd/n_head, N, n_head, n_batch]
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
            assert_shape_4d(KQV, n_embd/n_head, N, n_head, n_batch);

            // KQV_merged = KQV.permute(0, 2, 1, 3)
            // KQV_merged shape [n_embd/n_head, n_head, N, n_batch]
            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
            assert_shape_4d(KQV_merged, n_embd/n_head, n_head, N, n_batch);
            // KQV_merged shape

            // cur = KQV_merged.contiguous().view(n_embd, N)
            // cur shape [n_embd,N*n_batch,1,1]
            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, KQV_merged), n_embd, N*n_batch);
            assert_shape_2d(cur, n_embd, N*n_batch);
            // cur = ggml_cpy(ctx0,
            //         KQV_merged,
            //         ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));

            // projection (no bias)
            // cur shape [n_embd,N*n_batch,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].wo,
                    cur);
            assert_shape_2d(cur, n_embd, N*n_batch);
        }

        // lctx.use_buf(ctx0, 1);

        // inpFF shape [n_embd,N*n_batch,1,1]
        struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
        assert_shape_2d(inpFF, n_embd, N*n_batch);

        // feed-forward network
        {
            // norm
            {
                // cur shape [n_embd,N*n_batch,1,1]
                cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps);
                assert_shape_2d(cur, n_embd, N*n_batch);

                // cur = ffn_norm*cur
                // cur shape [n_embd,N*n_batch,1,1]
                cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model->layers[il].ffn_norm, cur),
                        cur);
                assert_shape_2d(cur, n_embd, N*n_batch);
            }

            // tmp shape [n_ff,N*n_batch,1,1]
            struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
                    model->layers[il].w3,
                    cur);
            assert_shape_2d(tmp, n_ff, N*n_batch);

            // cur shape [n_ff,N*n_batch,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].w1,
                    cur);
            assert_shape_2d(cur, n_ff, N*n_batch);

            // SILU activation
            // cur shape [n_ff,N*n_batch,1,1]
            cur = ggml_silu(ctx0, cur);
            assert_shape_2d(cur, n_ff, N*n_batch);

            // cur shape [n_ff,N*n_batch,1,1]
            cur = ggml_mul(ctx0, cur, tmp);
            assert_shape_2d(cur, n_ff, N*n_batch);

            // cur shape [n_embd,N*n_batch,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].w2,
                    cur);
            assert_shape_2d(cur, n_embd, N*n_batch);
        }

        // cur shape [n_embd,N*n_batch,1,1]
        cur = ggml_add(ctx0, cur, inpFF);
        assert_shape_2d(cur, n_embd, N*n_batch);

        // input for next layer
        // inpL shape [n_embd,N*n_batch,1,1]
        inpL = cur;
        assert_shape_2d(inpL, n_embd, N*n_batch);
    }

    // norm
    {
        // inpL shape [n_embd,N*n_batch,1,1]
        inpL = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
        assert_shape_2d(inpL, n_embd, N*n_batch);

        // inpL = norm*inpL
        // inpL shape [n_embd,N*n_batch,1,1]
        inpL = ggml_mul(ctx0,
                    ggml_repeat(ctx0, model->norm, inpL),
                    inpL);
        assert_shape_2d(inpL, n_embd, N*n_batch);

        //embeddings = inpL;
    }

    // lm_head
    // inpL shape [n_vocab,N*n_batch,1,1]
    inpL = ggml_mul_mat(ctx0, model->output, inpL);
    assert_shape_2d(inpL, n_vocab, N*n_batch);

    {
        // inpL shape [n_vocab,N,n_batch,1]
        inpL = ggml_reshape_3d(ctx0,
                        inpL,
                        n_vocab, N, n_batch);
        assert_shape_3d(inpL, n_vocab, N, n_batch);
    }

    // run the computation
    ggml_build_forward_expand(gf, inpL);

    return inpL;
}
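// LoRA variant of forward(): each attention projection and the lm_head are
// applied as two chained matmuls (the *b factor first, then *a), so the
// full-rank n_embd x n_embd weights are never materialized. KV cache handling
// matches forward().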
struct ggml_tensor * forward_lora(
        struct llama_model_lora * model,
        struct llama_kv_cache   * cache,
        struct ggml_context     * ctx0,
        struct ggml_cgraph      * gf,
        struct ggml_tensor      * tokens_input,
        const int                 n_tokens,
        const int                 n_past) {
    const int N = n_tokens;

    struct llama_kv_cache& kv_self = *cache;
    const auto & hparams = model->hparams;

    const int n_ctx   = hparams.n_ctx;
    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_head  = hparams.n_head;
    const int n_rot   = hparams.n_rot;

    struct ggml_tensor * tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
    memcpy(tokens->data, tokens_input->data, N*ggml_element_size(tokens));

    struct ggml_tensor * kc = kv_self.k;
    struct ggml_tensor * vc = kv_self.v;

    // inpL shape [n_embd,N,1,1]
    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model->tok_embeddings, tokens);
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        struct ggml_tensor * cur;

        // norm
        {
            // cur shape [n_embd,N,1,1]
            cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);

            // cur = attention_norm*cur
            cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model->layers[il].attention_norm, cur),
                        cur);
        }

        // self-attention
        {
            // compute Q and K and RoPE them
            // wq   shape [n_embd, n_embd, 1, 1]
            // wk   shape [n_embd, n_embd, 1, 1]
            // Qcur shape [n_embd/n_head, n_head, N, 1]
            // Kcur shape [n_embd/n_head, n_head, N, 1]
            struct ggml_tensor * Qcur = ggml_rope(ctx0,
                                            ggml_reshape_3d(ctx0,
                                                ggml_mul_mat(ctx0,
                                                    model->layers[il].wqa,
                                                    ggml_mul_mat(ctx0,
                                                        model->layers[il].wqb,
                                                        cur)),
                                                n_embd/n_head, n_head, N),
                                            n_past, n_rot, 0, 0);
            struct ggml_tensor * Kcur = ggml_rope(ctx0,
                                            ggml_reshape_3d(ctx0,
                                                ggml_mul_mat(ctx0,
                                                    model->layers[il].wka,
                                                    ggml_mul_mat(ctx0,
                                                        model->layers[il].wkb,
                                                        cur)),
                                                n_embd/n_head, n_head, N),
                                            n_past, n_rot, 0, 0);

            // store key and value to memory
            {
                // compute the transposed [N, n_embd] V matrix
                // wv   shape [n_embd, n_embd, 1, 1]
                // Vcur shape [n_embd, N, 1, 1]
                struct ggml_tensor * Vcur = ggml_cont(ctx0,
                                                ggml_transpose(ctx0,
                                                    ggml_reshape_2d(ctx0,
                                                        ggml_mul_mat(ctx0,
                                                            model->layers[il].wva,
                                                            ggml_mul_mat(ctx0,
                                                                model->layers[il].wvb,
                                                                cur)),
                                                        n_embd, N)));

                // kv_self.k shape [n_embd * n_ctx * n_layer, 1]
                // kv_self.v shape [n_embd * n_ctx * n_layer, 1]
                // k         shape [n_embd * N, 1]   == kv_self.k[:,n_past:n_past+N,il,0]
                // v         shape [N, n_embd, 1, 1] == kv_self.v[:,n_past:n_past+N,il,0]

                /* {
                    struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
                    struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
                            (   n_ctx)*ggml_element_size(kv_self.v),
                            (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));

                    // important: storing RoPE-ed version of K in the KV cache!
                    ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
                    ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
                } //*/

                kc = ggml_set_1d(ctx0, kc, ggml_reshape_1d(ctx0, Kcur, n_embd*N), (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
                vc = ggml_set_2d(ctx0, vc, Vcur, (   n_ctx)*ggml_element_size(kv_self.v),
                        (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
            }

            // Qcur shape [n_embd/n_head, n_head, N, 1]
            // Q    shape [n_embd/n_head, N, n_head, 1]
            struct ggml_tensor * Q =
                ggml_permute(ctx0,
                        Qcur,
                        0, 2, 1, 3);

            // kv_self.k shape [n_embd * n_ctx * n_layer, 1]
            // K         shape [n_embd/n_head, n_past + N, n_head, 1]
            struct ggml_tensor * K =
                ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0,
                            ggml_view_1d(ctx0, kc, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kc)*n_embd),
                            n_embd/n_head, n_head, n_past + N),
                        0, 2, 1, 3);

            // K * Q
            // KQ shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);

            // KQ_scaled = KQ / sqrt(n_embd/n_head)
            // KQ_scaled shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ_scaled =
                ggml_scale(ctx0,
                        KQ,
                        ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));

            // KQ_masked = mask_past(KQ_scaled)
            // KQ_masked shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);

            // KQ = soft_max(KQ_masked)
            // KQ_soft_max shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);

            // split cached V into n_head heads
            //// V shape [n_past + N, n_embd/n_head, n_head, 1]
            // V shape [n_past + N, n_embd/n_head, n_head, 1] == kv_self.v[:,:(n_past+N),il,1]
            struct ggml_tensor * V =
                ggml_view_3d(ctx0, vc,
                        n_past + N, n_embd/n_head, n_head,
                        n_ctx*ggml_element_size(vc),
                        n_ctx*ggml_element_size(vc)*n_embd/n_head,
                        il*n_ctx*ggml_element_size(vc)*n_embd);

            // KQV shape [n_embd/n_head, N, n_head, 1]
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);

            // KQV_merged = KQV.permute(0, 2, 1, 3)
            // KQV_merged shape [n_embd/n_head, n_head, N, 1]
            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
            // KQV_merged shape

            // cur = KQV_merged.contiguous().view(n_embd, N)
            // cur shape [n_embd,N,1,1]
            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, KQV_merged), n_embd, N);
            // cur = ggml_cpy(ctx0,
            //         KQV_merged,
            //         ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));

            // projection (no bias)
            // cur shape [n_embd,N,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].woa,
                    ggml_mul_mat(ctx0,
                        model->layers[il].wob,
                        cur));
        }

        // inpFF shape [n_embd,N,1,1]
        struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);

        // feed-forward network
        {
            // norm
            {
                // cur shape [n_embd,N,1,1]
                cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps);

                // cur = ffn_norm*cur
                // cur shape [n_embd,N,1,1]
                cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model->layers[il].ffn_norm, cur),
                        cur);
            }

            // tmp shape [n_ff,N,1,1]
            struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
                    model->layers[il].w3,
                    cur);

            // cur shape [n_ff,N,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].w1,
                    cur);

            // SILU activation
            // cur shape [n_ff,N,1,1]
            cur = ggml_silu(ctx0, cur);

            // cur shape [n_ff,N,1,1]
            cur = ggml_mul(ctx0, cur, tmp);

            // cur shape [n_embd,N,1,1]
            cur = ggml_mul_mat(ctx0,
                    model->layers[il].w2,
                    cur);
        }

        // cur shape [n_embd,N,1,1]
        cur = ggml_add(ctx0, cur, inpFF);

        // input for next layer
        // inpL shape [n_embd,N,1,1]
        inpL = cur;
    }

    // norm
    {
        // inpL shape [n_embd,N,1,1]
        inpL = ggml_rms_norm(ctx0, inpL, rms_norm_eps);

        // inpL = norm*inpL
        // inpL shape [n_embd,N,1,1]
        inpL = ggml_mul(ctx0,
                    ggml_repeat(ctx0, model->norm, inpL),
                    inpL);

        //embeddings = inpL;
    }

    // lm_head
    // inpL shape [n_vocab,N,1,1]
    inpL = ggml_mul_mat(ctx0,
                model->outputa,
                ggml_mul_mat(ctx0,
                    model->outputb,
                    inpL));

    // ggml_set_scratch(ctx0, { 0, 0, nullptr, });
    // run the computation
    ggml_build_forward_expand(gf, inpL);

    return inpL;
}
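// Greedy sampling: for each row of logits, record the argmax token in
// best_samples and write the numerically stabilized softmax (logits shifted by
// the row max before exponentiation) into probs.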
void sample_softmax(struct ggml_tensor * logits, struct ggml_tensor * probs, struct ggml_tensor * best_samples) {
    assert(logits->n_dims == 2);
    assert(probs->n_dims == 2);
    assert(best_samples->n_dims == 1);
    assert(logits->ne[1] == best_samples->ne[0]);
    assert(logits->ne[0] == probs->ne[0]);
    assert(logits->ne[1] == probs->ne[1]);
    for (int i = 0; i < logits->ne[1]; ++i) {
        float max_logit = ggml_get_f32_1d(logits, i * logits->ne[0]);
        ggml_set_i32_1d(best_samples, i, 0);
        for (int k = 0; k < logits->ne[0]; ++k) {
            float logit = ggml_get_f32_1d(logits, i * logits->ne[0] + k);
            if (logit > max_logit) {
                max_logit = logit;
                ggml_set_i32_1d(best_samples, i, k);
            }
        }
        float psum = 0;
        for (int k = 0; k < logits->ne[0]; ++k) {
            float logit = ggml_get_f32_1d(logits, i * logits->ne[0] + k);
            float p = (logit == -INFINITY) ? 0 : expf(logit - max_logit);
            psum += p;
            ggml_set_f32_1d(probs, i * probs->ne[0] + k, p);
        }
        for (int k = 0; k < logits->ne[0]; ++k) {
            float p = ggml_get_f32_1d(probs, i*probs->ne[0] + k);
            ggml_set_f32_1d(probs, i * probs->ne[0] + k, p / psum);
        }
    }
}
void sample_softmax_batch(struct ggml_context * ctx, struct ggml_tensor * logits, struct ggml_tensor * probs, struct ggml_tensor * best_samples) {
    GGML_ASSERT(best_samples->n_dims == 2);
    GGML_ASSERT(logits->n_dims == 3);
    GGML_ASSERT(probs->n_dims == 3);
    int n_tokens = best_samples->ne[0];
    int n_batch  = best_samples->ne[1];
    int n_vocab  = logits->ne[0];
    GGML_ASSERT(n_tokens == logits->ne[1]);
    GGML_ASSERT(n_batch  == logits->ne[2]);
    GGML_ASSERT(n_vocab  == probs->ne[0]);
    GGML_ASSERT(n_tokens == probs->ne[1]);
    GGML_ASSERT(n_batch  == probs->ne[2]);

    for (int k = 0; k < n_batch; ++k) {
        struct ggml_tensor * best_samples_k = ggml_view_1d(ctx,
                                                best_samples,
                                                best_samples->ne[0],
                                                k*best_samples->nb[1]);
        struct ggml_tensor * logits_k       = ggml_view_2d(ctx,
                                                logits,
                                                logits->ne[0],
                                                logits->ne[1],
                                                logits->nb[1],
                                                k*logits->nb[2]);
        struct ggml_tensor * probs_k        = ggml_view_2d(ctx,
                                                probs,
                                                probs->ne[0],
                                                probs->ne[1],
                                                probs->nb[1],
                                                k*probs->nb[2]);
        sample_softmax(logits_k, probs_k, best_samples_k);
    }
}

void print_row(struct ggml_tensor * probs, int i) {
    for (int k = 0; k < probs->ne[0]; ++k) {
        float p = ggml_get_f32_1d(probs, i*probs->ne[0] + k);
        printf(" %.2f", p);
    }
    printf("\n");
}

void print_matrix(struct ggml_tensor * probs) {
    assert(probs->n_dims == 2);
    for (int i = 0; i < probs->ne[1]; ++i) {
        for (int k = 0; k < probs->ne[0]; ++k) {
            float p = ggml_get_f32_1d(probs, i*probs->ne[0] + k);
            printf(" %.2f", p);
        }
        printf("\n");
    }
}

void print_token(int token, int n_vocab) {
    for (int k = 0; k < token; ++k) {
        printf(" ");
    }
    printf("X");
    for (int k = token+1; k < n_vocab; ++k) {
        printf(" ");
    }
    printf("\n");
}

void print_tokens(struct ggml_tensor * tokens, int n_vocab) {
    for (int i=0; i<tokens->ne[0]; ++i) {
        int token = ggml_get_i32_1d(tokens, i);
        print_token(token, n_vocab);
    }
}
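// Synthetic training task: the token at each position follows a sine wave
// quantized onto the vocabulary, so the "language" the baby model learns is a
// discretized sinusoid. targets is filled with -1.0f and the slot of the
// correct next token is set to +1.0f.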
  1171. void get_example_targets(int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * targets) {
  1172. int n_tokens = tokens_input->ne[0];
  1173. int n_vocab = targets->ne[0];
  1174. float randomness = 0.0f;
  1175. // ggml_set_zero(targets);
  1176. ggml_set_f32(targets, -1.0f);
  1177. ggml_set_i32_1d(tokens_input, 0, 0);
  1178. for (int i=1; i<n_tokens+1; ++i) {
  1179. float x = example_id + i * 3.14159f * 2.0f * 1.0f * 0.5f / n_tokens;
  1180. float y = sinf(x);//*cosf(x*1.1f+1.0f);
  1181. float z = (y+1.0f)*0.5f; // scale to [0..1]
  1182. z += (frand()-0.5f)*(randomness/n_vocab);
  1183. z = (z < 0.0f) ? 0.0f : (z > 1.0f) ? 1.0f : z; // clamp to [0..1]
  1184. int token = std::max(1,std::min(1+(int)(z*(float)(n_vocab-1)), n_vocab-1));
  1185. ggml_set_f32_1d(targets, (i-1)*n_vocab + token, +1.0f);
  1186. if (i<n_tokens) {
  1187. ggml_set_i32_1d(tokens_input, i, token);
  1188. }
  1189. }
  1190. }
void get_example_targets_batch(struct ggml_context * ctx, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * targets) {
    GGML_ASSERT(tokens_input->n_dims == 2);
    GGML_ASSERT(     targets->n_dims == 3);
    int n_tokens = tokens_input->ne[0];
    int n_batch  = tokens_input->ne[1];
    GGML_ASSERT(n_tokens == targets->ne[1]);
    GGML_ASSERT(n_batch  == targets->ne[2]);
    for (int k=0; k<n_batch; ++k) {
        struct ggml_tensor * tokens_input_k = ggml_view_1d(ctx,
            tokens_input,
            tokens_input->ne[0],
            k*tokens_input->nb[1]);
        struct ggml_tensor * targets_k = ggml_view_2d(ctx,
            targets,
            targets->ne[0],
            targets->ne[1],
            targets->nb[1],
            k*targets->nb[2]);
        get_example_targets(example_id*n_batch + k, tokens_input_k, targets_k);
    }
}

void lshift_examples(struct ggml_tensor * tokens_input, struct ggml_tensor * targets, int n_shift) {
    int n_tokens = tokens_input->ne[0];
    int n_vocab  = targets->ne[0];
    for (int i=0; i<n_tokens-n_shift; ++i) {
        ggml_set_i32_1d(tokens_input, i, ggml_get_i32_1d(tokens_input, i + n_shift));
        for (int k=0; k<n_vocab; ++k) {
            ggml_set_f32_1d(targets, i*n_vocab + k, ggml_get_f32_1d(targets, (i + n_shift)*n_vocab + k));
        }
    }
}
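
// Illustrative effect for n_shift=1: tokens [0, 5, 7, 3] become [5, 7, 3, 3];
// positions past n_tokens-n_shift keep stale copies, so the caller is
// expected to refill whatever window it samples from (the generation loop
// below rewrites position 0 and the last position of its sample window).
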
struct ggml_tensor * square_error_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
    // todo: instead of a-b: a[1:]-b[:-1]
    return ggml_sum(ctx, ggml_sqr(ctx, ggml_sub(ctx, a, b)));
}
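
// I.e. the scalar loss sum_i (a_i - b_i)^2 over all elements; with the -1/+1
// target encoding above, this pushes the logit of the chosen token towards +1
// and every other logit towards -1.
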
struct ggml_tensor * cross_entropy_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
    const float eps = 1e-3f;
    return
        ggml_sum(ctx,
            ggml_neg(ctx,
                ggml_sum_rows(ctx,
                    ggml_mul(ctx,
                        ggml_soft_max(ctx, a),
                        ggml_log(ctx,
                            ggml_add1(ctx,
                                ggml_soft_max(ctx, b),
                                ggml_new_f32(ctx, eps)))))));
}
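
// I.e. loss = -sum over rows of softmax(a) . log(softmax(b) + eps), where a
// carries the targets and b the predicted logits; eps keeps log() away from
// zero when a predicted probability underflows. Kept as an alternative to
// square_error_loss (see the commented-out call in main).
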
int main(int argc, char ** argv) {
    // (argc is always >= 1 on normal invocations, so this guard is effectively a no-op)
    if (argc < 1) {
        fprintf(stderr, "usage: %s\n", argv[0]);
        return 1;
    }

    struct ggml_init_params lcparams;
    lcparams.mem_size   = 1024ll*1024ll*1024ll;
    lcparams.mem_buffer = NULL;
    lcparams.no_alloc   = false;

    struct llama_model model;
    model.hparams.n_vocab = 8;
    model.hparams.n_ctx   = 8;
    model.hparams.n_embd  = 32;
    model.hparams.n_mult  = 2;
    model.hparams.n_head  = 8;
    model.hparams.n_layer = 1;
    model.hparams.n_rot   = std::min(16u, model.hparams.n_embd / model.hparams.n_head);
    // model.hparams.n_embd  = 32;
    // model.hparams.n_mult  = 2;
    // model.hparams.n_head  = 4;
    // model.hparams.n_layer = 8;
    // model.hparams.n_rot   = 8;

    model.ctx = ggml_init(lcparams);
    printf("init model\n");
    init_model(&model);
    set_param_model(&model);
    randomize_model(&model, 1337, 0.0f, 1.0f, -1.0f, +1.0f);
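
    // With these hyperparameters the model is deliberately tiny: an 8-token
    // vocabulary, 8-token context, 32-dim embeddings split over 8 heads (head
    // dim 4, so n_rot = min(16, 32/8) = 4) and a single layer; small enough to
    // train from scratch on the CPU inside the 1 GB arena reserved above.
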
    /*
    struct llama_model_lora model_lora;
    // model.hparams.n_vocab = 6;
    // model.hparams.n_ctx   = 64;
    // model.hparams.n_embd  = 128;
    // model.hparams.n_mult  = 2;
    // model.hparams.n_head  = 8;
    // model.hparams.n_layer = 6;
    // model.hparams.n_rot   = model.hparams.n_embd / model.hparams.n_head;
    model_lora.hparams.n_vocab = 16;
    model_lora.hparams.n_ctx   = 32;
    model_lora.hparams.n_embd  = 256;
    model_lora.hparams.n_mult  = 2;
    model_lora.hparams.n_head  = 16;
    model_lora.hparams.n_layer = 1;
    model_lora.hparams.n_lora  = 64;
    model_lora.hparams.n_rot   = MIN(16, model_lora.hparams.n_embd / model_lora.hparams.n_head);
    // model.hparams.n_rot = (model.hparams.n_embd / model.hparams.n_head) / 2;
    // model.hparams.n_embd  = 32;
    // model.hparams.n_mult  = 2;
    // model.hparams.n_head  = 4;
    // model.hparams.n_layer = 8;
    // model.hparams.n_rot   = 8;

    model_lora.ctx = ggml_init(lcparams);
    printf("init model_lora\n");
    init_model_lora(&model_lora);
    set_param_model_lora(&model_lora);
    randomize_model_lora(&model_lora, 1337, 0.0f, 1.0f, -1.0f, +1.0f);
    */

    int n_batch = 8;

    // key + value cache for the self attention
    struct llama_kv_cache kv_self;
    printf("init_kv_cache\n");
    kv_self.ctx = model.ctx;
    init_kv_cache(&kv_self, &model, n_batch);
    //init_kv_cache_lora(&kv_self, &model_lora);

    size_t    compute_size = 1024ll*1024ll*1024ll;
    uint8_t * compute_addr = new uint8_t[compute_size];

    int n_examples = 256;
    int n_tokens   = model.hparams.n_ctx;
    int n_vocab    = model.hparams.n_vocab;

    std::vector<uint8_t> work_buffer;
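
    // Note: every training iteration below re-creates a ggml context on top of
    // this same fixed 1 GB compute arena, so per-example graph allocations are
    // reset for free at the start of each iteration instead of accumulating
    // across examples.
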
    for (int ex=0; ex<n_examples; ++ex) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ compute_size,
            /*.mem_buffer =*/ compute_addr,
            /*.no_alloc   =*/ false,
        };

        struct ggml_context * ctx0 = ggml_init(params);

        struct ggml_tensor * after_opt_best_samples = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_batch);
        struct ggml_tensor * after_opt_probs        = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_vocab, n_tokens, n_batch);
        struct ggml_tensor * tokens_input           = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_batch);
        struct ggml_tensor * targets                = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_vocab, n_tokens, n_batch);

        int n_past = 0;

        ggml_cgraph gf = {};

        get_example_targets_batch(ctx0, 64*ex+0, tokens_input, targets);

        struct ggml_tensor * logits = forward_batch(&model, &kv_self, ctx0, &gf, tokens_input, n_tokens, n_past, n_batch);
        // struct ggml_tensor * e = cross_entropy_loss(ctx0, targets, logits);
        struct ggml_tensor * e = square_error_loss(ctx0, targets, logits);

        ggml_build_forward_expand(&gf, e);
        ggml_graph_compute_helper(work_buffer, &gf, /*n_threads*/ 1);

        float error_before_opt = ggml_get_f32_1d(e, 0);

        struct ggml_opt_params opt_params_lbfgs = ggml_opt_default_params(GGML_OPT_LBFGS);
        opt_params_lbfgs.print_forward_graph  = false;
        opt_params_lbfgs.print_backward_graph = false;
        opt_params_lbfgs.lbfgs.n_iter         = 16;
        ggml_opt(ctx0, opt_params_lbfgs, e);

        ggml_build_forward_expand(&gf, e);
        ggml_graph_compute_helper(work_buffer, &gf, /*n_threads*/ 1);

        float error_after_opt = ggml_get_f32_1d(e, 0);

        if (ex % 8 == 0) {
            printf("Example %d\n", (ex+1));
            printf("error_before_opt: %.2f\n", error_before_opt);
            printf("error_after_opt: %.2f\n", error_after_opt);
        }

        if (ex % 64 == 0) {
            sample_softmax_batch(ctx0, logits, after_opt_probs, after_opt_best_samples);
            // printf("probabilities after optimization:\n");
            // print_matrix(after_opt_probs);
            printf("best samples after optimization:\n");
            print_tokens(after_opt_best_samples, n_vocab);
        }

        ggml_free(ctx0);
    }
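
    // Optimizer note: ggml_opt builds the backward graph for the loss tensor e
    // internally and updates every tensor previously marked with
    // ggml_set_param (here via set_param_model). The re-run of the forward
    // graph after ggml_opt only re-measures the error with the updated
    // weights. A sketch of swapping in Adam instead of L-BFGS (same ggml_opt
    // API from this ggml revision, illustrative settings) would be:
    //
    //     struct ggml_opt_params opt_params_adam = ggml_opt_default_params(GGML_OPT_ADAM);
    //     opt_params_adam.adam.n_iter = 16; // hypothetical iteration budget
    //     ggml_opt(ctx0, opt_params_adam, e);
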
    {
        int n_gen      = 128;
        int sample_ctx = n_tokens - n_tokens/8;

        printf("Generating %d tokens.\n", n_gen);

        struct ggml_tensor * tokens_input = ggml_new_tensor_1d(model.ctx, GGML_TYPE_I32, n_tokens);
        struct ggml_tensor * targets      = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, n_vocab, n_tokens);

        get_example_targets(137, tokens_input, targets);
        for (int i=sample_ctx; i<n_tokens; ++i) {
            ggml_set_i32_1d(tokens_input, i, n_vocab/2);
        }

        for (int i=0; i<sample_ctx-1; ++i) {
            print_token(ggml_get_i32_1d(tokens_input, i), n_vocab);
        }
        printf("---\n");

        for (int i=0; i<n_gen; ++i) {
            struct ggml_init_params params = {
                /*.mem_size   =*/ compute_size,
                /*.mem_buffer =*/ compute_addr,
                /*.no_alloc   =*/ false,
            };
            struct ggml_context * ctx0 = ggml_init(params);

            ggml_cgraph gf = {};

            int n_past = 0;
            struct ggml_tensor * logits = forward(&model, &kv_self, ctx0, &gf, tokens_input, sample_ctx, n_past);

            ggml_build_forward_expand(&gf, logits);
            ggml_graph_compute_helper(work_buffer, &gf, /*n_threads*/ 1);

            struct ggml_tensor * best_samples = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, sample_ctx);
            struct ggml_tensor * probs        = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_vocab, sample_ctx);

            sample_softmax(logits, probs, best_samples);

            // int sample_at = n_tokens-1;
            int token = ggml_get_i32_1d(best_samples, sample_ctx-1);

            // print_row(probs, sample_at);
            print_token(token, n_vocab);

            lshift_examples(tokens_input, targets, 1);
            ggml_set_i32_1d(tokens_input, 0, 0);
            ggml_set_i32_1d(tokens_input, sample_ctx-1, token);

            ggml_free(ctx0);
        }
    }
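
    // Generation scheme, in short: the model greedily predicts the token
    // following a sample_ctx-wide window (here 8 - 8/8 = 7 tokens), then the
    // window is shifted left by one, position 0 is reset to the start marker
    // 0, and the new token is written into the window's last slot, so each of
    // the n_gen steps extends the sine-wave pattern by one token.
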
    print_matrix(model.tok_embeddings);

    printf("done\n");

    delete[] compute_addr; // release the compute arena allocated with new[] above

    // ggml_free(kv_self.ctx);
    // ggml_free(model_lora.ctx);
    ggml_free(model.ctx);

    return 0;
}