#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "llama.h"
#include "common.h"
#include "train.h"

#include <vector>
#include <cstring>
#include <ctime>
#include <algorithm>
#include <string>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
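// Base-model hyperparameters. The defaults below appear to match LLaMA-7B;
// in practice they are overwritten from the gguf metadata of the input model
// (see load_model_hparams_gguf / init_model).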
struct my_llama_hparams {
    uint32_t n_vocab = 32000;
    uint32_t n_ctx = 512;
    uint32_t n_embd = 4096;
    uint32_t n_ff = 11008;
    uint32_t n_head = 32;
    uint32_t n_head_kv = 32;
    uint32_t n_layer = 32;

    // float f_norm_eps = 1e-5f; // falcon
    float f_norm_rms_eps = 1e-5f; // llama

    float rope_freq_base = 10000.0f;
    float rope_freq_scale = 1.0f;

    uint32_t n_gqa() const {
        return n_head/n_head_kv;
    }

    uint32_t n_embd_head() const {
        return n_embd/n_head;
    }

    uint32_t n_embd_gqa() const {
        return n_embd/n_gqa();
    }

    bool operator!=(const my_llama_hparams& other) const {
        return memcmp(this, &other, sizeof(other));
    }
};
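// Per-layer tensors of the frozen base model. These point directly into the
// llama_model (possibly mmapped) and are never modified during finetuning.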
struct my_llama_layer {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * ffn_gate; // w1
    struct ggml_tensor * ffn_down; // w2
    struct ggml_tensor * ffn_up; // w3
};

struct my_llama_model {
    struct my_llama_hparams hparams;

    struct ggml_tensor * tok_embeddings;
    struct ggml_tensor * norm;
    struct ggml_tensor * output;

    std::vector<my_llama_layer> layers;
};
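// LoRA hyperparameters: the (lora_r, lora_alpha) scaling pair stored in the
// exported adapter, plus an individual rank for every tensor that receives a
// low-rank delta.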
struct my_llama_lora_hparams {
    uint32_t lora_r = 1;
    uint32_t lora_alpha = 1;

    uint32_t n_rank_attention_norm = 1;
    uint32_t n_rank_wq = 4;
    uint32_t n_rank_wk = 4;
    uint32_t n_rank_wv = 4;
    uint32_t n_rank_wo = 4;
    uint32_t n_rank_ffn_norm = 1;
    uint32_t n_rank_ffn_gate = 4;
    uint32_t n_rank_ffn_down = 4;
    uint32_t n_rank_ffn_up = 4;
    uint32_t n_rank_tok_embeddings = 4;
    uint32_t n_rank_norm = 1;
    uint32_t n_rank_output = 4;

    bool operator!=(const my_llama_lora_hparams& other) const {
        return memcmp(this, &other, sizeof(other));
    }
};
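// Low-rank factor pair (a, b) for each base-model tensor of a layer. The
// effective weight used in the finetune graph is base + ggml_mul_mat(a, b).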
struct my_llama_lora_layer {
    // normalization
    struct ggml_tensor * attention_norm_a;
    struct ggml_tensor * attention_norm_b;

    // attention
    struct ggml_tensor * wq_a;
    struct ggml_tensor * wq_b;
    struct ggml_tensor * wk_a;
    struct ggml_tensor * wk_b;
    struct ggml_tensor * wv_a;
    struct ggml_tensor * wv_b;
    struct ggml_tensor * wo_a;
    struct ggml_tensor * wo_b;

    // normalization
    struct ggml_tensor * ffn_norm_a;
    struct ggml_tensor * ffn_norm_b;

    // ff
    struct ggml_tensor * ffn_gate_a;
    struct ggml_tensor * ffn_gate_b;
    struct ggml_tensor * ffn_down_a;
    struct ggml_tensor * ffn_down_b;
    struct ggml_tensor * ffn_up_a;
    struct ggml_tensor * ffn_up_b;
};
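// All trainable LoRA tensors, plus the ggml context that describes them and
// the backend buffer that owns their data.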
struct my_llama_lora {
    struct ggml_context * ctx = NULL;
    ggml_backend_buffer_t data;

    my_llama_lora_hparams hparams;

    struct ggml_tensor * tok_embeddings_a;
    struct ggml_tensor * tok_embeddings_b;

    struct ggml_tensor * norm_a;
    struct ggml_tensor * norm_b;

    struct ggml_tensor * output_a;
    struct ggml_tensor * output_b;

    std::vector<my_llama_lora_layer> layers;
};

// gguf constants
static const char * LLM_KV_TRAINING_TYPE_FINETUNE_LORA = "finetune_lora";
static const char * LLM_KV_TRAINING_TYPE = "training.type";

static const char * LLM_KV_TRAINING_LORA_RANK_TOKEN_EMBD = "training.lora.rank.token_embd";
static const char * LLM_KV_TRAINING_LORA_RANK_OUTPUT_NORM = "training.lora.rank.output_norm";
static const char * LLM_KV_TRAINING_LORA_RANK_OUTPUT = "training.lora.rank.output";
static const char * LLM_KV_TRAINING_LORA_RANK_ATTN_NORM = "training.lora.rank.attn_norm";
static const char * LLM_KV_TRAINING_LORA_RANK_ATTN_Q = "training.lora.rank.attn_q";
static const char * LLM_KV_TRAINING_LORA_RANK_ATTN_K = "training.lora.rank.attn_k";
static const char * LLM_KV_TRAINING_LORA_RANK_ATTN_V = "training.lora.rank.attn_v";
static const char * LLM_KV_TRAINING_LORA_RANK_ATTN_OUT = "training.lora.rank.attn_output";
static const char * LLM_KV_TRAINING_LORA_RANK_FFN_NORM = "training.lora.rank.ffn_norm";
static const char * LLM_KV_TRAINING_LORA_RANK_FFN_GATE = "training.lora.rank.ffn_gate";
static const char * LLM_KV_TRAINING_LORA_RANK_FFN_DOWN = "training.lora.rank.ffn_down";
static const char * LLM_KV_TRAINING_LORA_RANK_FFN_UP = "training.lora.rank.ffn_up";

// gguf constants (sync with gguf.py)
static const char * LLM_KV_GENERAL_ARCHITECTURE = "general.architecture";
static const char * LLM_KV_GENERAL_FILE_TYPE = "general.file_type";

static const char * LLM_KV_CONTEXT_LENGTH = "%s.context_length";
static const char * LLM_KV_EMBEDDING_LENGTH = "%s.embedding_length";
static const char * LLM_KV_BLOCK_COUNT = "%s.block_count";
static const char * LLM_KV_FEED_FORWARD_LENGTH = "%s.feed_forward_length";
static const char * LLM_KV_ATTENTION_HEAD_COUNT = "%s.attention.head_count";
static const char * LLM_KV_ATTENTION_HEAD_COUNT_KV = "%s.attention.head_count_kv";
static const char * LLM_KV_ATTENTION_LAYERNORM_RMS_EPS = "%s.attention.layer_norm_rms_epsilon";
static const char * LLM_KV_ROPE_DIMENSION_COUNT = "%s.rope.dimension_count";
static const char * LLM_KV_ROPE_FREQ_BASE = "%s.rope.freq_base"; // TODO load in llama.cpp
static const char * LLM_KV_ROPE_SCALE_LINEAR = "%s.rope.scale_linear";

static const char * LLM_TENSOR_TOKEN_EMBD = "token_embd";
static const char * LLM_TENSOR_OUTPUT_NORM = "output_norm";
static const char * LLM_TENSOR_OUTPUT = "output";
static const char * LLM_TENSOR_ATTN_NORM = "blk.%d.attn_norm";
static const char * LLM_TENSOR_ATTN_Q = "blk.%d.attn_q";
static const char * LLM_TENSOR_ATTN_K = "blk.%d.attn_k";
static const char * LLM_TENSOR_ATTN_V = "blk.%d.attn_v";
static const char * LLM_TENSOR_ATTN_OUT = "blk.%d.attn_output";
static const char * LLM_TENSOR_FFN_NORM = "blk.%d.ffn_norm";
static const char * LLM_TENSOR_FFN_GATE = "blk.%d.ffn_gate";
static const char * LLM_TENSOR_FFN_DOWN = "blk.%d.ffn_down";
static const char * LLM_TENSOR_FFN_UP = "blk.%d.ffn_up";

static void print_params(struct my_llama_hparams * params) {
    printf("%s: n_vocab : %u\n", __func__, params->n_vocab);
    printf("%s: n_ctx : %u\n", __func__, params->n_ctx);
    printf("%s: n_embd : %u\n", __func__, params->n_embd);
    printf("%s: n_ff : %u\n", __func__, params->n_ff);
    printf("%s: n_head : %u\n", __func__, params->n_head);
    printf("%s: n_head_kv : %u\n", __func__, params->n_head_kv);
    printf("%s: n_layer : %u\n", __func__, params->n_layer);
    printf("%s: norm_rms_eps : %f\n", __func__, params->f_norm_rms_eps);
    printf("%s: rope_freq_base : %f\n", __func__, params->rope_freq_base);
    printf("%s: rope_freq_scale : %f\n", __func__, params->rope_freq_scale);
}

static void print_lora_params(struct my_llama_lora_hparams * params) {
    printf("%s: n_rank_attention_norm : %u\n", __func__, params->n_rank_attention_norm);
    printf("%s: n_rank_wq : %u\n", __func__, params->n_rank_wq);
    printf("%s: n_rank_wk : %u\n", __func__, params->n_rank_wk);
    printf("%s: n_rank_wv : %u\n", __func__, params->n_rank_wv);
    printf("%s: n_rank_wo : %u\n", __func__, params->n_rank_wo);
    printf("%s: n_rank_ffn_norm : %u\n", __func__, params->n_rank_ffn_norm);
    printf("%s: n_rank_ffn_gate : %u\n", __func__, params->n_rank_ffn_gate);
    printf("%s: n_rank_ffn_down : %u\n", __func__, params->n_rank_ffn_down);
    printf("%s: n_rank_ffn_up : %u\n", __func__, params->n_rank_ffn_up);
    printf("%s: n_rank_tok_embeddings : %u\n", __func__, params->n_rank_tok_embeddings);
    printf("%s: n_rank_norm : %u\n", __func__, params->n_rank_norm);
    printf("%s: n_rank_output : %u\n", __func__, params->n_rank_output);
}
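// Look up `key` in the gguf context, check that it has the expected gguf type
// and store its value into `dst`; aborts via die_fmt if a required key is
// missing or has the wrong type.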
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
{ \
    const std::string skey(key); \
    const int kid = gguf_find_key(ctx, skey.c_str()); \
    if (kid >= 0) { \
        enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \
        if (ktype != (type)) { \
            die_fmt("key %s has wrong type: %s", skey.c_str(), gguf_type_name(ktype)); \
        } \
        (dst) = func(ctx, kid); \
    } else if (req) { \
        die_fmt("key not found in model: %s", skey.c_str()); \
    } \
}

static void load_model_hparams_gguf(struct gguf_context * ctx, struct my_llama_hparams * hparams, const char * expected_arch) {
    std::string arch;

    GGUF_GET_KEY(ctx, arch, gguf_get_val_str, GGUF_TYPE_STRING, true, LLM_KV_GENERAL_ARCHITECTURE);
    if (expected_arch != NULL) {
        if (arch != expected_arch) {
            printf("%s: arch=%s expected_arch=%s\n", __func__, arch.c_str(), expected_arch);
        }
        GGML_ASSERT(arch == expected_arch);
    }

    std::vector<char> keybuf;
    keybuf.resize(512);
    auto kv = [&arch, &keybuf](const char * key) -> const char * {
        snprintf(keybuf.data(), keybuf.size(), key, arch.c_str());
        return keybuf.data();
    };

    GGUF_GET_KEY(ctx, hparams->n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_EMBEDDING_LENGTH));
    GGUF_GET_KEY(ctx, hparams->n_ctx, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_CONTEXT_LENGTH));
    GGUF_GET_KEY(ctx, hparams->n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_FEED_FORWARD_LENGTH));
    GGUF_GET_KEY(ctx, hparams->n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_ATTENTION_HEAD_COUNT));
    GGUF_GET_KEY(ctx, hparams->n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_BLOCK_COUNT));

    // n_head_kv is optional, default to n_head
    hparams->n_head_kv = hparams->n_head;
    GGUF_GET_KEY(ctx, hparams->n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV));

    float rope_freq_scale = 1.0f;
    GGUF_GET_KEY(ctx, hparams->f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
    GGUF_GET_KEY(ctx, hparams->rope_freq_base, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE));
    GGUF_GET_KEY(ctx, rope_freq_scale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR));
    if (rope_freq_scale != 1.0f) {
        hparams->rope_freq_scale = 1.0f / rope_freq_scale;
    }
}
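// Fill my_llama_model from an already loaded llama_model: hyperparameters are
// re-read from the gguf header of the model file, tensor pointers are taken
// from the (possibly mmapped) llama_model and their shapes are sanity-checked.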
static void init_model(struct llama_model * input, struct my_llama_model * model, const char * fn_model, uint32_t n_ctx) {
    auto & hparams = model->hparams;

    std::vector<char> tn_buf;
    tn_buf.resize(GGML_MAX_NAME);

    auto tn = [&tn_buf](const char * key) -> const char * {
        snprintf(tn_buf.data(), tn_buf.size(), "%s.weight", key);
        return tn_buf.data();
    };

    auto tni = [&tn_buf](const char * key, int bid) -> const char * {
        snprintf(tn_buf.data(), tn_buf.size(), key, bid);
        std::string s = tn_buf.data();
        snprintf(tn_buf.data(), tn_buf.size(), "%s.weight", s.c_str());
        return tn_buf.data();
    };

    // get parameters directly from gguf file
    {
        struct gguf_init_params params = {
            /*.no_alloc = */ false,
            /*.ctx = */ NULL,
        };
        struct gguf_context * mctx = gguf_init_from_file(fn_model, params);

        load_model_hparams_gguf(mctx, &hparams, "llama");

        gguf_free(mctx);
    }
    hparams.n_vocab = llama_n_vocab(input);
    hparams.n_ctx = n_ctx;

    // get tensors from llama_model (possibly mmapped)
    model->tok_embeddings = llama_get_model_tensor(input, tn(LLM_TENSOR_TOKEN_EMBD));
    model->norm = llama_get_model_tensor(input, tn(LLM_TENSOR_OUTPUT_NORM));
    model->output = llama_get_model_tensor(input, tn(LLM_TENSOR_OUTPUT));

    assert_shape_2d(model->tok_embeddings, hparams.n_embd, hparams.n_vocab);
    assert_shape_1d(model->norm, hparams.n_embd);
    assert_shape_2d(model->output, hparams.n_embd, hparams.n_vocab);

    model->layers.resize(hparams.n_layer);
    for (uint32_t i = 0; i < hparams.n_layer; ++i) {
        auto & layer = model->layers[i];

        layer.attention_norm = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_NORM, i));
        layer.wq = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_Q, i));
        layer.wk = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_K, i));
        layer.wv = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_V, i));
        layer.wo = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_OUT, i));
        layer.ffn_norm = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_NORM, i));
        layer.ffn_gate = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_GATE, i));
        layer.ffn_down = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_DOWN, i));
        layer.ffn_up = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_UP, i));

        assert_shape_1d(layer.attention_norm, hparams.n_embd);
        assert_shape_2d(layer.wq, hparams.n_embd, hparams.n_embd);
        assert_shape_2d(layer.wk, hparams.n_embd, hparams.n_embd_gqa());
        assert_shape_2d(layer.wv, hparams.n_embd, hparams.n_embd_gqa());
        assert_shape_2d(layer.wo, hparams.n_embd, hparams.n_embd);
        assert_shape_1d(layer.ffn_norm, hparams.n_embd);
        assert_shape_2d(layer.ffn_gate, hparams.n_embd, hparams.n_ff);
        assert_shape_2d(layer.ffn_down, hparams.n_ff, hparams.n_embd);
        assert_shape_2d(layer.ffn_up, hparams.n_embd, hparams.n_ff);
    }
}
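// Mark every LoRA tensor as a trainable parameter so that ggml allocates
// gradients for them when the backward graph is built.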
static void set_param_lora(struct my_llama_lora * lora) {
    const uint32_t n_layer = lora->layers.size();

    struct ggml_context* ctx = lora->ctx;

    ggml_set_param(ctx, lora->tok_embeddings_a);
    ggml_set_param(ctx, lora->tok_embeddings_b);
    ggml_set_param(ctx, lora->norm_a);
    ggml_set_param(ctx, lora->norm_b);
    ggml_set_param(ctx, lora->output_a);
    ggml_set_param(ctx, lora->output_b);

    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = lora->layers[i];

        ggml_set_param(ctx, layer.attention_norm_a);
        ggml_set_param(ctx, layer.attention_norm_b);
        ggml_set_param(ctx, layer.wq_a);
        ggml_set_param(ctx, layer.wq_b);
        ggml_set_param(ctx, layer.wk_a);
        ggml_set_param(ctx, layer.wk_b);
        ggml_set_param(ctx, layer.wv_a);
        ggml_set_param(ctx, layer.wv_b);
        ggml_set_param(ctx, layer.wo_a);
        ggml_set_param(ctx, layer.wo_b);
        ggml_set_param(ctx, layer.ffn_norm_a);
        ggml_set_param(ctx, layer.ffn_norm_b);
        ggml_set_param(ctx, layer.ffn_gate_a);
        ggml_set_param(ctx, layer.ffn_gate_b);
        ggml_set_param(ctx, layer.ffn_down_a);
        ggml_set_param(ctx, layer.ffn_down_b);
        ggml_set_param(ctx, layer.ffn_up_a);
        ggml_set_param(ctx, layer.ffn_up_b);
    }
}
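// Create the LoRA tensors. For a base weight W of shape [n_in, n_out] the
// factors are A of shape [r, n_in] and B of shape [r, n_out], so that
// ggml_mul_mat(A, B) has the same shape as W and can be added to it.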
static void init_lora(const struct my_llama_model * model, struct my_llama_lora * lora) {
    const auto & lparams = lora->hparams;

    const uint32_t n_embd = model->hparams.n_embd;
    const uint32_t n_embd_gqa = model->hparams.n_embd_gqa();
    const uint32_t n_layer = model->hparams.n_layer;
    const uint32_t n_vocab = model->hparams.n_vocab;
    const uint32_t n_ff = model->hparams.n_ff;

    std::vector<char> tn_buf;
    tn_buf.resize(GGML_MAX_NAME);
    auto tn = [&tn_buf](const char * key, const char * suffix) -> const char * {
        snprintf(tn_buf.data(), tn_buf.size(), "%s%s", key, suffix);
        return tn_buf.data();
    };
    auto tni = [&tn_buf](const char * key, const char * suffix, int bid) -> const char * {
        snprintf(tn_buf.data(), tn_buf.size(), key, bid);
        std::string s = tn_buf.data();
        snprintf(tn_buf.data(), tn_buf.size(), "%s%s", s.c_str(), suffix);
        return tn_buf.data();
    };

    // context for lora tensors without their data
    struct ggml_init_params ctx_lora_params;
    ctx_lora_params.mem_size = ggml_tensor_overhead()*2*(6 + n_layer*18);
    ctx_lora_params.mem_buffer = NULL;
    ctx_lora_params.no_alloc = true;

    struct ggml_context * ctx = ggml_init(ctx_lora_params);
    lora->ctx = ctx;

    lora->tok_embeddings_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_tok_embeddings, n_embd);
    lora->tok_embeddings_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_tok_embeddings, n_vocab);
    lora->norm_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_norm, n_embd);
    lora->norm_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_norm, 1);
    lora->output_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_output, n_embd);
    lora->output_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_output, n_vocab);

    ggml_set_name(lora->tok_embeddings_a, tn(LLM_TENSOR_TOKEN_EMBD, ".weight.lora_a"));
    ggml_set_name(lora->tok_embeddings_b, tn(LLM_TENSOR_TOKEN_EMBD, ".weight.lora_b"));
    ggml_set_name(lora->norm_a, tn(LLM_TENSOR_OUTPUT_NORM, ".weight.lora_a"));
    ggml_set_name(lora->norm_b, tn(LLM_TENSOR_OUTPUT_NORM, ".weight.lora_b"));
    ggml_set_name(lora->output_a, tn(LLM_TENSOR_OUTPUT, ".weight.lora_a"));
    ggml_set_name(lora->output_b, tn(LLM_TENSOR_OUTPUT, ".weight.lora_b"));

    lora->layers.resize(n_layer);
    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = lora->layers[i];

        layer.attention_norm_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_attention_norm, n_embd);
        layer.attention_norm_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_attention_norm, 1);
        layer.wq_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_wq, n_embd);
        layer.wq_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_wq, n_embd);
        layer.wk_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_wk, n_embd);
        layer.wk_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_wk, n_embd_gqa);
        layer.wv_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_wv, n_embd);
        layer.wv_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_wv, n_embd_gqa);
        layer.wo_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_wo, n_embd);
        layer.wo_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_wo, n_embd);
        layer.ffn_norm_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_norm, n_embd);
        layer.ffn_norm_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_norm, 1);
        layer.ffn_gate_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_gate, n_embd);
        layer.ffn_gate_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_gate, n_ff);
        layer.ffn_down_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_down, n_ff);
        layer.ffn_down_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_down, n_embd);
        layer.ffn_up_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_up, n_embd);
        layer.ffn_up_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_up, n_ff);

        ggml_set_name(layer.attention_norm_a, tni(LLM_TENSOR_ATTN_NORM, ".weight.lora_a", i));
        ggml_set_name(layer.attention_norm_b, tni(LLM_TENSOR_ATTN_NORM, ".weight.lora_b", i));
        ggml_set_name(layer.wq_a, tni(LLM_TENSOR_ATTN_Q, ".weight.lora_a", i));
        ggml_set_name(layer.wq_b, tni(LLM_TENSOR_ATTN_Q, ".weight.lora_b", i));
        ggml_set_name(layer.wk_a, tni(LLM_TENSOR_ATTN_K, ".weight.lora_a", i));
        ggml_set_name(layer.wk_b, tni(LLM_TENSOR_ATTN_K, ".weight.lora_b", i));
        ggml_set_name(layer.wv_a, tni(LLM_TENSOR_ATTN_V, ".weight.lora_a", i));
        ggml_set_name(layer.wv_b, tni(LLM_TENSOR_ATTN_V, ".weight.lora_b", i));
        ggml_set_name(layer.wo_a, tni(LLM_TENSOR_ATTN_OUT, ".weight.lora_a", i));
        ggml_set_name(layer.wo_b, tni(LLM_TENSOR_ATTN_OUT, ".weight.lora_b", i));
        ggml_set_name(layer.ffn_norm_a, tni(LLM_TENSOR_FFN_NORM, ".weight.lora_a", i));
        ggml_set_name(layer.ffn_norm_b, tni(LLM_TENSOR_FFN_NORM, ".weight.lora_b", i));
        ggml_set_name(layer.ffn_gate_a, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_a", i));
        ggml_set_name(layer.ffn_gate_b, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_b", i));
        ggml_set_name(layer.ffn_down_a, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_a", i));
        ggml_set_name(layer.ffn_down_b, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_b", i));
        ggml_set_name(layer.ffn_up_a, tni(LLM_TENSOR_FFN_UP, ".weight.lora_a", i));
        ggml_set_name(layer.ffn_up_b, tni(LLM_TENSOR_FFN_UP, ".weight.lora_b", i));
    }

    set_param_lora(lora);

    // allocate data for lora tensors
    lora->data = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cpu_buffer_type());
}
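// Standard LoRA initialization: the A factors get normally distributed noise,
// the B factors start at zero, so the initial delta mul_mat(A, B) is zero and
// training starts exactly at the base model.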
static void randomize_lora(struct my_llama_lora * lora, int seed, float mean, float std, float min, float max) {
    const uint32_t n_layer = lora->layers.size();

    struct random_normal_distribution * rnd = init_random_normal_distribution(seed, mean, std, min, max);

    randomize_tensor_normal(lora->tok_embeddings_a, rnd);
    ggml_set_zero(lora->tok_embeddings_b);
    randomize_tensor_normal(lora->norm_a, rnd);
    ggml_set_zero(lora->norm_b);
    randomize_tensor_normal(lora->output_a, rnd);
    ggml_set_zero(lora->output_b);

    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = lora->layers[i];
        randomize_tensor_normal(layer.attention_norm_a, rnd);
        ggml_set_zero(layer.attention_norm_b);

        randomize_tensor_normal(layer.wq_a, rnd);
        ggml_set_zero(layer.wq_b);
        randomize_tensor_normal(layer.wk_a, rnd);
        ggml_set_zero(layer.wk_b);
        randomize_tensor_normal(layer.wv_a, rnd);
        ggml_set_zero(layer.wv_b);
        randomize_tensor_normal(layer.wo_a, rnd);
        ggml_set_zero(layer.wo_b);

        randomize_tensor_normal(layer.ffn_norm_a, rnd);
        ggml_set_zero(layer.ffn_norm_b);

        randomize_tensor_normal(layer.ffn_gate_a, rnd);
        ggml_set_zero(layer.ffn_gate_b);
        randomize_tensor_normal(layer.ffn_down_a, rnd);
        ggml_set_zero(layer.ffn_down_b);
        randomize_tensor_normal(layer.ffn_up_a, rnd);
        ggml_set_zero(layer.ffn_up_b);
    }

    free_random_normal_distribution(rnd);
}
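// Build the finetuning graphs: a forward graph gf ending in the cross entropy
// loss, and a backward graph gb (optionally via gradient checkpointing using
// gb_tmp), with the LoRA deltas applied to every base weight. Returns the
// loss tensor and stores the logits tensor in *logits.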
static struct ggml_tensor * llama_build_lora_finetune_graphs(
        struct my_llama_model * model,
        struct my_llama_lora * lora,
        ggml_gallocr_t alloc,
        struct ggml_context * ctx,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        struct ggml_cgraph * gb_tmp,
        struct ggml_tensor * * logits,
        struct ggml_tensor * tokens_input,
        struct ggml_tensor * targets,
        const int n_tokens,
        const int n_batch,
        const bool enable_flash_attn,
        const bool enable_checkpointing,
        const bool measure_only) {

    ggml_set_scratch(ctx, { 0, 0, nullptr, });
    const int n_past = 0;
    const int N = n_tokens;
    const auto & hparams = model->hparams;
    const int n_ctx = hparams.n_ctx;
    const int n_vocab = hparams.n_vocab;
    const int n_embd = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_head = hparams.n_head;
    const int n_head_kv = hparams.n_head_kv;
    const int n_ff = hparams.n_ff;
    const int n_rot = hparams.n_embd_head();
    const int n_embd_head = hparams.n_embd_head();
    const int n_embd_gqa = hparams.n_embd_gqa();

    const float rms_norm_eps = hparams.f_norm_rms_eps;
    const float rope_freq_base = hparams.rope_freq_base;
    const float rope_freq_scale = hparams.rope_freq_scale;

    GGML_ASSERT((size_t) n_layer == lora->layers.size());

    auto set_name = [](struct ggml_tensor * t, const char * n) {
        ggml_set_name(t, n);
        if (t->grad) {
            ggml_format_name(t->grad, "%s->grad", n);
        }
    };

    // KQ_pos - contains the positions
    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N);
    ggml_set_input(KQ_pos);

    // rope has so many parameters that we make a custom function for it
    auto rope = [ctx, KQ_pos, n_rot, n_ctx, rope_freq_base, rope_freq_scale]
                (struct ggml_tensor * t) -> struct ggml_tensor * {
        // not capturing these, to silence warnings
        const int rope_mode = 0;

        return ggml_rope_ext(ctx,
            t, KQ_pos, nullptr, n_rot, rope_mode, n_ctx,
            rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
        );
    };

    set_name(tokens_input, "tokens_input");
    set_name(targets, "targets");

    GGML_ASSERT(tokens_input->type == GGML_TYPE_I32);

    auto add_to_f32 = [] (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
        if (ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16 || a->type == GGML_TYPE_BF16) {
            return ggml_add_cast(ctx, a, b, GGML_TYPE_F32);
        } else if (a->type == GGML_TYPE_F32) {
            return ggml_add(ctx, a, b);
        } else {
            die_fmt("%s: Finetuning on tensors with type '%s' is not yet supported.\n",
                __func__, ggml_type_name(a->type));
        }
    };
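    // effective weights: frozen base weight + LoRA delta mul_mat(A, B),
    // cast to F32 when the base tensor is quantized or F16/BF16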
    struct ggml_tensor * tok_embeddings = add_to_f32(ctx, model->tok_embeddings, ggml_mul_mat(ctx, lora->tok_embeddings_a, lora->tok_embeddings_b));
    struct ggml_tensor * norm = add_to_f32(ctx, model->norm, ggml_mul_mat(ctx, lora->norm_a, lora->norm_b));
    struct ggml_tensor * output = add_to_f32(ctx, model->output, ggml_mul_mat(ctx, lora->output_a, lora->output_b));

    struct ggml_tensor * t00 = ggml_reshape_1d(ctx, tokens_input, N*n_batch); set_name(t00, "t00"); assert_shape_1d(t00, N*n_batch);
    struct ggml_tensor * t01 = ggml_get_rows(ctx, tok_embeddings, t00); set_name(t01, "t01"); assert_shape_2d(t01, n_embd, N*n_batch);

    struct ggml_tensor * cur = t01;

    std::vector<struct ggml_tensor *> checkpoints;
    if (enable_checkpointing) {
        checkpoints.push_back(tokens_input);
        checkpoints.push_back(targets);
        checkpoints.push_back(t00);
        checkpoints.push_back(t01);
    }

    const float kv_scale = 1.0f/sqrtf(float(n_embd)/n_head);

    for (int il = 0; il < n_layer; ++il) {
        struct my_llama_layer & layer = model->layers[il];
        struct my_llama_lora_layer & llayer = lora->layers[il];

        struct ggml_tensor * attention_norm = add_to_f32(ctx, layer.attention_norm, ggml_mul_mat(ctx, llayer.attention_norm_a, llayer.attention_norm_b));
        struct ggml_tensor * ffn_norm = add_to_f32(ctx, layer.ffn_norm, ggml_mul_mat(ctx, llayer.ffn_norm_a, llayer.ffn_norm_b));
        struct ggml_tensor * wq = add_to_f32(ctx, layer.wq, ggml_mul_mat(ctx, llayer.wq_a, llayer.wq_b));
        struct ggml_tensor * wk = add_to_f32(ctx, layer.wk, ggml_mul_mat(ctx, llayer.wk_a, llayer.wk_b));
        struct ggml_tensor * wv = add_to_f32(ctx, layer.wv, ggml_mul_mat(ctx, llayer.wv_a, llayer.wv_b));
        struct ggml_tensor * wo = add_to_f32(ctx, layer.wo, ggml_mul_mat(ctx, llayer.wo_a, llayer.wo_b));
        struct ggml_tensor * ffn_gate = add_to_f32(ctx, layer.ffn_gate, ggml_mul_mat(ctx, llayer.ffn_gate_a, llayer.ffn_gate_b));
        struct ggml_tensor * ffn_down = add_to_f32(ctx, layer.ffn_down, ggml_mul_mat(ctx, llayer.ffn_down_a, llayer.ffn_down_b));
        struct ggml_tensor * ffn_up = add_to_f32(ctx, layer.ffn_up, ggml_mul_mat(ctx, llayer.ffn_up_a, llayer.ffn_up_b));

        struct ggml_tensor * t02 = ggml_rms_norm (ctx, cur, rms_norm_eps); set_name(t02, "t02"); assert_shape_2d(t02, n_embd, N*n_batch);
        struct ggml_tensor * t03 = ggml_repeat (ctx, attention_norm, t02); set_name(t03, "t03"); assert_shape_2d(t03, n_embd, N*n_batch);
        struct ggml_tensor * t04 = ggml_mul (ctx, t03, t02); set_name(t04, "t04"); assert_shape_2d(t04, n_embd, N*n_batch);
        struct ggml_tensor * t05 = ggml_mul_mat (ctx, wq, t04); set_name(t05, "t05"); assert_shape_2d(t05, n_embd, N*n_batch);
        struct ggml_tensor * t06 = ggml_reshape_4d (ctx, t05, n_embd_head, n_head, N, n_batch); set_name(t06, "t06"); assert_shape_4d(t06, n_embd_head, n_head, N, n_batch);
        struct ggml_tensor * t07 = rope (t06); set_name(t07, "t07"); assert_shape_4d(t07, n_embd_head, n_head, N, n_batch);
        struct ggml_tensor * t08 = ggml_mul_mat (ctx, wk, t04); set_name(t08, "t08"); assert_shape_2d(t08, n_embd_gqa, N*n_batch);
        struct ggml_tensor * t09 = ggml_reshape_4d (ctx, t08, n_embd_head, n_head_kv, N, n_batch); set_name(t09, "t09"); assert_shape_4d(t09, n_embd_head, n_head_kv, N, n_batch);
        struct ggml_tensor * t10 = rope (t09); set_name(t10, "t10"); assert_shape_4d(t10, n_embd_head, n_head_kv, N, n_batch);

        struct ggml_tensor * t11;
        if (ggml_is_quantized(wv->type)) {
            struct ggml_tensor * t11_1 = ggml_mul_mat (ctx, wv, t04); set_name(t11_1, "t11_1"); assert_shape_2d(t11_1, n_embd_gqa, N*n_batch);
            struct ggml_tensor * t11_2 = ggml_transpose(ctx, t11_1); set_name(t11_2, "t11_2"); assert_shape_2d(t11_2, N*n_batch, n_embd_gqa);
            t11 = ggml_cont (ctx, t11_2); set_name(t11, "t11"); assert_shape_2d(t11, N*n_batch, n_embd_gqa);
        } else {
            t11 = ggml_mul_mat (ctx, t04, wv); set_name(t11, "t11"); assert_shape_2d(t11, N*n_batch, n_embd_gqa);
        }

        struct ggml_tensor * t12 = ggml_reshape_4d (ctx, t11, N, n_batch, n_embd_head, n_head_kv); set_name(t12, "t12"); assert_shape_4d(t12, N, n_batch, n_embd_head, n_head_kv);
        struct ggml_tensor * t13 = ggml_permute (ctx, t07, 0, 2, 1, 3); set_name(t13, "t13"); assert_shape_4d(t13, n_embd_head, N, n_head, n_batch);
        struct ggml_tensor * t14 = ggml_permute (ctx, t10, 0, 2, 1, 3); set_name(t14, "t14"); assert_shape_4d(t14, n_embd_head, N, n_head_kv, n_batch);
        struct ggml_tensor * t15 = ggml_permute (ctx, t12, 0, 3, 1, 2); set_name(t15, "t15"); assert_shape_4d(t15, N, n_embd_head, n_head_kv, n_batch);

        struct ggml_tensor * t16;
        if (enable_flash_attn) {
            GGML_ASSERT(false && "TODO: ggml_flash_attn_ext() not yet supported");
            //t16 = ggml_flash_attn(ctx, t13, t14, t15, true); set_name(t16, "t16"); assert_shape_4d(t16, n_embd_head, N, n_head, n_batch);
        } else {
            struct ggml_tensor * t16_0 = ggml_mul_mat (ctx, t14, t13); set_name(t16_0, "t16_0"); assert_shape_4d(t16_0, N, N, n_head, n_batch);
            struct ggml_tensor * t16_1 = ggml_scale_inplace (ctx, t16_0, kv_scale); set_name(t16_1, "t16_1"); assert_shape_4d(t16_1, N, N, n_head, n_batch);
            struct ggml_tensor * t16_2 = ggml_diag_mask_inf_inplace(ctx, t16_1, n_past); set_name(t16_2, "t16_2"); assert_shape_4d(t16_2, N, N, n_head, n_batch);
            struct ggml_tensor * t16_3 = ggml_soft_max_inplace (ctx, t16_2); set_name(t16_3, "t16_3"); assert_shape_4d(t16_3, N, N, n_head, n_batch);
            t16 = ggml_mul_mat(ctx, t15, t16_3); set_name(t16, "t16"); assert_shape_4d(t16, n_embd_head, N, n_head, n_batch);
        }

        struct ggml_tensor * t17 = ggml_permute (ctx, t16, 0, 2, 1, 3); set_name(t17, "t17"); assert_shape_4d(t17, n_embd_head, n_head, N, n_batch);
        struct ggml_tensor * t18 = ggml_cont (ctx, t17); set_name(t18, "t18"); assert_shape_4d(t18, n_embd_head, n_head, N, n_batch);
        struct ggml_tensor * t19 = ggml_reshape_2d (ctx, t18, n_embd, N*n_batch); set_name(t19, "t19"); assert_shape_2d(t19, n_embd, N*n_batch);
        struct ggml_tensor * t20 = ggml_mul_mat (ctx, wo, t19); set_name(t20, "t20"); assert_shape_2d(t20, n_embd, N*n_batch);
        struct ggml_tensor * t21 = ggml_add (ctx, t20, cur); set_name(t21, "t21"); assert_shape_2d(t21, n_embd, N*n_batch);
        struct ggml_tensor * t22 = ggml_rms_norm (ctx, t21, rms_norm_eps); set_name(t22, "t22"); assert_shape_2d(t22, n_embd, N*n_batch);
        struct ggml_tensor * t23 = ggml_repeat (ctx, ffn_norm, t22); set_name(t23, "t23"); assert_shape_2d(t23, n_embd, N*n_batch);
        struct ggml_tensor * t24 = ggml_mul (ctx, t23, t22); set_name(t24, "t24"); assert_shape_2d(t24, n_embd, N*n_batch);
        struct ggml_tensor * t25 = ggml_mul_mat (ctx, ffn_up, t24); set_name(t25, "t25"); assert_shape_2d(t25, n_ff, N*n_batch);
        struct ggml_tensor * t26 = ggml_mul_mat (ctx, ffn_gate, t24); set_name(t26, "t26"); assert_shape_2d(t26, n_ff, N*n_batch);
        struct ggml_tensor * t27 = ggml_silu (ctx, t26); set_name(t27, "t27"); assert_shape_2d(t27, n_ff, N*n_batch);
        struct ggml_tensor * t28 = ggml_mul (ctx, t27, t25); set_name(t28, "t28"); assert_shape_2d(t28, n_ff, N*n_batch);
        struct ggml_tensor * t29 = ggml_mul_mat (ctx, ffn_down, t28); set_name(t29, "t29"); assert_shape_2d(t29, n_embd, N*n_batch);
        struct ggml_tensor * t30 = ggml_add (ctx, t29, t21); set_name(t30, "t30"); assert_shape_2d(t30, n_embd, N*n_batch);
        cur = t30;
        if (enable_checkpointing) {
            checkpoints.push_back(cur);
        }
    }

    struct ggml_tensor * t31 = ggml_rms_norm (ctx, cur, rms_norm_eps); set_name(t31, "t31"); assert_shape_2d(t31, n_embd, N*n_batch);
    struct ggml_tensor * t32 = ggml_repeat (ctx, norm, t31); set_name(t32, "t32"); assert_shape_2d(t32, n_embd, N*n_batch);
    struct ggml_tensor * t33 = ggml_mul (ctx, t32, t31); set_name(t33, "t33"); assert_shape_2d(t33, n_embd, N*n_batch);
    struct ggml_tensor * t34 = ggml_mul_mat (ctx, output, t33); set_name(t34, "t34"); assert_shape_2d(t34, n_vocab, N*n_batch);
    struct ggml_tensor * t35 = ggml_reshape_3d (ctx, t34, n_vocab, N, n_batch); set_name(t35, "t35"); assert_shape_3d(t35, n_vocab, N, n_batch);
    struct ggml_tensor * t36 = ggml_cross_entropy_loss(ctx, t35, targets); set_name(t36, "t36"); assert_shape_1d(t36, 1);

    if (enable_checkpointing) {
        checkpoints.push_back(t31);
        checkpoints.push_back(t32);
        checkpoints.push_back(t33);
        checkpoints.push_back(t34);
        checkpoints.push_back(t35);
        checkpoints.push_back(t36);
    }

    ggml_build_forward_expand(gf, t36);

    if (enable_checkpointing) {
        ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints.data(), (int) checkpoints.size());
    } else {
        ggml_graph_cpy(gf, gb);
        ggml_build_backward_expand(ctx, gf, gb, true);
    }

    GGML_ASSERT(alloc != NULL);

    // make sure some tensors are not reallocated by inserting new temporary nodes depending on them
    int n_leafs_before = gb->n_leafs;
    int n_nodes_before = gb->n_nodes;

    // output tensors
    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, 1.0f));
    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, 1.0f));
    // input gradient
    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, 1.0f));
    GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL);
    ggml_set_input(t36->grad);
    // KQ_pos
    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, 1.0f));

    // make sure base model tensors data cannot be used in viewable operations
    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, 1.0f));
    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->norm, 1.0f));
    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->output, 1.0f));
    for (int il = 0; il < n_layer; ++il) {
        struct my_llama_layer & layer = model->layers[il];
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.attention_norm, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_norm, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wq, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wk, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wv, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wo, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_gate, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_down, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_up, 1.0f));
    }

    // allocating checkpoints in one block to reduce memory fragmentation
    // note: they will be freed in reverse order
    for (unsigned int i = 0; i < checkpoints.size(); ++i) {
        if (checkpoints[i]->data == NULL && checkpoints[i]->view_src == NULL) {
            ggml_set_input(checkpoints[i]);
        }
    }

    if (measure_only) {
        ggml_gallocr_reserve(alloc, gb);
    } else {
        ggml_gallocr_alloc_graph(alloc, gb);

        // set KQ_pos
        {
            int * data = (int *) KQ_pos->data;
            for (int i = 0; i < N; ++i) {
                data[i] = n_past + i;
            }
        }
    }

    // remove the additional nodes and leafs
    for (int i = n_leafs_before; i < gb->n_leafs; ++i) {
        gb->leafs[i] = NULL;
    }
    for (int i = n_nodes_before; i < gb->n_nodes; ++i) {
        gb->nodes[i] = NULL;
    }
    gb->n_leafs = n_leafs_before;
    gb->n_nodes = n_nodes_before;

    *logits = t35;
    return t36;
}
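// Read LoRA hyperparameters and tensors from a gguf based checkpoint,
// asserting that all shape-defining base-model hyperparameters match.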
static void load_llama_lora_gguf(struct gguf_context * fctx, struct ggml_context * f_ggml_ctx, struct my_llama_model * model, struct my_llama_lora * lora) {
    // NOTE: gguf_context must be initialized with f_ggml_ctx and no_alloc=false, otherwise tensor data cannot be read
    std::string arch;

    std::vector<char> keybuf;
    keybuf.resize(512);

    GGUF_GET_KEY(fctx, arch, gguf_get_val_str, GGUF_TYPE_STRING, true, LLM_KV_GENERAL_ARCHITECTURE);
    GGML_ASSERT(arch == "llama");

    uint32_t ftype_u;
    GGUF_GET_KEY(fctx, ftype_u, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_GENERAL_FILE_TYPE);
    GGML_ASSERT((enum llama_ftype) ftype_u == LLAMA_FTYPE_ALL_F32);

    struct my_llama_hparams hparams;
    load_model_hparams_gguf(fctx, &hparams, arch.c_str());

    // parameters that define tensor shapes must match
    GGML_ASSERT(hparams.n_embd == model->hparams.n_embd);
    GGML_ASSERT(hparams.n_ff == model->hparams.n_ff);
    GGML_ASSERT(hparams.n_head == model->hparams.n_head);
    GGML_ASSERT(hparams.n_head_kv == model->hparams.n_head_kv);
    GGML_ASSERT(hparams.n_layer == model->hparams.n_layer);

    GGUF_GET_KEY(fctx, lora->hparams.n_rank_tok_embeddings, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_TOKEN_EMBD);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_norm, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_OUTPUT_NORM);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_output, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_OUTPUT);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_attention_norm, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_NORM);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_wq, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_Q);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_wk, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_K);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_wv, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_V);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_wo, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_OUT);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_norm, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_NORM);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_gate, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_GATE);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_down, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN);
    GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_up, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_UP);

    init_lora(model, lora);

    copy_tensor_by_name(lora->tok_embeddings_a, f_ggml_ctx, ggml_get_name(lora->tok_embeddings_a));
    copy_tensor_by_name(lora->tok_embeddings_b, f_ggml_ctx, ggml_get_name(lora->tok_embeddings_b));
    copy_tensor_by_name(lora->norm_a, f_ggml_ctx, ggml_get_name(lora->norm_a));
    copy_tensor_by_name(lora->norm_b, f_ggml_ctx, ggml_get_name(lora->norm_b));
    copy_tensor_by_name(lora->output_a, f_ggml_ctx, ggml_get_name(lora->output_a));
    copy_tensor_by_name(lora->output_b, f_ggml_ctx, ggml_get_name(lora->output_b));

    for (uint32_t i = 0; i < lora->layers.size(); ++i) {
        auto & layer = lora->layers[i];
        copy_tensor_by_name(layer.attention_norm_a, f_ggml_ctx, ggml_get_name(layer.attention_norm_a));
        copy_tensor_by_name(layer.attention_norm_b, f_ggml_ctx, ggml_get_name(layer.attention_norm_b));
        copy_tensor_by_name(layer.wq_a, f_ggml_ctx, ggml_get_name(layer.wq_a));
        copy_tensor_by_name(layer.wq_b, f_ggml_ctx, ggml_get_name(layer.wq_b));
        copy_tensor_by_name(layer.wk_a, f_ggml_ctx, ggml_get_name(layer.wk_a));
        copy_tensor_by_name(layer.wk_b, f_ggml_ctx, ggml_get_name(layer.wk_b));
        copy_tensor_by_name(layer.wv_a, f_ggml_ctx, ggml_get_name(layer.wv_a));
        copy_tensor_by_name(layer.wv_b, f_ggml_ctx, ggml_get_name(layer.wv_b));
        copy_tensor_by_name(layer.wo_a, f_ggml_ctx, ggml_get_name(layer.wo_a));
        copy_tensor_by_name(layer.wo_b, f_ggml_ctx, ggml_get_name(layer.wo_b));
        copy_tensor_by_name(layer.ffn_norm_a, f_ggml_ctx, ggml_get_name(layer.ffn_norm_a));
        copy_tensor_by_name(layer.ffn_norm_b, f_ggml_ctx, ggml_get_name(layer.ffn_norm_b));
        copy_tensor_by_name(layer.ffn_gate_a, f_ggml_ctx, ggml_get_name(layer.ffn_gate_a));
        copy_tensor_by_name(layer.ffn_gate_b, f_ggml_ctx, ggml_get_name(layer.ffn_gate_b));
        copy_tensor_by_name(layer.ffn_down_a, f_ggml_ctx, ggml_get_name(layer.ffn_down_a));
        copy_tensor_by_name(layer.ffn_down_b, f_ggml_ctx, ggml_get_name(layer.ffn_down_b));
        copy_tensor_by_name(layer.ffn_up_a, f_ggml_ctx, ggml_get_name(layer.ffn_up_a));
        copy_tensor_by_name(layer.ffn_up_b, f_ggml_ctx, ggml_get_name(layer.ffn_up_b));
    }
}
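// Write the LoRA hyperparameters (together with the relevant base-model
// hyperparameters) and all LoRA tensors into a gguf context.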
static void save_llama_lora_gguf(struct gguf_context * fctx, struct my_llama_model * model, struct my_llama_lora * lora) {
    const char * arch = "llama";
    enum llama_ftype ftype = LLAMA_FTYPE_ALL_F32;

    std::vector<char> keybuf;
    keybuf.resize(512);
    auto kv = [arch, &keybuf](const char * key) -> const char * {
        snprintf(keybuf.data(), keybuf.size(), key, arch);
        return keybuf.data();
    };

    gguf_set_val_str(fctx, LLM_KV_GENERAL_ARCHITECTURE, arch);
    gguf_set_val_u32(fctx, LLM_KV_GENERAL_FILE_TYPE, ftype);

    gguf_set_val_u32(fctx, kv(LLM_KV_CONTEXT_LENGTH), model->hparams.n_ctx);
    gguf_set_val_u32(fctx, kv(LLM_KV_EMBEDDING_LENGTH), model->hparams.n_embd);
    gguf_set_val_u32(fctx, kv(LLM_KV_FEED_FORWARD_LENGTH), model->hparams.n_ff);
    gguf_set_val_u32(fctx, kv(LLM_KV_ATTENTION_HEAD_COUNT), model->hparams.n_head);
    gguf_set_val_u32(fctx, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV), model->hparams.n_head_kv);
    gguf_set_val_u32(fctx, kv(LLM_KV_BLOCK_COUNT), model->hparams.n_layer);
    gguf_set_val_u32(fctx, kv(LLM_KV_ROPE_DIMENSION_COUNT), model->hparams.n_embd_head());
    gguf_set_val_f32(fctx, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS), model->hparams.f_norm_rms_eps);
    gguf_set_val_f32(fctx, kv(LLM_KV_ROPE_FREQ_BASE), model->hparams.rope_freq_base);
    gguf_set_val_f32(fctx, kv(LLM_KV_ROPE_SCALE_LINEAR), model->hparams.rope_freq_scale);

    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_TOKEN_EMBD, lora->hparams.n_rank_tok_embeddings);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_OUTPUT_NORM, lora->hparams.n_rank_norm);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_OUTPUT, lora->hparams.n_rank_output);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_NORM, lora->hparams.n_rank_attention_norm);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_Q, lora->hparams.n_rank_wq);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_K, lora->hparams.n_rank_wk);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_V, lora->hparams.n_rank_wv);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_OUT, lora->hparams.n_rank_wo);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_NORM, lora->hparams.n_rank_ffn_norm);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_GATE, lora->hparams.n_rank_ffn_gate);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN, lora->hparams.n_rank_ffn_down);
    gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_UP, lora->hparams.n_rank_ffn_up);

    gguf_add_tensor(fctx, lora->tok_embeddings_a);
    gguf_add_tensor(fctx, lora->tok_embeddings_b);
    gguf_add_tensor(fctx, lora->norm_a);
    gguf_add_tensor(fctx, lora->norm_b);
    gguf_add_tensor(fctx, lora->output_a);
    gguf_add_tensor(fctx, lora->output_b);

    for (uint32_t i = 0; i < lora->layers.size(); ++i) {
        auto & layer = lora->layers[i];

        gguf_add_tensor(fctx, layer.attention_norm_a);
        gguf_add_tensor(fctx, layer.attention_norm_b);
        gguf_add_tensor(fctx, layer.wq_a);
        gguf_add_tensor(fctx, layer.wq_b);
        gguf_add_tensor(fctx, layer.wk_a);
        gguf_add_tensor(fctx, layer.wk_b);
        gguf_add_tensor(fctx, layer.wv_a);
        gguf_add_tensor(fctx, layer.wv_b);
        gguf_add_tensor(fctx, layer.wo_a);
        gguf_add_tensor(fctx, layer.wo_b);
        gguf_add_tensor(fctx, layer.ffn_norm_a);
        gguf_add_tensor(fctx, layer.ffn_norm_b);
        gguf_add_tensor(fctx, layer.ffn_gate_a);
        gguf_add_tensor(fctx, layer.ffn_gate_b);
        gguf_add_tensor(fctx, layer.ffn_down_a);
        gguf_add_tensor(fctx, layer.ffn_down_b);
        gguf_add_tensor(fctx, layer.ffn_up_a);
        gguf_add_tensor(fctx, layer.ffn_up_b);
    }
}

static void load_checkpoint_lora_gguf(struct gguf_context * fctx, struct ggml_context * f_ggml_ctx, struct my_llama_model * model, struct my_llama_lora * lora, struct train_state * train) {
    std::string train_type = LLM_KV_TRAINING_TYPE_FINETUNE_LORA;
    GGUF_GET_KEY(fctx, train_type, gguf_get_val_str, GGUF_TYPE_STRING, false, LLM_KV_TRAINING_TYPE);
    GGML_ASSERT(train_type == LLM_KV_TRAINING_TYPE_FINETUNE_LORA);

    load_train_state_gguf(fctx, f_ggml_ctx, train);
    load_llama_lora_gguf(fctx, f_ggml_ctx, model, lora);
}
static void save_checkpoint_lora_gguf(struct gguf_context * fctx, struct my_llama_model * model, struct my_llama_lora * lora, struct train_state * train) {
    gguf_set_val_str(fctx, LLM_KV_TRAINING_TYPE, LLM_KV_TRAINING_TYPE_FINETUNE_LORA);
    save_llama_lora_gguf(fctx, model, lora);
    save_train_state_gguf(fctx, train);
}

static bool load_checkpoint_lora_file(const char * filename, struct my_llama_model * model, struct my_llama_lora * lora, struct train_state * train) {
    struct ggml_context * f_ggml_ctx;
    struct gguf_init_params params;
    params.no_alloc = false;
    params.ctx = &f_ggml_ctx;
    struct gguf_context * fctx = gguf_init_from_file(filename, params);
    if (fctx == NULL) {
        return false;
    }

    load_checkpoint_lora_gguf(fctx, f_ggml_ctx, model, lora, train);

    gguf_free(fctx);
    return true;
}

static void save_checkpoint_lora_file(const char * filename, struct my_llama_model * model, struct my_llama_lora * lora, struct train_state * train) {
    printf("%s: saving to %s\n", __func__, filename);
    struct gguf_context * fctx = gguf_init_empty();

    save_checkpoint_lora_gguf(fctx, model, lora, train);

    // write file
    const bool only_meta = false;
    gguf_write_to_file(fctx, filename, only_meta);
    gguf_free(fctx);
}
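// Minimal RAII wrapper around FILE*, used below for writing the legacy binary
// LoRA adapter (see save_as_llama_lora).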
  807. struct llama_file {
  808. // use FILE * so we don't have to re-open the file to mmap
  809. FILE * fp;
  810. size_t size;
  811. llama_file(const char * fname, const char * mode) {
  812. fp = std::fopen(fname, mode);
  813. if (fp == NULL) {
  814. size = 0;
  815. } else {
  816. seek(0, SEEK_END);
  817. size = tell();
  818. seek(0, SEEK_SET);
  819. }
  820. }
  821. size_t tell() const {
  822. #ifdef _WIN32
  823. __int64 ret = _ftelli64(fp);
  824. #else
  825. long ret = std::ftell(fp);
  826. #endif
  827. GGML_ASSERT(ret != -1); // this really shouldn't fail
  828. return (size_t) ret;
  829. }
  830. void seek(size_t offset, int whence) {
  831. #ifdef _WIN32
  832. int ret = _fseeki64(fp, (__int64) offset, whence);
  833. #else
  834. int ret = std::fseek(fp, (long) offset, whence);
  835. #endif
  836. GGML_ASSERT(ret == 0); // same
  837. }
  838. void read_raw(void * ptr, size_t size) {
  839. if (size == 0) {
  840. return;
  841. }
  842. errno = 0;
  843. std::size_t ret = std::fread(ptr, size, 1, fp);
  844. if (ferror(fp)) {
  845. die_fmt("read error: %s", strerror(errno));
  846. }
  847. if (ret != 1) {
  848. die("unexpectedly reached end of file");
  849. }
  850. }
  851. std::uint32_t read_u32() {
  852. std::uint32_t ret;
  853. read_raw(&ret, sizeof(ret));
  854. return ret;
  855. }
  856. std::string read_string(std::uint32_t len) {
  857. std::vector<char> chars(len);
  858. read_raw(chars.data(), len);
  859. return std::string(chars.data(), len);
  860. }
  861. void write_raw(const void * ptr, size_t size) {
  862. if (size == 0) {
  863. return;
  864. }
  865. errno = 0;
  866. size_t ret = std::fwrite(ptr, size, 1, fp);
  867. if (ret != 1) {
  868. die_fmt("write error: %s", strerror(errno));
  869. }
  870. }
  871. void write_u32(std::uint32_t val) {
  872. write_raw(&val, sizeof(val));
  873. }
  874. ~llama_file() {
  875. if (fp) {
  876. std::fclose(fp);
  877. }
  878. }
  879. };
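// write_tensor emits one tensor record of the legacy adapter format: n_dims, name length
// and ggml type, followed by the dims and the name, then the raw tensor data with the
// file position padded to the next 32-byte boundary via (0 - tell()) & 31.
// A NULL tensor is written as an empty placeholder so the reader sees a consistent layout.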
  880. static void write_tensor(struct llama_file * file, struct ggml_tensor * tensor, const char * name) {
  881. if (tensor == NULL) {
  882. file->write_u32(0);
  883. file->write_u32(0);
  884. file->write_u32(GGML_TYPE_F32);
  885. file->seek((0-file->tell()) & 31, SEEK_CUR);
  886. return;
  887. }
  888. if (name == NULL) {
  889. name = ggml_get_name(tensor);
  890. }
  891. uint32_t name_len = strlen(name);
  892. uint32_t nd = ggml_n_dims(tensor);
  893. uint32_t ne[4] = { (uint32_t)tensor->ne[0],
  894. (uint32_t)tensor->ne[1],
  895. (uint32_t)tensor->ne[2],
  896. (uint32_t)tensor->ne[3] };
  897. file->write_u32(nd);
  898. file->write_u32(name_len);
  899. file->write_u32(tensor->type);
  900. file->write_raw(ne, sizeof(ne[0]) * nd);
  901. file->write_raw(name, name_len);
  902. file->seek((0-file->tell()) & 31, SEEK_CUR);
  903. file->write_raw(tensor->data, ggml_nbytes(tensor));
  904. }
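// save_as_llama_lora writes the adapter in the older binary GGLA container (magic and
// version, then the lora_r/lora_alpha hyperparameters, then a loraA/loraB tensor pair for
// every adapted weight). This is the format llama.cpp historically loaded via --lora,
// whereas checkpoints above are stored as GGUF.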
  905. static void save_as_llama_lora(const char * filename, struct my_llama_lora * lora) {
  906. printf("%s: saving to %s\n", __func__, filename);
  907. struct llama_file file(filename, "wb");
  908. if (file.fp == NULL) {
  909. return;
  910. }
  911. std::vector<char> tn_buf;
  912. tn_buf.resize(GGML_MAX_NAME);
  913. auto tn = [&tn_buf](const char * key, const char * suffix) -> const char * {
  914. snprintf(tn_buf.data(), tn_buf.size(), "%s%s", key, suffix);
  915. return tn_buf.data();
  916. };
  917. auto tni = [&tn_buf](const char * key, int bid, const char * suffix) -> const char * {
  918. snprintf(tn_buf.data(), tn_buf.size(), key, bid);
  919. std::string s = tn_buf.data();
  920. snprintf(tn_buf.data(), tn_buf.size(), "%s%s", s.c_str(), suffix);
  921. return tn_buf.data();
  922. };
  923. // write_magic
  924. file.write_u32(LLAMA_FILE_MAGIC_GGLA); // magic
  925. file.write_u32(1); // version
  926. // write_hparams
  927. file.write_u32(lora->hparams.lora_r);
  928. file.write_u32(lora->hparams.lora_alpha);
  929. // write tensors
  930. write_tensor(&file, lora->tok_embeddings_a, tn(LLM_TENSOR_TOKEN_EMBD, ".weight.loraA"));
  931. write_tensor(&file, lora->tok_embeddings_b, tn(LLM_TENSOR_TOKEN_EMBD, ".weight.loraB"));
  932. write_tensor(&file, lora->norm_a, tn(LLM_TENSOR_OUTPUT_NORM, ".weight.loraA"));
  933. write_tensor(&file, lora->norm_b, tn(LLM_TENSOR_OUTPUT_NORM, ".weight.loraB"));
  934. write_tensor(&file, lora->output_a, tn(LLM_TENSOR_OUTPUT, ".weight.loraA"));
  935. write_tensor(&file, lora->output_b, tn(LLM_TENSOR_OUTPUT, ".weight.loraB"));
  936. for (uint32_t i = 0; i < lora->layers.size(); ++i) {
  937. auto & layer = lora->layers[i];
  938. write_tensor(&file, layer.attention_norm_a, tni(LLM_TENSOR_ATTN_NORM, i, ".weight.loraA"));
  939. write_tensor(&file, layer.attention_norm_b, tni(LLM_TENSOR_ATTN_NORM, i, ".weight.loraB"));
  940. write_tensor(&file, layer.wq_a, tni(LLM_TENSOR_ATTN_Q, i, ".weight.loraA"));
  941. write_tensor(&file, layer.wq_b, tni(LLM_TENSOR_ATTN_Q, i, ".weight.loraB"));
  942. write_tensor(&file, layer.wk_a, tni(LLM_TENSOR_ATTN_K, i, ".weight.loraA"));
  943. write_tensor(&file, layer.wk_b, tni(LLM_TENSOR_ATTN_K, i, ".weight.loraB"));
  944. write_tensor(&file, layer.wv_a, tni(LLM_TENSOR_ATTN_V, i, ".weight.loraA"));
  945. write_tensor(&file, layer.wv_b, tni(LLM_TENSOR_ATTN_V, i, ".weight.loraB"));
  946. write_tensor(&file, layer.wo_a, tni(LLM_TENSOR_ATTN_OUT, i, ".weight.loraA"));
  947. write_tensor(&file, layer.wo_b, tni(LLM_TENSOR_ATTN_OUT, i, ".weight.loraB"));
  948. write_tensor(&file, layer.ffn_norm_a, tni(LLM_TENSOR_FFN_NORM, i, ".weight.loraA"));
  949. write_tensor(&file, layer.ffn_norm_b, tni(LLM_TENSOR_FFN_NORM, i, ".weight.loraB"));
  950. write_tensor(&file, layer.ffn_gate_a, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraA"));
  951. write_tensor(&file, layer.ffn_gate_b, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraB"));
  952. write_tensor(&file, layer.ffn_down_a, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraA"));
  953. write_tensor(&file, layer.ffn_down_b, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraB"));
  954. write_tensor(&file, layer.ffn_up_a, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraA"));
  955. write_tensor(&file, layer.ffn_up_b, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraB"));
  956. }
  957. }
  958. struct train_params {
  959. struct train_params_common common;
  960. const char * fn_model_base;
  961. const char * fn_lora_out;
  962. bool only_write_lora;
  963. float f_norm_rms_eps;
  964. float rope_freq_base;
  965. float rope_freq_scale;
  966. bool custom_f_norm_rms_eps;
  967. bool custom_rope_freq_base;
  968. bool custom_rope_freq_scale;
  969. int32_t lora_r;
  970. int32_t lora_alpha;
  971. bool custom_lora_alpha;
  972. uint32_t n_rank_attention_norm;
  973. uint32_t n_rank_wq;
  974. uint32_t n_rank_wk;
  975. uint32_t n_rank_wv;
  976. uint32_t n_rank_wo;
  977. uint32_t n_rank_ffn_norm;
  978. uint32_t n_rank_ffn_gate;
  979. uint32_t n_rank_ffn_down;
  980. uint32_t n_rank_ffn_up;
  981. uint32_t n_rank_tok_embeddings;
  982. uint32_t n_rank_norm;
  983. uint32_t n_rank_output;
  984. bool custom_n_rank_attention_norm;
  985. bool custom_n_rank_wq;
  986. bool custom_n_rank_wk;
  987. bool custom_n_rank_wv;
  988. bool custom_n_rank_wo;
  989. bool custom_n_rank_ffn_norm;
  990. bool custom_n_rank_ffn_gate;
  991. bool custom_n_rank_ffn_down;
  992. bool custom_n_rank_ffn_up;
  993. bool custom_n_rank_tok_embeddings;
  994. bool custom_n_rank_norm;
  995. bool custom_n_rank_output;
  996. };
  997. static struct train_params get_default_train_params() {
  998. struct train_params params;
  999. params.common = get_default_train_params_common();
  1000. params.fn_model_base = "";
  1001. params.fn_lora_out = "ggml-lora-ITERATION-f32.gguf";
  1002. params.only_write_lora = false;
  1003. params.f_norm_rms_eps = 1e-5f;
  1004. params.rope_freq_base = 10000.0f;
  1005. params.rope_freq_scale = 1.0f;
  1006. params.custom_f_norm_rms_eps = false;
  1007. params.custom_rope_freq_base = false;
  1008. params.custom_rope_freq_scale = false;
  1009. params.lora_r = 4;
  1010. params.lora_alpha = 4;
  1011. params.custom_lora_alpha = false;
  1012. params.n_rank_attention_norm = 1;
  1013. params.n_rank_wq = 4;
  1014. params.n_rank_wk = 4;
  1015. params.n_rank_wv = 4;
  1016. params.n_rank_wo = 4;
  1017. params.n_rank_ffn_norm = 1;
  1018. params.n_rank_ffn_gate = 4;
  1019. params.n_rank_ffn_down = 4;
  1020. params.n_rank_ffn_up = 4;
  1021. params.n_rank_tok_embeddings = 4;
  1022. params.n_rank_norm = 1;
  1023. params.n_rank_output = 4;
  1024. params.custom_n_rank_attention_norm = false;
  1025. params.custom_n_rank_wq = false;
  1026. params.custom_n_rank_wk = false;
  1027. params.custom_n_rank_wv = false;
  1028. params.custom_n_rank_wo = false;
  1029. params.custom_n_rank_ffn_norm = false;
  1030. params.custom_n_rank_ffn_gate = false;
  1031. params.custom_n_rank_ffn_down = false;
  1032. params.custom_n_rank_ffn_up = false;
  1033. params.custom_n_rank_tok_embeddings = false;
  1034. params.custom_n_rank_norm = false;
  1035. params.custom_n_rank_output = false;
  1036. return params;
  1037. }
  1038. static void train_print_usage(int argc, char ** argv, const struct train_params * params) {
  1039. fprintf(stderr, "usage: %s [options]\n", argv[0]);
  1040. fprintf(stderr, "\n");
  1041. fprintf(stderr, "options:\n");
1042. fprintf(stderr, "  -h, --help                show this help message and exit\n");
1043. fprintf(stderr, "  --model-base FNAME        model path from which to load the base model (default '%s')\n", params->fn_model_base);
1044. fprintf(stderr, "  --lora-out FNAME          path to save the LoRA adapter (default '%s')\n", params->fn_lora_out);
1045. fprintf(stderr, "  --only-write-lora         only save the LoRA adapter, don't do any training. Use this if you only want to convert a checkpoint to a LoRA adapter.\n");
1046. fprintf(stderr, "  --norm-rms-eps F          RMS-Norm epsilon value (default %f)\n", params->f_norm_rms_eps);
1047. fprintf(stderr, "  --rope-freq-base F        frequency base for RoPE (default %f)\n", params->rope_freq_base);
1048. fprintf(stderr, "  --rope-freq-scale F       frequency scale for RoPE (default %f)\n", params->rope_freq_scale);
1049. fprintf(stderr, "  --lora-alpha N            LoRA alpha: resulting LoRA scaling is alpha/r (default %d)\n", params->lora_alpha);
1050. fprintf(stderr, "  --lora-r N                LoRA r: default rank. Also determines the resulting scaling together with lora-alpha (default %d)\n", params->lora_r);
1051. fprintf(stderr, "  --rank-att-norm N         LoRA rank for the attention norm tensor, overrides default rank. Norm tensors should generally have rank 1.\n");
1052. fprintf(stderr, "  --rank-ffn-norm N         LoRA rank for the feed-forward norm tensor, overrides default rank. Norm tensors should generally have rank 1.\n");
1053. fprintf(stderr, "  --rank-out-norm N         LoRA rank for the output norm tensor, overrides default rank. Norm tensors should generally have rank 1.\n");
1054. fprintf(stderr, "  --rank-tok-embd N         LoRA rank for the token embeddings tensor, overrides default rank.\n");
1055. fprintf(stderr, "  --rank-out N              LoRA rank for the output tensor, overrides default rank.\n");
1056. fprintf(stderr, "  --rank-wq N               LoRA rank for the wq tensor, overrides default rank.\n");
1057. fprintf(stderr, "  --rank-wk N               LoRA rank for the wk tensor, overrides default rank.\n");
1058. fprintf(stderr, "  --rank-wv N               LoRA rank for the wv tensor, overrides default rank.\n");
1059. fprintf(stderr, "  --rank-wo N               LoRA rank for the wo tensor, overrides default rank.\n");
1060. fprintf(stderr, "  --rank-ffn-gate N         LoRA rank for the ffn_gate tensor, overrides default rank.\n");
1061. fprintf(stderr, "  --rank-ffn-down N         LoRA rank for the ffn_down tensor, overrides default rank.\n");
1062. fprintf(stderr, "  --rank-ffn-up N           LoRA rank for the ffn_up tensor, overrides default rank.\n");
  1063. print_common_train_usage(argc, argv, &params->common);
  1064. }
  1065. static bool train_params_parse(int argc, char ** argv, struct train_params * params) {
  1066. bool invalid_param = false;
  1067. std::string arg;
  1068. struct train_params default_params = get_default_train_params();
  1069. const std::string arg_prefix = "--";
  1070. for (int i = 1; i < argc; i++) {
  1071. arg = argv[i];
  1072. if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
  1073. std::replace(arg.begin(), arg.end(), '_', '-');
  1074. }
  1075. if (consume_common_train_arg(argc, argv, &i, &params->common, &invalid_param)) {
  1076. if (invalid_param) {
  1077. break;
  1078. } else if (params->common.print_usage) {
  1079. train_print_usage(argc, argv, &default_params);
  1080. exit(0);
  1081. }
  1082. } else if (arg == "--model-base") {
  1083. if (++i >= argc) {
  1084. invalid_param = true;
  1085. break;
  1086. }
  1087. params->fn_model_base = argv[i];
  1088. } else if (arg == "--lora-out") {
  1089. if (++i >= argc) {
  1090. invalid_param = true;
  1091. break;
  1092. }
  1093. params->fn_lora_out = argv[i];
  1094. } else if (arg == "--only-write-lora") {
  1095. params->only_write_lora = true;
  1096. } else if (arg == "--norm-rms-eps") {
  1097. if (++i >= argc) {
  1098. invalid_param = true;
  1099. break;
  1100. }
  1101. params->f_norm_rms_eps = std::stof(argv[i]);
  1102. params->custom_f_norm_rms_eps = true;
  1103. } else if (arg == "--rope-freq-base") {
  1104. if (++i >= argc) {
  1105. invalid_param = true;
  1106. break;
  1107. }
  1108. params->rope_freq_base = std::stof(argv[i]);
  1109. params->custom_rope_freq_base = true;
  1110. } else if (arg == "--rope-freq-scale") {
  1111. if (++i >= argc) {
  1112. invalid_param = true;
  1113. break;
  1114. }
  1115. params->rope_freq_scale = std::stof(argv[i]);
  1116. params->custom_rope_freq_scale = true;
  1117. } else if (arg == "--lora-alpha") {
  1118. if (++i >= argc) {
  1119. invalid_param = true;
  1120. break;
  1121. }
  1122. params->lora_alpha = std::stoi(argv[i]);
  1123. params->custom_lora_alpha = true;
  1124. } else if (arg == "--lora-r") {
  1125. if (++i >= argc) {
  1126. invalid_param = true;
  1127. break;
  1128. }
  1129. params->lora_r = std::stoi(argv[i]);
  1130. } else if (arg == "--rank-att-norm") {
  1131. if (++i >= argc) {
  1132. invalid_param = true;
  1133. break;
  1134. }
  1135. params->n_rank_attention_norm = std::stoi(argv[i]);
  1136. params->custom_n_rank_attention_norm = true;
  1137. } else if (arg == "--rank-ffn-norm") {
  1138. if (++i >= argc) {
  1139. invalid_param = true;
  1140. break;
  1141. }
  1142. params->n_rank_ffn_norm = std::stoi(argv[i]);
  1143. params->custom_n_rank_ffn_norm = true;
  1144. } else if (arg == "--rank-out-norm") {
  1145. if (++i >= argc) {
  1146. invalid_param = true;
  1147. break;
  1148. }
  1149. params->n_rank_norm = std::stoi(argv[i]);
  1150. params->custom_n_rank_norm = true;
  1151. } else if (arg == "--rank-tok-embd") {
  1152. if (++i >= argc) {
  1153. invalid_param = true;
  1154. break;
  1155. }
  1156. params->n_rank_tok_embeddings = std::stoi(argv[i]);
  1157. params->custom_n_rank_tok_embeddings = true;
  1158. } else if (arg == "--rank-out") {
  1159. if (++i >= argc) {
  1160. invalid_param = true;
  1161. break;
  1162. }
  1163. params->n_rank_output = std::stoi(argv[i]);
  1164. params->custom_n_rank_output = true;
  1165. } else if (arg == "--rank-wq") {
  1166. if (++i >= argc) {
  1167. invalid_param = true;
  1168. break;
  1169. }
  1170. params->n_rank_wq = std::stoi(argv[i]);
  1171. params->custom_n_rank_wq = true;
  1172. } else if (arg == "--rank-wk") {
  1173. if (++i >= argc) {
  1174. invalid_param = true;
  1175. break;
  1176. }
  1177. params->n_rank_wk = std::stoi(argv[i]);
  1178. params->custom_n_rank_wk = true;
  1179. } else if (arg == "--rank-wv") {
  1180. if (++i >= argc) {
  1181. invalid_param = true;
  1182. break;
  1183. }
  1184. params->n_rank_wv = std::stoi(argv[i]);
  1185. params->custom_n_rank_wv = true;
  1186. } else if (arg == "--rank-wo") {
  1187. if (++i >= argc) {
  1188. invalid_param = true;
  1189. break;
  1190. }
  1191. params->n_rank_wo = std::stoi(argv[i]);
  1192. params->custom_n_rank_wo = true;
1193. } else if (arg == "--rank-ffn-gate") { // '_' in "--" args was normalized to '-' above, so match the dashed spelling
  1194. if (++i >= argc) {
  1195. invalid_param = true;
  1196. break;
  1197. }
  1198. params->n_rank_ffn_gate = std::stoi(argv[i]);
  1199. params->custom_n_rank_ffn_gate = true;
1200. } else if (arg == "--rank-ffn-down") {
  1201. if (++i >= argc) {
  1202. invalid_param = true;
  1203. break;
  1204. }
  1205. params->n_rank_ffn_down = std::stoi(argv[i]);
  1206. params->custom_n_rank_ffn_down = true;
1207. } else if (arg == "--rank-ffn-up") {
  1208. if (++i >= argc) {
  1209. invalid_param = true;
  1210. break;
  1211. }
  1212. params->n_rank_ffn_up = std::stoi(argv[i]);
  1213. params->custom_n_rank_ffn_up = true;
  1214. } else {
  1215. fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
  1216. train_print_usage(argc, argv, &default_params);
  1217. exit(1);
  1218. }
  1219. }
  1220. if (invalid_param) {
  1221. fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
  1222. train_print_usage(argc, argv, &default_params);
  1223. exit(1);
  1224. }
  1225. finish_processing_train_args(&params->common);
  1226. return true;
  1227. }
  1228. struct save_train_files_data {
  1229. const char * fn_checkpoint_out;
  1230. const char * fn_lora_out;
  1231. const char * pattern_fn_it;
  1232. const char * fn_latest;
  1233. struct my_llama_model * model;
  1234. struct my_llama_lora * lora;
  1235. };
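// save_train_files is installed as the save callback for the optimizer loop below; each
// time it runs it writes the checkpoint and/or adapter twice, once under the per-iteration
// filename and once under the 'latest' filename (iteration -1).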
  1236. static void save_train_files(void * vdata, struct train_state * train) {
  1237. struct save_train_files_data * data = (struct save_train_files_data *) vdata;
  1238. int64_t iter = train->opt->iter;
  1239. if (strlen(data->fn_checkpoint_out) > 0) {
  1240. save_checkpoint_lora_file(get_train_filename(data->fn_checkpoint_out, data->pattern_fn_it, data->fn_latest, iter).c_str(), data->model, data->lora, train);
  1241. save_checkpoint_lora_file(get_train_filename(data->fn_checkpoint_out, data->pattern_fn_it, data->fn_latest, -1 ).c_str(), data->model, data->lora, train);
  1242. }
  1243. if (strlen(data->fn_lora_out) > 0) {
  1244. save_as_llama_lora(get_train_filename(data->fn_lora_out, data->pattern_fn_it, data->fn_latest, iter).c_str(), data->lora);
  1245. save_as_llama_lora(get_train_filename(data->fn_lora_out, data->pattern_fn_it, data->fn_latest, -1 ).c_str(), data->lora);
  1246. }
  1247. }
  1248. static int64_t get_parameter_count(struct my_llama_lora* lora) {
  1249. int64_t nx = 0;
  1250. nx += ggml_nelements(lora->tok_embeddings_a);
  1251. nx += ggml_nelements(lora->tok_embeddings_b);
  1252. nx += ggml_nelements(lora->norm_a);
  1253. nx += ggml_nelements(lora->norm_b);
  1254. nx += ggml_nelements(lora->output_a);
  1255. nx += ggml_nelements(lora->output_b);
  1256. for (uint32_t i = 0; i < lora->layers.size(); ++i) {
  1257. auto & layer = lora->layers[i];
  1258. nx += ggml_nelements(layer.attention_norm_a);
  1259. nx += ggml_nelements(layer.attention_norm_b);
  1260. nx += ggml_nelements(layer.wq_a);
  1261. nx += ggml_nelements(layer.wq_b);
  1262. nx += ggml_nelements(layer.wk_a);
  1263. nx += ggml_nelements(layer.wk_b);
  1264. nx += ggml_nelements(layer.wv_a);
  1265. nx += ggml_nelements(layer.wv_b);
  1266. nx += ggml_nelements(layer.wo_a);
  1267. nx += ggml_nelements(layer.wo_b);
  1268. nx += ggml_nelements(layer.ffn_norm_a);
  1269. nx += ggml_nelements(layer.ffn_norm_b);
  1270. nx += ggml_nelements(layer.ffn_gate_a);
  1271. nx += ggml_nelements(layer.ffn_gate_b);
  1272. nx += ggml_nelements(layer.ffn_down_a);
  1273. nx += ggml_nelements(layer.ffn_down_b);
  1274. nx += ggml_nelements(layer.ffn_up_a);
  1275. nx += ggml_nelements(layer.ffn_up_b);
  1276. }
  1277. return nx;
  1278. }
  1279. int main(int argc, char ** argv) {
  1280. struct train_params params = get_default_train_params();
  1281. if (!train_params_parse(argc, argv, &params)) {
  1282. return 1;
  1283. }
  1284. if (params.common.seed == LLAMA_DEFAULT_SEED) {
  1285. params.common.seed = time(NULL);
  1286. }
  1287. printf("%s: seed: %u\n", __func__, params.common.seed);
  1288. srand(params.common.seed);
  1289. struct llama_model_params llama_mparams = llama_model_default_params();
  1290. llama_mparams.n_gpu_layers = params.common.n_gpu_layers;
  1291. llama_mparams.vocab_only = false;
  1292. printf("%s: model base = '%s'\n", __func__, params.fn_model_base);
  1293. struct llama_model * lmodel = llama_load_model_from_file(params.fn_model_base, llama_mparams);
  1294. struct llama_context_params llama_cparams = llama_context_default_params();
  1295. struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_cparams);
  1296. struct my_llama_model model;
  1297. init_model(lmodel, &model, params.fn_model_base, params.common.n_ctx);
  1298. struct my_llama_lora lora;
  1299. struct train_state * train = init_train_state();
  1300. struct ggml_opt_context * opt = train->opt;
  1301. // set params from command line
  1302. if (params.custom_f_norm_rms_eps) {
  1303. model.hparams.f_norm_rms_eps = params.f_norm_rms_eps;
  1304. }
  1305. if (params.custom_rope_freq_base) {
  1306. model.hparams.rope_freq_base = params.rope_freq_base;
  1307. }
  1308. if (params.custom_rope_freq_scale) {
  1309. model.hparams.rope_freq_scale = params.rope_freq_scale;
  1310. }
  1311. lora.hparams.lora_r = params.lora_r;
  1312. lora.hparams.lora_alpha = params.custom_lora_alpha ? params.lora_alpha : params.lora_r;
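// per-tensor LoRA ranks: 1-dimensional norm tensors default to rank 1, all other weights
// fall back to lora_r; alpha defaulted to r above, so the initial effective scaling
// alpha/r is 1.0 unless overridden on the command line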
  1313. uint32_t n_rank_attention_norm = params.custom_n_rank_attention_norm ? params.n_rank_attention_norm : 1;
  1314. uint32_t n_rank_wq = params.custom_n_rank_wq ? params.n_rank_wq : params.lora_r;
  1315. uint32_t n_rank_wk = params.custom_n_rank_wk ? params.n_rank_wk : params.lora_r;
  1316. uint32_t n_rank_wv = params.custom_n_rank_wv ? params.n_rank_wv : params.lora_r;
  1317. uint32_t n_rank_wo = params.custom_n_rank_wo ? params.n_rank_wo : params.lora_r;
  1318. uint32_t n_rank_ffn_norm = params.custom_n_rank_ffn_norm ? params.n_rank_ffn_norm : 1;
  1319. uint32_t n_rank_ffn_gate = params.custom_n_rank_ffn_gate ? params.n_rank_ffn_gate : params.lora_r;
  1320. uint32_t n_rank_ffn_down = params.custom_n_rank_ffn_down ? params.n_rank_ffn_down : params.lora_r;
  1321. uint32_t n_rank_ffn_up = params.custom_n_rank_ffn_up ? params.n_rank_ffn_up : params.lora_r;
  1322. uint32_t n_rank_tok_embeddings = params.custom_n_rank_tok_embeddings ? params.n_rank_tok_embeddings : params.lora_r;
  1323. uint32_t n_rank_norm = params.custom_n_rank_norm ? params.n_rank_norm : 1;
  1324. uint32_t n_rank_output = params.custom_n_rank_output ? params.n_rank_output : params.lora_r;
  1325. lora.hparams.n_rank_attention_norm = n_rank_attention_norm;
  1326. lora.hparams.n_rank_wq = n_rank_wq;
  1327. lora.hparams.n_rank_wk = n_rank_wk;
  1328. lora.hparams.n_rank_wv = n_rank_wv;
  1329. lora.hparams.n_rank_wo = n_rank_wo;
  1330. lora.hparams.n_rank_ffn_norm = n_rank_ffn_norm;
  1331. lora.hparams.n_rank_ffn_gate = n_rank_ffn_gate;
  1332. lora.hparams.n_rank_ffn_down = n_rank_ffn_down;
  1333. lora.hparams.n_rank_ffn_up = n_rank_ffn_up;
  1334. lora.hparams.n_rank_tok_embeddings = n_rank_tok_embeddings;
  1335. lora.hparams.n_rank_norm = n_rank_norm;
  1336. lora.hparams.n_rank_output = n_rank_output;
  1337. // set opt params from command line
  1338. opt->params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM);
  1339. opt->params.print_forward_graph = false;
  1340. opt->params.print_backward_graph = false;
  1341. opt->params.graph_size = LLAMA_TRAIN_MAX_NODES;
  1342. opt->params.n_threads = params.common.n_threads;
  1343. opt->params.past = params.common.opt_past;
  1344. opt->params.delta = params.common.opt_delta;
  1345. opt->params.max_no_improvement = params.common.opt_max_no_improvement;
  1346. opt->params.n_gradient_accumulation = params.common.n_gradient_accumulation;
  1347. opt->params.adam.n_iter = params.common.adam_n_iter;
1348. opt->params.adam.sched = 1.0f; // the actual learning-rate schedule is applied per iteration by the training callback
  1349. opt->params.adam.alpha = params.common.adam_alpha;
  1350. opt->params.adam.decay = params.common.adam_decay;
  1351. opt->params.adam.decay_min_ndim = params.common.adam_decay_min_ndim;
  1352. opt->params.adam.beta1 = params.common.adam_beta1;
  1353. opt->params.adam.beta2 = params.common.adam_beta2;
  1354. opt->params.adam.gclip = params.common.adam_gclip;
  1355. opt->params.adam.eps_f = params.common.adam_eps_f;
  1356. printf("%s: init model\n", __func__);
  1357. bool existed = load_checkpoint_lora_file(params.common.fn_checkpoint_in, &model, &lora, train);
  1358. if (existed) {
  1359. // overwrite last n_ctx with user provided n_ctx
  1360. if (params.common.custom_n_ctx) {
  1361. model.hparams.n_ctx = params.common.n_ctx;
  1362. }
  1363. const bool opt_param_count_changed = (
  1364. (lora.hparams.n_rank_attention_norm != n_rank_attention_norm)
  1365. || (lora.hparams.n_rank_wq != n_rank_wq)
  1366. || (lora.hparams.n_rank_wk != n_rank_wk)
  1367. || (lora.hparams.n_rank_wv != n_rank_wv)
  1368. || (lora.hparams.n_rank_wo != n_rank_wo)
  1369. || (lora.hparams.n_rank_ffn_norm != n_rank_ffn_norm)
  1370. || (lora.hparams.n_rank_ffn_gate != n_rank_ffn_gate)
  1371. || (lora.hparams.n_rank_ffn_down != n_rank_ffn_down)
  1372. || (lora.hparams.n_rank_ffn_up != n_rank_ffn_up)
  1373. || (lora.hparams.n_rank_tok_embeddings != n_rank_tok_embeddings)
  1374. || (lora.hparams.n_rank_norm != n_rank_norm)
  1375. || (lora.hparams.n_rank_output != n_rank_output)
  1376. );
  1377. const bool opt_past_changed = opt->params.past != params.common.opt_past;
  1378. if (opt_param_count_changed) {
  1379. print_lora_params(&lora.hparams);
  1380. die("Provided rank differs from checkpoint file. To use different rank start finetune from scratch with empty input checkpoint, e.g --checkpoint-in ''. Aborting.");
  1381. // need to discard previous optimizer gradient statistics and opt_init with new shapes
  1382. // TODO
  1383. }
  1384. if (opt_past_changed) {
  1385. die("Optimizer parameter '--opt-past N' differs from checkpoint file. To use different value finetune from scratch with empty input checkpoint, e.g --checkpoint-in ''. Aborting");
  1386. // need to discard previous optimizer past function value statistics and opt_init with new shapes
  1387. // TODO
  1388. }
  1389. } else { // existed == false
  1390. init_lora(&model, &lora);
  1391. randomize_lora(&lora, params.common.seed, 0.0f, 1.0f, -1.0f, +1.0f);
  1392. if (!params.only_write_lora) {
  1393. ggml_opt_init(opt->ctx, opt, opt->params, get_parameter_count(&lora));
  1394. }
  1395. }
  1396. opt->iter = train->train_its;
  1397. print_params(&model.hparams);
  1398. print_lora_params(&lora.hparams);
  1399. printf("%s: total train_iterations %llu\n", __func__, (long long unsigned) train->train_its);
  1400. printf("%s: seen train_samples %llu\n", __func__, (long long unsigned) train->train_samples);
  1401. printf("%s: seen train_tokens %llu\n", __func__, (long long unsigned) train->train_tokens);
  1402. printf("%s: completed train_epochs %llu\n", __func__, (long long unsigned) train->train_epochs);
  1403. printf("%s: lora_size = %zu bytes (%.1f MB)\n", __func__, (ggml_used_mem(lora.ctx) + ggml_backend_buffer_get_size(lora.data)), (float) (ggml_used_mem(lora.ctx) + ggml_backend_buffer_get_size(lora.data)) / (1024.0f*1024.0f));
  1404. if (params.only_write_lora) {
  1405. save_train_files_data save_data;
  1406. save_data.fn_checkpoint_out = "";
  1407. save_data.fn_lora_out = params.fn_lora_out;
  1408. save_data.pattern_fn_it = params.common.pattern_fn_it;
  1409. save_data.fn_latest = params.common.fn_latest;
  1410. save_data.model = &model;
  1411. save_data.lora = &lora;
  1412. save_train_files(&save_data, train);
  1413. free_train_state(train);
  1414. ggml_free(lora.ctx);
  1415. llama_free(lctx);
  1416. llama_free_model(lmodel);
  1417. return 0;
  1418. }
  1419. printf("%s: opt_size = %zu bytes (%.1f MB)\n", __func__, ggml_get_mem_size(opt->ctx), (float) ggml_get_mem_size(opt->ctx) / (1024.0f*1024.0f));
  1420. printf("%s: opt iter %d\n", __func__, opt->iter);
  1421. int n_tokens = model.hparams.n_ctx;
  1422. int n_vocab = model.hparams.n_vocab;
  1423. int n_batch = params.common.n_batch;
  1424. // context for input tensors without their data
  1425. struct ggml_init_params ctx_input_params = {
  1426. ggml_tensor_overhead() * 2, // mem_size
  1427. NULL, // mem_buffer
  1428. true, // no_alloc
  1429. };
  1430. struct ggml_context * ctx_input = ggml_init(ctx_input_params);
  1431. // the input tensors
  1432. struct ggml_tensor * tokens_input = ggml_new_tensor_2d(ctx_input, GGML_TYPE_I32, n_tokens, n_batch);
  1433. struct ggml_tensor * target_probs = ggml_new_tensor_3d(ctx_input, GGML_TYPE_F32, n_vocab, n_tokens, n_batch);
  1434. // allocate input tensors
  1435. // measure required memory for input tensors
  1436. ggml_backend_buffer_t input_data = ggml_backend_alloc_ctx_tensors_from_buft(ctx_input, ggml_backend_cpu_buffer_type());
  1437. size_t max_input_size = ggml_backend_buffer_get_size(input_data);
  1438. printf("%s: input_size = %zu bytes (%.1f MB)\n", __func__, max_input_size, (float) max_input_size / (1024.0f*1024.0f));
  1439. // context for compute tensors without their data
  1440. const size_t estimated_compute_size_wo_data = (
  1441. 2*LLAMA_TRAIN_MAX_NODES*ggml_tensor_overhead() +
  1442. (params.common.use_checkpointing ? 3 : 2)*(GGML_OBJECT_SIZE+ggml_graph_overhead_custom(LLAMA_TRAIN_MAX_NODES, true))
  1443. );
  1444. struct ggml_init_params ctx_compute_params = {
  1445. estimated_compute_size_wo_data, // mem_size
  1446. NULL, // mem_buffer
  1447. true, // no_alloc
  1448. };
  1449. struct ggml_context * ctx_compute = NULL;
  1450. struct ggml_tensor * loss = NULL;
  1451. struct ggml_tensor * logits = NULL;
  1452. struct ggml_cgraph * gf = NULL;
  1453. struct ggml_cgraph * gb = NULL;
  1454. struct ggml_cgraph * gb_tmp = NULL;
  1455. // measure required memory for compute tensors
  1456. size_t best_compute_size = SIZE_MAX;
  1457. enum ggml_cgraph_eval_order best_order = GGML_CGRAPH_EVAL_ORDER_COUNT;
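// build the finetune graphs once per evaluation order and measure the compute buffer each
// order would need; everything is freed again and the graphs are rebuilt below with the
// cheapest order for the real allocation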
  1458. // find best evaluation order
  1459. for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) {
  1460. ctx_compute = ggml_init(ctx_compute_params);
  1461. ggml_gallocr_t alloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type());
  1462. gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
  1463. gf->order = (enum ggml_cgraph_eval_order) order;
  1464. gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
  1465. gb_tmp = params.common.use_checkpointing
  1466. ? ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true)
  1467. : NULL;
  1468. loss = llama_build_lora_finetune_graphs(
  1469. &model, &lora, alloc, ctx_compute,
  1470. gf, gb, gb_tmp,
  1471. &logits, tokens_input, target_probs,
  1472. n_tokens, n_batch,
  1473. params.common.use_flash,
  1474. params.common.use_checkpointing,
  1475. true
  1476. );
  1477. size_t max_compute_size = ggml_gallocr_get_buffer_size(alloc, 0); // FIXME: this will still allocate the buffer
  1478. if (max_compute_size < best_compute_size) {
  1479. best_compute_size = max_compute_size;
  1480. best_order = gf->order;
  1481. }
  1482. ggml_gallocr_free(alloc);
  1483. ggml_free(ctx_compute);
  1484. }
  1485. size_t max_compute_size = best_compute_size;
  1486. printf("%s: compute_size = %zu bytes (%.1f MB)\n", __func__, max_compute_size, (float) max_compute_size / (1024.0f*1024.0f));
  1487. printf("%s: evaluation order = %s\n", __func__,
  1488. (best_order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? "LEFT_TO_RIGHT" :
  1489. (best_order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? "RIGHT_TO_LEFT" :
  1490. "invalid");
  1491. // allocate compute tensors
  1492. ctx_compute = ggml_init(ctx_compute_params);
  1493. ggml_gallocr_t alloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type());
  1494. gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
  1495. gf->order = best_order;
  1496. gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
  1497. gb_tmp = params.common.use_checkpointing
  1498. ? ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true)
  1499. : NULL;
  1500. loss = llama_build_lora_finetune_graphs(
  1501. &model, &lora, alloc, ctx_compute,
  1502. gf, gb, gb_tmp,
  1503. &logits, tokens_input, target_probs,
  1504. n_tokens, n_batch,
  1505. params.common.use_flash,
  1506. params.common.use_checkpointing,
  1507. false
  1508. );
  1509. // tokenize data
  1510. std::vector<llama_token> train_tokens;
  1511. std::vector<size_t> train_samples_begin;
  1512. std::vector<size_t> train_samples_size;
  1513. printf("%s: tokenize training data from %s\n", __func__, params.common.fn_train_data);
  1514. printf("%s: sample-start: %s\n", __func__, params.common.sample_start.c_str());
  1515. printf("%s: include-sample-start: %s\n", __func__, params.common.include_sample_start ? "true" : "false");
  1516. tokenize_file(lctx,
  1517. params.common.fn_train_data,
  1518. params.common.sample_start,
  1519. params.common.include_sample_start,
  1520. params.common.overlapping_samples,
  1521. n_tokens,
  1522. train_tokens,
  1523. train_samples_begin,
  1524. train_samples_size);
  1525. GGML_ASSERT(train_samples_begin.size() == train_samples_size.size());
  1526. printf("%s: number of training tokens: %zu\n", __func__, train_tokens.size());
  1527. std::vector<size_t> token_noccurs;
  1528. token_noccurs.resize(model.hparams.n_vocab, 0);
  1529. for (unsigned int i = 0; i < train_tokens.size(); ++i) {
  1530. ++token_noccurs[train_tokens[i]];
  1531. }
  1532. int n_unique_tokens = 0;
  1533. for (unsigned int i = 0; i < token_noccurs.size(); ++i) {
  1534. if (token_noccurs[i] == 0) continue;
  1535. ++n_unique_tokens;
  1536. }
  1537. printf("%s: number of unique tokens: %d\n", __func__, n_unique_tokens);
  1538. size_t shuffle_samples_hash = compute_samples_hash(params.common.fn_train_data, train_samples_begin.data(), train_samples_size.data(), train_samples_size.size());
  1539. const bool changed_train_data = (shuffle_samples_hash != train->shuffle_samples_hash) || (train->shuffle_sample_count != train_samples_size.size());
  1540. if (changed_train_data) {
  1541. printf("%s: train data seems to have changed. restarting shuffled epoch.\n", __func__);
  1542. }
  1543. if (params.common.force_reshuffle) {
  1544. printf("%s: forced reshuffling of data. restarting with newly shuffled epoch.\n", __func__);
  1545. }
  1546. if ((train->shuffle_rng_state_current == "") || changed_train_data || params.common.force_reshuffle) {
  1547. train->shuffle_rng_state_current = mt19937_seed_to_state(params.common.seed);
  1548. train->shuffle_sample_count = train_samples_size.size();
  1549. train->shuffle_next_sample = 0;
  1550. train->shuffle_samples_hash = shuffle_samples_hash;
  1551. }
  1552. std::vector<size_t> train_shuffled_samples_offs;
  1553. std::vector<size_t> train_shuffled_samples_begin;
  1554. std::vector<size_t> train_shuffled_samples_size;
  1555. train_shuffled_samples_offs.resize(train_samples_begin.size());
  1556. train_shuffled_samples_begin.resize(train_samples_begin.size());
  1557. train_shuffled_samples_size.resize(train_samples_size.size());
  1558. train->shuffle_rng_state_next = shuffle_samples(
  1559. train->shuffle_rng_state_current,
  1560. train_shuffled_samples_offs.data(),
  1561. train_shuffled_samples_begin.data(),
  1562. train_shuffled_samples_size.data(),
  1563. train_samples_begin.data(),
  1564. train_samples_size.data(),
  1565. train_samples_size.size());
  1566. printf("%s: begin training\n", __func__);
  1567. save_train_files_data save_data;
  1568. save_data.fn_checkpoint_out = params.common.fn_checkpoint_out;
  1569. save_data.fn_lora_out = params.fn_lora_out;
  1570. save_data.pattern_fn_it = params.common.pattern_fn_it;
  1571. save_data.fn_latest = params.common.fn_latest;
  1572. save_data.model = &model;
  1573. save_data.lora = &lora;
  1574. struct train_opt_callback_data opt_cb_data;
  1575. opt_cb_data.params = &params.common;
  1576. opt_cb_data.train = train;
  1577. opt_cb_data.save_cb = &save_train_files;
  1578. opt_cb_data.save_data = &save_data;
  1579. opt_cb_data.lctx = lctx;
  1580. opt_cb_data.last_save_iter = opt->iter;
  1581. opt_cb_data.tokens_data = train_tokens.data();
  1582. opt_cb_data.tokens_size = train_tokens.size();
  1583. opt_cb_data.samples_begin = train_samples_begin.data();
  1584. opt_cb_data.samples_size = train_samples_size.data();
  1585. opt_cb_data.shuffled_samples_offs = train_shuffled_samples_offs.data();
  1586. opt_cb_data.shuffled_samples_begin = train_shuffled_samples_begin.data();
  1587. opt_cb_data.shuffled_samples_size = train_shuffled_samples_size.data();
  1588. opt_cb_data.samples_count = train_samples_size.size();
  1589. opt_cb_data.tokens_input = tokens_input;
  1590. opt_cb_data.target_probs = target_probs;
  1591. opt_cb_data.first_iter = opt->iter;
  1592. opt_cb_data.first_epoch = train->train_epochs;
  1593. opt_cb_data.iter_at_last_epoch = -1;
  1594. opt_cb_data.last_time = ggml_time_ms();
  1595. opt_cb_data.millis_per_iter = 0.0;
  1596. // measure required memory for work buffer
  1597. size_t max_work_size = ggml_graph_plan(gb, params.common.n_threads).work_size + GGML_OBJECT_SIZE;
  1598. printf("%s: work_size = %zu bytes (%.1f MB)\n", __func__, max_work_size, (float) max_work_size / (1024.0f*1024.0f));
  1599. // context for work buffer
  1600. struct ggml_init_params ctx_work_params = {
  1601. max_work_size, // mem_size
  1602. NULL, // mem_buffer
  1603. false, // no_alloc
  1604. };
  1605. struct ggml_context * ctx_work = ggml_init(ctx_work_params);
  1606. int64_t t0 = ggml_time_ms();
  1607. ggml_opt_resume_g(ctx_work, opt, loss, gf, gb, &train_opt_callback, (void *) &opt_cb_data);
  1608. ggml_free(ctx_work);
  1609. ggml_free(ctx_compute);
  1610. ggml_free(ctx_input);
  1611. ggml_gallocr_free(alloc);
  1612. int64_t t1 = ggml_time_ms();
  1613. printf("%s: total training time: ", __func__);
  1614. print_duration((double) (t1 - t0));
  1615. printf("\n");
  1616. int new_iters = opt->iter - opt_cb_data.last_save_iter;
  1617. if (new_iters > 0) {
  1618. train->train_its += new_iters;
  1619. train->train_tokens += new_iters * opt->params.n_gradient_accumulation * n_batch * n_tokens;
  1620. save_train_files(&save_data, train);
  1621. opt_cb_data.last_save_iter = opt->iter;
  1622. }
  1623. ggml_free(opt->ctx);
  1624. free_train_state(train);
  1625. ggml_free(lora.ctx);
  1626. llama_free(lctx);
  1627. llama_free_model(lmodel);
  1628. return 0;
  1629. }