train-text-from-scratch.cpp

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "common.h"
#include "train.h"
#include "llama.h"
#include <unordered_map>
#include <vector>
#include <cassert>
#include <climits>
#include <cstring>
#include <cstdarg>
#include <ctime>
#include <random>
#include <stdexcept>
#include <algorithm>
#include <string>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

struct my_llama_hparams {
    uint32_t n_vocab = 32000;
    uint32_t n_ctx   = 512;
    uint32_t n_embd  = 4096;
    uint32_t n_head  = 32;
    uint32_t n_layer = 32;
    uint32_t n_rot   = 64;
    uint32_t n_ff    = 11008;

    // float f_norm_eps     = 1e-5f; // falcon
    float f_norm_rms_eps = 1e-5f; // llama

    float rope_freq_base  = 10000.0f;
    float rope_freq_scale = 1.0f;
};
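// note: the defaults above are the LLaMA-7B shapes; training from scratch
// normally overrides them with the much smaller sizes from
// get_default_train_params() further below (n_embd 256, n_head 8, n_layer 16)
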
struct my_llama_layer {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * ffn_gate; // w1
    struct ggml_tensor * ffn_down; // w2
    struct ggml_tensor * ffn_up;   // w3
};

struct my_llama_model {
    struct ggml_context * ctx = NULL;
    ggml_backend_buffer_t data = NULL;

    my_llama_hparams hparams;

    struct ggml_tensor * tok_embeddings;
    struct ggml_tensor * norm;
    struct ggml_tensor * output;

    std::vector<my_llama_layer> layers;
};

// gguf constants (sync with gguf.py)
static const char * LLM_KV_TRAINING_TYPE_TRAIN_MODEL = "train_model";
static const char * LLM_KV_TRAINING_TYPE             = "training.type";

static const char * LLM_KV_GENERAL_ARCHITECTURE = "general.architecture";
static const char * LLM_KV_GENERAL_FILE_TYPE    = "general.file_type";

static const char * LLM_KV_CONTEXT_LENGTH              = "%s.context_length";
static const char * LLM_KV_EMBEDDING_LENGTH            = "%s.embedding_length";
static const char * LLM_KV_BLOCK_COUNT                 = "%s.block_count";
static const char * LLM_KV_FEED_FORWARD_LENGTH         = "%s.feed_forward_length";
static const char * LLM_KV_ATTENTION_HEAD_COUNT        = "%s.attention.head_count";
static const char * LLM_KV_ATTENTION_LAYERNORM_RMS_EPS = "%s.attention.layer_norm_rms_epsilon";
static const char * LLM_KV_ROPE_DIMENSION_COUNT        = "%s.rope.dimension_count";
static const char * LLM_KV_ROPE_FREQ_BASE              = "%s.rope.freq_base"; // TODO load in llama.cpp
static const char * LLM_KV_ROPE_SCALE_LINEAR           = "%s.rope.scale_linear";

static const char * LLM_KV_TOKENIZER_MODEL      = "tokenizer.ggml.model";
static const char * LLM_KV_TOKENIZER_LIST       = "tokenizer.ggml.tokens";
static const char * LLM_KV_TOKENIZER_TOKEN_TYPE = "tokenizer.ggml.token_type";
static const char * LLM_KV_TOKENIZER_SCORES     = "tokenizer.ggml.scores";
static const char * LLM_KV_TOKENIZER_MERGES     = "tokenizer.ggml.merges";
static const char * LLM_KV_TOKENIZER_BOS_ID     = "tokenizer.ggml.bos_token_id";
static const char * LLM_KV_TOKENIZER_EOS_ID     = "tokenizer.ggml.eos_token_id";
static const char * LLM_KV_TOKENIZER_UNK_ID     = "tokenizer.ggml.unknown_token_id";
static const char * LLM_KV_TOKENIZER_SEP_ID     = "tokenizer.ggml.seperator_token_id";
static const char * LLM_KV_TOKENIZER_PAD_ID     = "tokenizer.ggml.padding_token_id";

static const char * LLM_TENSOR_TOKEN_EMBD  = "token_embd";
static const char * LLM_TENSOR_OUTPUT_NORM = "output_norm";
static const char * LLM_TENSOR_OUTPUT      = "output";
static const char * LLM_TENSOR_ATTN_NORM   = "blk.%d.attn_norm";
static const char * LLM_TENSOR_ATTN_Q      = "blk.%d.attn_q";
static const char * LLM_TENSOR_ATTN_K      = "blk.%d.attn_k";
static const char * LLM_TENSOR_ATTN_V      = "blk.%d.attn_v";
static const char * LLM_TENSOR_ATTN_OUT    = "blk.%d.attn_output";
static const char * LLM_TENSOR_FFN_NORM    = "blk.%d.ffn_norm";
static const char * LLM_TENSOR_FFN_GATE    = "blk.%d.ffn_gate";
static const char * LLM_TENSOR_FFN_DOWN    = "blk.%d.ffn_down";
static const char * LLM_TENSOR_FFN_UP      = "blk.%d.ffn_up";
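// the "%s" in the keys above is a per-architecture placeholder filled in with
// "llama" by the kv() helpers below; the "%d" in the tensor names takes the
// layer index via the tni() helpers
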
static void print_params(struct my_llama_hparams * params) {
    printf("%s: n_vocab: %u\n", __func__, params->n_vocab);
    printf("%s: n_ctx:   %u\n", __func__, params->n_ctx);
    printf("%s: n_embd:  %u\n", __func__, params->n_embd);
    printf("%s: n_head:  %u\n", __func__, params->n_head);
    printf("%s: n_ff:    %u\n", __func__, params->n_ff);
    printf("%s: n_layer: %u\n", __func__, params->n_layer);
    printf("%s: n_rot:   %u\n", __func__, params->n_rot);
}

static void set_param_model(struct my_llama_model * model) {
    const auto & hparams = model->hparams;

    const uint32_t n_layer = hparams.n_layer;

    struct ggml_context * ctx = model->ctx;

    ggml_set_param(ctx, model->tok_embeddings);
    ggml_set_param(ctx, model->norm);
    ggml_set_param(ctx, model->output);

    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        ggml_set_param(ctx, layer.attention_norm);
        ggml_set_param(ctx, layer.wq);
        ggml_set_param(ctx, layer.wk);
        ggml_set_param(ctx, layer.wv);
        ggml_set_param(ctx, layer.wo);
        ggml_set_param(ctx, layer.ffn_norm);
        ggml_set_param(ctx, layer.ffn_gate);
        ggml_set_param(ctx, layer.ffn_down);
        ggml_set_param(ctx, layer.ffn_up);
    }
}

static void init_model(struct my_llama_model * model) {
    const auto & hparams = model->hparams;

    const uint32_t n_embd  = hparams.n_embd;
    const uint32_t n_layer = hparams.n_layer;
    const uint32_t n_vocab = hparams.n_vocab;
    const uint32_t n_ff    = hparams.n_ff;

    std::vector<char> tn_buf;
    tn_buf.resize(GGML_MAX_NAME);

    auto tn = [&tn_buf](const char * key) -> const char * {
        snprintf(tn_buf.data(), tn_buf.size(), "%s.weight", key);
        return tn_buf.data();
    };

    auto tni = [&tn_buf](const char * key, int bid) -> const char * {
        snprintf(tn_buf.data(), tn_buf.size(), key, bid);
        std::string s = tn_buf.data();
        snprintf(tn_buf.data(), tn_buf.size(), "%s.weight", s.c_str());
        return tn_buf.data();
    };
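    // e.g. tni(LLM_TENSOR_ATTN_Q, 7) produces "blk.7.attn_q.weight"
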
    // context for model tensors without their data
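    // sizing (a reading of the constants below, not documented anywhere):
    // 3 global tensors plus 9 per layer, doubled once for gradients and
    // doubled again as headroom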
    struct ggml_init_params ctx_model_params;
    ctx_model_params.mem_size   = ggml_tensor_overhead()*2*(6 + n_layer*18);
    ctx_model_params.mem_buffer = NULL;
    ctx_model_params.no_alloc   = true;

    struct ggml_context * ctx = ggml_init(ctx_model_params);
    model->ctx = ctx;

    model->tok_embeddings = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
    model->norm           = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
    model->output         = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);

    ggml_set_name(model->tok_embeddings, tn(LLM_TENSOR_TOKEN_EMBD));
    ggml_set_name(model->norm,           tn(LLM_TENSOR_OUTPUT_NORM));
    ggml_set_name(model->output,         tn(LLM_TENSOR_OUTPUT));

    model->layers.resize(n_layer);
    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

        layer.wq = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
        layer.wk = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
        layer.wv = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
        layer.wo = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);

        layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

        layer.ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);
        layer.ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_ff,   n_embd);
        layer.ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);

        ggml_set_name(layer.attention_norm, tni(LLM_TENSOR_ATTN_NORM, i));

        ggml_set_name(layer.wq, tni(LLM_TENSOR_ATTN_Q,   i));
        ggml_set_name(layer.wk, tni(LLM_TENSOR_ATTN_K,   i));
        ggml_set_name(layer.wv, tni(LLM_TENSOR_ATTN_V,   i));
        ggml_set_name(layer.wo, tni(LLM_TENSOR_ATTN_OUT, i));

        ggml_set_name(layer.ffn_norm, tni(LLM_TENSOR_FFN_NORM, i));

        ggml_set_name(layer.ffn_gate, tni(LLM_TENSOR_FFN_GATE, i));
        ggml_set_name(layer.ffn_down, tni(LLM_TENSOR_FFN_DOWN, i));
        ggml_set_name(layer.ffn_up,   tni(LLM_TENSOR_FFN_UP,   i));
    }

    set_param_model(model);

    // allocate data
    model->data = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cpu_buffer_type());
}

static void randomize_model(struct my_llama_model * model, int seed, float mean, float std, float min, float max) {
    const auto & hparams = model->hparams;

    const uint32_t n_layer = hparams.n_layer;

    struct random_normal_distribution * rnd = init_random_normal_distribution(seed, mean, std, min, max);

    randomize_tensor_normal(model->tok_embeddings, rnd);
    randomize_tensor_normal(model->norm,           rnd);
    randomize_tensor_normal(model->output,         rnd);

    for (uint32_t i = 0; i < n_layer; ++i) {
        auto & layer = model->layers[i];

        randomize_tensor_normal(layer.attention_norm, rnd);

        randomize_tensor_normal(layer.wq, rnd);
        randomize_tensor_normal(layer.wk, rnd);
        randomize_tensor_normal(layer.wv, rnd);
        randomize_tensor_normal(layer.wo, rnd);

        randomize_tensor_normal(layer.ffn_norm, rnd);

        randomize_tensor_normal(layer.ffn_gate, rnd);
        randomize_tensor_normal(layer.ffn_down, rnd);
        randomize_tensor_normal(layer.ffn_up,   rnd);
    }

    free_random_normal_distribution(rnd);
}

static struct ggml_tensor * llama_build_train_graphs(
        struct my_llama_model * model,
        ggml_gallocr_t          alloc,
        struct ggml_context   * ctx,
        struct ggml_cgraph    * gf,
        struct ggml_cgraph    * gb,
        struct ggml_cgraph    * gb_tmp,
        struct ggml_tensor  * * logits,
        struct ggml_tensor    * tokens_input,
        struct ggml_tensor    * targets,
        const int               n_tokens,
        const int               n_batch,
        const bool              enable_flash_attn,
        const bool              enable_checkpointing,
        const bool              measure_only) {

    ggml_set_scratch(ctx, { 0, 0, nullptr, });
    const int n_past = 0;
    const int N = n_tokens;
    const auto & hparams = model->hparams;
    const int n_ctx   = hparams.n_ctx;
    const int n_vocab = hparams.n_vocab;
    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_head  = hparams.n_head;
    const int n_rot   = hparams.n_rot;
    const int n_ff    = hparams.n_ff;
    const float f_norm_rms_eps  = hparams.f_norm_rms_eps;
    const float rope_freq_base  = hparams.rope_freq_base;
    const float rope_freq_scale = hparams.rope_freq_scale;

    auto set_name = [](struct ggml_tensor * t, const char * n) {
        ggml_set_name(t, n);
        if (t->grad) {
            ggml_format_name(t->grad, "%s->grad", n);
        }
    };
    // KQ_pos - contains the positions
    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N);
    ggml_set_input(KQ_pos);

    // rope has so many parameters that we make a custom function for it
    auto rope = [ctx, KQ_pos, n_rot, n_ctx, rope_freq_base, rope_freq_scale]
                (struct ggml_tensor * t) -> struct ggml_tensor * {
        // not capturing these, to silence warnings
        const int rope_mode = 0;

        return ggml_rope_custom(
            ctx, t, KQ_pos, n_rot, rope_mode, n_ctx, 0, rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
        );
    };
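    // rope_mode 0 selects the original LLaMA-style rotary embedding over the
    // first n_rot dimensions of each head; KQ_pos supplies the absolute token
    // positions and is filled in once the graph has been allocated (see below)
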
    set_name(tokens_input, "tokens_input");
    set_name(targets,      "targets");

    GGML_ASSERT(tokens_input->type == GGML_TYPE_I32);
    struct ggml_tensor * t00 = ggml_reshape_1d(ctx, tokens_input, N*n_batch);  set_name(t00, "t00"); assert_shape_1d(t00, N*n_batch);
    struct ggml_tensor * t01 = ggml_get_rows(ctx, model->tok_embeddings, t00); set_name(t01, "t01"); assert_shape_2d(t01, n_embd, N*n_batch);

    struct ggml_tensor * cur = t01;

    std::vector<struct ggml_tensor *> checkpoints;
    checkpoints.push_back(tokens_input);
    checkpoints.push_back(targets);
    checkpoints.push_back(t00);
    checkpoints.push_back(t01);

    const float kv_scale = 1.0f/sqrtf(float(n_embd)/n_head);
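    // kv_scale is the usual attention scaling 1/sqrt(d_head), with
    // d_head = n_embd/n_head. Each loop iteration below is one transformer
    // block: RMSNorm -> Q/K/V projections -> RoPE -> causally masked softmax
    // attention -> residual add, then RMSNorm -> SwiGLU feed-forward
    // (silu(gate) * up, projected back down) -> residual add.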
    for (int il = 0; il < n_layer; ++il) {
        struct my_llama_layer & layer = model->layers[il];
        struct ggml_tensor * t02 = ggml_rms_norm     (ctx, cur, f_norm_rms_eps);                    set_name(t02, "t02");     assert_shape_2d(t02, n_embd, N*n_batch);
        struct ggml_tensor * t03 = ggml_repeat       (ctx, layer.attention_norm, t02);              set_name(t03, "t03");     assert_shape_2d(t03, n_embd, N*n_batch);
        struct ggml_tensor * t04 = ggml_mul          (ctx, t03, t02);                               set_name(t04, "t04");     assert_shape_2d(t04, n_embd, N*n_batch);
        struct ggml_tensor * t05 = ggml_mul_mat      (ctx, layer.wq, t04);                          set_name(t05, "t05");     assert_shape_2d(t05, n_embd, N*n_batch);
        struct ggml_tensor * t06 = ggml_reshape_4d   (ctx, t05, n_embd/n_head, n_head, N, n_batch); set_name(t06, "t06");     assert_shape_4d(t06, n_embd/n_head, n_head, N, n_batch);
        struct ggml_tensor * t07 = rope              (t06);                                         set_name(t07, "t07");     assert_shape_4d(t07, n_embd/n_head, n_head, N, n_batch);
        struct ggml_tensor * t08 = ggml_mul_mat      (ctx, layer.wk, t04);                          set_name(t08, "t08");     assert_shape_2d(t08, n_embd, N*n_batch);
        struct ggml_tensor * t09 = ggml_reshape_4d   (ctx, t08, n_embd/n_head, n_head, N, n_batch); set_name(t09, "t09");     assert_shape_4d(t09, n_embd/n_head, n_head, N, n_batch);
        struct ggml_tensor * t10 = rope              (t09);                                         set_name(t10, "t10");     assert_shape_4d(t10, n_embd/n_head, n_head, N, n_batch);
        struct ggml_tensor * t11 = ggml_mul_mat      (ctx, t04, layer.wv);                          set_name(t11, "t11");     assert_shape_2d(t11, N*n_batch, n_embd);
        struct ggml_tensor * t12 = ggml_reshape_4d   (ctx, t11, N, n_batch, n_embd/n_head, n_head); set_name(t12, "t12");     assert_shape_4d(t12, N, n_batch, n_embd/n_head, n_head);
        struct ggml_tensor * t13 = ggml_permute      (ctx, t07, 0, 2, 1, 3);                        set_name(t13, "t13");     assert_shape_4d(t13, n_embd/n_head, N, n_head, n_batch);
        struct ggml_tensor * t14 = ggml_permute      (ctx, t10, 0, 2, 1, 3);                        set_name(t14, "t14");     assert_shape_4d(t14, n_embd/n_head, N, n_head, n_batch);
        struct ggml_tensor * t15 = ggml_permute      (ctx, t12, 0, 3, 1, 2);                        set_name(t15, "t15");     assert_shape_4d(t15, N, n_embd/n_head, n_head, n_batch);
        struct ggml_tensor * t16;
        if (enable_flash_attn) {
            t16 = ggml_flash_attn(ctx, t13, t14, t15, true);                                        set_name(t16, "t16");     assert_shape_4d(t16, n_embd/n_head, N, n_head, n_batch);
        } else {
            struct ggml_tensor * t16_0 = ggml_mul_mat              (ctx, t14, t13);                 set_name(t16_0, "t16_0"); assert_shape_4d(t16_0, N, N, n_head, n_batch);
            struct ggml_tensor * t16_1 = ggml_scale_inplace        (ctx, t16_0, kv_scale);          set_name(t16_1, "t16_1"); assert_shape_4d(t16_1, N, N, n_head, n_batch);
            struct ggml_tensor * t16_2 = ggml_diag_mask_inf_inplace(ctx, t16_1, n_past);            set_name(t16_2, "t16_2"); assert_shape_4d(t16_2, N, N, n_head, n_batch);
            struct ggml_tensor * t16_3 = ggml_soft_max_inplace     (ctx, t16_2);                    set_name(t16_3, "t16_3"); assert_shape_4d(t16_3, N, N, n_head, n_batch);
            t16 = ggml_mul_mat(ctx, t15, t16_3);                                                    set_name(t16, "t16");     assert_shape_4d(t16, n_embd/n_head, N, n_head, n_batch);
        }
        struct ggml_tensor * t17 = ggml_permute      (ctx, t16, 0, 2, 1, 3);                        set_name(t17, "t17");     assert_shape_4d(t17, n_embd/n_head, n_head, N, n_batch);
        struct ggml_tensor * t18 = ggml_cont         (ctx, t17);                                    set_name(t18, "t18");     assert_shape_4d(t18, n_embd/n_head, n_head, N, n_batch);
        struct ggml_tensor * t19 = ggml_reshape_2d   (ctx, t18, n_embd, N*n_batch);                 set_name(t19, "t19");     assert_shape_2d(t19, n_embd, N*n_batch);
        struct ggml_tensor * t20 = ggml_mul_mat      (ctx, layer.wo, t19);                          set_name(t20, "t20");     assert_shape_2d(t20, n_embd, N*n_batch);
        struct ggml_tensor * t21 = ggml_add          (ctx, t20, cur);                               set_name(t21, "t21");     assert_shape_2d(t21, n_embd, N*n_batch);
        struct ggml_tensor * t22 = ggml_rms_norm     (ctx, t21, f_norm_rms_eps);                    set_name(t22, "t22");     assert_shape_2d(t22, n_embd, N*n_batch);
        struct ggml_tensor * t23 = ggml_repeat       (ctx, layer.ffn_norm, t22);                    set_name(t23, "t23");     assert_shape_2d(t23, n_embd, N*n_batch);
        struct ggml_tensor * t24 = ggml_mul          (ctx, t23, t22);                               set_name(t24, "t24");     assert_shape_2d(t24, n_embd, N*n_batch);
        struct ggml_tensor * t25 = ggml_mul_mat      (ctx, layer.ffn_up, t24);                      set_name(t25, "t25");     assert_shape_2d(t25, n_ff, N*n_batch);
        struct ggml_tensor * t26 = ggml_mul_mat      (ctx, layer.ffn_gate, t24);                    set_name(t26, "t26");     assert_shape_2d(t26, n_ff, N*n_batch);
        struct ggml_tensor * t27 = ggml_silu         (ctx, t26);                                    set_name(t27, "t27");     assert_shape_2d(t27, n_ff, N*n_batch);
        struct ggml_tensor * t28 = ggml_mul          (ctx, t27, t25);                               set_name(t28, "t28");     assert_shape_2d(t28, n_ff, N*n_batch);
        struct ggml_tensor * t29 = ggml_mul_mat      (ctx, layer.ffn_down, t28);                    set_name(t29, "t29");     assert_shape_2d(t29, n_embd, N*n_batch);
        struct ggml_tensor * t30 = ggml_add          (ctx, t29, t21);                               set_name(t30, "t30");     assert_shape_2d(t30, n_embd, N*n_batch);
        cur = t30;
        checkpoints.push_back(cur);
    }
    struct ggml_tensor * t31 = ggml_rms_norm          (ctx, cur, f_norm_rms_eps);      set_name(t31, "t31"); assert_shape_2d(t31, n_embd, N*n_batch);
    struct ggml_tensor * t32 = ggml_repeat            (ctx, model->norm, t31);         set_name(t32, "t32"); assert_shape_2d(t32, n_embd, N*n_batch);
    struct ggml_tensor * t33 = ggml_mul               (ctx, t32, t31);                 set_name(t33, "t33"); assert_shape_2d(t33, n_embd, N*n_batch);
    struct ggml_tensor * t34 = ggml_mul_mat           (ctx, model->output, t33);       set_name(t34, "t34"); assert_shape_2d(t34, n_vocab, N*n_batch);
    struct ggml_tensor * t35 = ggml_reshape_3d        (ctx, t34, n_vocab, N, n_batch); set_name(t35, "t35"); assert_shape_3d(t35, n_vocab, N, n_batch);
    struct ggml_tensor * t36 = ggml_cross_entropy_loss(ctx, t35, targets);             set_name(t36, "t36"); assert_shape_1d(t36, 1);

    checkpoints.push_back(t31);
    checkpoints.push_back(t32);
    checkpoints.push_back(t33);
    checkpoints.push_back(t34);
    checkpoints.push_back(t35);
    checkpoints.push_back(t36);

    ggml_build_forward_expand(gf, t36);

    if (enable_checkpointing) {
        ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints.data(), (int) checkpoints.size());
    } else {
        ggml_graph_cpy(gf, gb);
        ggml_build_backward_expand(ctx, gf, gb, true);
    }

    if (alloc) {
        // make sure some tensors are not reallocated by inserting new temporary nodes depending on them
        int n_leafs_before = gb->n_leafs;
        int n_nodes_before = gb->n_nodes;

        // output tensors
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, 1.0f));
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, 1.0f));
        // input gradient
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, 1.0f));
        // KQ_pos
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, 1.0f));
        GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL);

        ggml_set_input(t36->grad);

        // allocating checkpoints in one block to reduce memory fragmentation
        // note: they will be freed in reverse order
        for (int i = 0; i < (int) checkpoints.size(); ++i) {
            if (checkpoints[i]->data == NULL && checkpoints[i]->view_src == NULL) {
                ggml_set_input(checkpoints[i]);
            }
        }

        //int n_leafs_after = gb->n_leafs;
        //int n_nodes_after = gb->n_nodes;

        if (measure_only) {
            // FIXME: will still allocate
            ggml_gallocr_reserve(alloc, gb);
        } else {
            ggml_gallocr_alloc_graph(alloc, gb);

            // set KQ_pos
            {
                int * data = (int *) KQ_pos->data;
                for (int i = 0; i < N; ++i) {
                    data[i] = n_past + i;
                }
            }
        }

        // remove the additional nodes and leafs
        for (int i = n_leafs_before; i < gb->n_leafs; ++i) {
            gb->leafs[i] = NULL;
        }
        for (int i = n_nodes_before; i < gb->n_nodes; ++i) {
            gb->nodes[i] = NULL;
        }
        gb->n_leafs = n_leafs_before;
        gb->n_nodes = n_nodes_before;
    }

    *logits = t35;
    return t36;
}

#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
do { \
    const std::string skey(key); \
    const int kid = gguf_find_key(ctx, skey.c_str()); \
    if (kid >= 0) { \
        enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \
        if (ktype != (type)) { \
            die_fmt("key %s has wrong type: %s", skey.c_str(), gguf_type_name(ktype)); \
        } \
        (dst) = func(ctx, kid); \
    } else if (req) { \
        die_fmt("key not found in model: %s", skey.c_str()); \
    } \
} while (0)
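// when req is false and the key is absent, dst keeps its prior value, which is
// why callers pre-load dst with a default; a present key of the wrong type is
// always fatal
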
static void load_llama_model_gguf(struct gguf_context * fctx, struct ggml_context * f_ggml_ctx, struct my_llama_model * model) {
    // NOTE: gguf_context must be initialized with f_ggml_ctx and no_alloc=false, otherwise tensor data can not be read
    std::string arch;

    std::vector<char> keybuf;
    keybuf.resize(512);
    auto kv = [&arch, &keybuf](const char * key) -> const char * {
        snprintf(keybuf.data(), keybuf.size(), key, arch.c_str());
        return keybuf.data();
    };

    std::vector<char> tn_buf;
    tn_buf.resize(GGML_MAX_NAME);
    auto tn = [&tn_buf](const char * key) -> const char * {
        snprintf(tn_buf.data(), tn_buf.size(), "%s.weight", key);
        return tn_buf.data();
    };
    auto tni = [&tn_buf](const char * key, int bid) -> const char * {
        snprintf(tn_buf.data(), tn_buf.size(), key, bid);
        std::string s = tn_buf.data();
        snprintf(tn_buf.data(), tn_buf.size(), "%s.weight", s.c_str());
        return tn_buf.data();
    };

    GGUF_GET_KEY(fctx, arch, gguf_get_val_str, GGUF_TYPE_STRING, true, LLM_KV_GENERAL_ARCHITECTURE);
    GGML_ASSERT(arch == "llama");

    uint32_t ftype_u;
    GGUF_GET_KEY(fctx, ftype_u, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_GENERAL_FILE_TYPE);
    GGML_ASSERT((enum llama_ftype) ftype_u == LLAMA_FTYPE_ALL_F32);

    // n_ctx was not saved in earlier checkpoint file versions, so we make it optional here
    GGUF_GET_KEY(fctx, model->hparams.n_ctx, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_CONTEXT_LENGTH));

    GGUF_GET_KEY(fctx, model->hparams.n_embd,  gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_EMBEDDING_LENGTH));
    GGUF_GET_KEY(fctx, model->hparams.n_ff,    gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_FEED_FORWARD_LENGTH));
    GGUF_GET_KEY(fctx, model->hparams.n_head,  gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_ATTENTION_HEAD_COUNT));
    GGUF_GET_KEY(fctx, model->hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_BLOCK_COUNT));

    model->hparams.n_rot = model->hparams.n_embd / model->hparams.n_head;
    GGUF_GET_KEY(fctx, model->hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ROPE_DIMENSION_COUNT));

    float rope_freq_scale = 1.0f;
    GGUF_GET_KEY(fctx, model->hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
    GGUF_GET_KEY(fctx, model->hparams.rope_freq_base, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE));
    GGUF_GET_KEY(fctx, rope_freq_scale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR));
    if (rope_freq_scale != 1.0f) {
        model->hparams.rope_freq_scale = 1.0f / rope_freq_scale;
    }
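    // GGUF stores the linear RoPE scale, the reciprocal of the frequency scale
    // kept in hparams; the save path below writes 1.0f / rope_freq_scale, so
    // the two round-trip consistently
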
    init_model(model);

    copy_tensor_by_name(model->tok_embeddings, f_ggml_ctx, tn(LLM_TENSOR_TOKEN_EMBD));
    copy_tensor_by_name(model->norm,           f_ggml_ctx, tn(LLM_TENSOR_OUTPUT_NORM));
    copy_tensor_by_name(model->output,         f_ggml_ctx, tn(LLM_TENSOR_OUTPUT));

    for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
        auto & layer = model->layers[i];

        copy_tensor_by_name(layer.attention_norm, f_ggml_ctx, tni(LLM_TENSOR_ATTN_NORM, i));
        copy_tensor_by_name(layer.wq,             f_ggml_ctx, tni(LLM_TENSOR_ATTN_Q,    i));
        copy_tensor_by_name(layer.wk,             f_ggml_ctx, tni(LLM_TENSOR_ATTN_K,    i));
        copy_tensor_by_name(layer.wv,             f_ggml_ctx, tni(LLM_TENSOR_ATTN_V,    i));
        copy_tensor_by_name(layer.wo,             f_ggml_ctx, tni(LLM_TENSOR_ATTN_OUT,  i));
        copy_tensor_by_name(layer.ffn_norm,       f_ggml_ctx, tni(LLM_TENSOR_FFN_NORM,  i));
        copy_tensor_by_name(layer.ffn_gate,       f_ggml_ctx, tni(LLM_TENSOR_FFN_GATE,  i));
        copy_tensor_by_name(layer.ffn_down,       f_ggml_ctx, tni(LLM_TENSOR_FFN_DOWN,  i));
        copy_tensor_by_name(layer.ffn_up,         f_ggml_ctx, tni(LLM_TENSOR_FFN_UP,    i));
    }
}

static void save_llama_model_gguf(struct gguf_context * fctx, const char * fn_vocab_model, struct my_llama_model * model) {
    const char * arch = "llama";

    enum llama_ftype ftype = LLAMA_FTYPE_ALL_F32;

    std::vector<char> keybuf;
    keybuf.resize(512);
    auto kv = [arch, &keybuf](const char * key) -> const char * {
        snprintf(keybuf.data(), keybuf.size(), key, arch);
        return keybuf.data();
    };

    // set arch
    gguf_set_val_str(fctx, LLM_KV_GENERAL_ARCHITECTURE, arch);
    gguf_set_val_u32(fctx, LLM_KV_GENERAL_FILE_TYPE, ftype);

    // set hparams
    gguf_set_val_u32(fctx, kv(LLM_KV_CONTEXT_LENGTH),              model->hparams.n_ctx  );
    gguf_set_val_u32(fctx, kv(LLM_KV_EMBEDDING_LENGTH),            model->hparams.n_embd );
    gguf_set_val_u32(fctx, kv(LLM_KV_FEED_FORWARD_LENGTH),         model->hparams.n_ff   );
    gguf_set_val_u32(fctx, kv(LLM_KV_ATTENTION_HEAD_COUNT),        model->hparams.n_head );
    gguf_set_val_u32(fctx, kv(LLM_KV_BLOCK_COUNT),                 model->hparams.n_layer);
    gguf_set_val_u32(fctx, kv(LLM_KV_ROPE_DIMENSION_COUNT),        model->hparams.n_rot  );

    gguf_set_val_f32(fctx, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS), model->hparams.f_norm_rms_eps        );
    gguf_set_val_f32(fctx, kv(LLM_KV_ROPE_FREQ_BASE),              model->hparams.rope_freq_base        ); // TODO load in llama.cpp
    gguf_set_val_f32(fctx, kv(LLM_KV_ROPE_SCALE_LINEAR),           1.0f / model->hparams.rope_freq_scale);

    // set vocab by copying from vocab_model gguf file
    {
        struct gguf_init_params params = {
            /*.no_alloc = */ false,
            /*.ctx      = */ NULL,
        };
        struct gguf_context * vctx = gguf_init_from_file(fn_vocab_model, params);

        const int token_idx = gguf_find_key(vctx, kv(LLM_KV_TOKENIZER_LIST));
        if (token_idx == -1) {
            die("cannot find tokenizer vocab in model file");
        }
        const uint32_t n_vocab = gguf_get_arr_n(vctx, token_idx);

        const int score_idx = gguf_find_key(vctx, kv(LLM_KV_TOKENIZER_SCORES));
        if (score_idx == -1) {
            die("cannot find tokenizer scores in model file");
        }
        const float * scores = (const float *) gguf_get_arr_data(vctx, score_idx);

        const int toktype_idx = gguf_find_key(vctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE));
        if (toktype_idx == -1) {
            die("cannot find token type list in GGUF file");
        }
        const int * toktypes = (const int *) gguf_get_arr_data(vctx, toktype_idx);

        std::string tokenizer_name;
        GGUF_GET_KEY(vctx, tokenizer_name, gguf_get_val_str, GGUF_TYPE_STRING, true, kv(LLM_KV_TOKENIZER_MODEL));

        gguf_set_val_str(fctx, kv(LLM_KV_TOKENIZER_MODEL), tokenizer_name.c_str());
        gguf_set_arr_data(fctx, kv(LLM_KV_TOKENIZER_SCORES), GGUF_TYPE_FLOAT32, scores, n_vocab);
        gguf_set_arr_data(fctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE), GGUF_TYPE_INT32, toktypes, n_vocab);

        int32_t special_bos_id = 1;
        int32_t special_eos_id = 2;
        int32_t special_unk_id = 0;
        int32_t special_sep_id = -1;
        int32_t special_pad_id = -1;
        if (tokenizer_name == "llama") {
            // default special tokens
            special_bos_id = 1;
            special_eos_id = 2;
            special_unk_id = 0;
            special_sep_id = -1;
            special_pad_id = -1;
        } else if (tokenizer_name == "gpt2") {
            // read and copy bpe merges
            const int merges_keyidx = gguf_find_key(vctx, kv(LLM_KV_TOKENIZER_MERGES));
            if (merges_keyidx == -1) {
                die("cannot find tokenizer merges in model file");
            }
            const int n_merges = gguf_get_arr_n(vctx, merges_keyidx);

            std::vector<const char *> merges;
            merges.resize(n_merges);
            for (int i = 0; i < n_merges; i++) {
                merges[i] = gguf_get_arr_str(vctx, merges_keyidx, i);
            }
            gguf_set_arr_str(fctx, kv(LLM_KV_TOKENIZER_MERGES), merges.data(), n_merges);

            // default special tokens
            special_bos_id = 11;
            special_eos_id = 11;
            special_unk_id = -1;
            special_sep_id = -1;
            special_pad_id = -1;
        } else {
            fprintf(stderr, "%s: unknown tokenizer: '%s'\n", __func__, tokenizer_name.c_str());
            fprintf(stderr, "%s: using default tokenizer: 'llama'\n", __func__);
        }

        std::vector<const char *> tokens;
        tokens.resize(n_vocab);
        for (uint32_t i = 0; i < n_vocab; i++) {
            tokens[i] = gguf_get_arr_str(vctx, token_idx, i);
        }
        gguf_set_arr_str(fctx, kv(LLM_KV_TOKENIZER_LIST), tokens.data(), n_vocab);

        GGUF_GET_KEY(vctx, special_bos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_BOS_ID));
        GGUF_GET_KEY(vctx, special_eos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_EOS_ID));
        GGUF_GET_KEY(vctx, special_unk_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_UNK_ID));
        GGUF_GET_KEY(vctx, special_sep_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_SEP_ID));
        GGUF_GET_KEY(vctx, special_pad_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_PAD_ID));

        gguf_set_val_u32(fctx, kv(LLM_KV_TOKENIZER_BOS_ID), special_bos_id);
        gguf_set_val_u32(fctx, kv(LLM_KV_TOKENIZER_EOS_ID), special_eos_id);
        gguf_set_val_u32(fctx, kv(LLM_KV_TOKENIZER_UNK_ID), special_unk_id);
        gguf_set_val_u32(fctx, kv(LLM_KV_TOKENIZER_SEP_ID), special_sep_id);
        gguf_set_val_u32(fctx, kv(LLM_KV_TOKENIZER_PAD_ID), special_pad_id);

        gguf_free(vctx);
    }

    // add tensors
    gguf_add_tensor(fctx, model->tok_embeddings);
    gguf_add_tensor(fctx, model->norm);
    gguf_add_tensor(fctx, model->output);
    for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
        auto & layer = model->layers[i];

        gguf_add_tensor(fctx, layer.attention_norm);
        gguf_add_tensor(fctx, layer.wq);
        gguf_add_tensor(fctx, layer.wk);
        gguf_add_tensor(fctx, layer.wv);
        gguf_add_tensor(fctx, layer.wo);
        gguf_add_tensor(fctx, layer.ffn_norm);
        gguf_add_tensor(fctx, layer.ffn_gate);
        gguf_add_tensor(fctx, layer.ffn_down);
        gguf_add_tensor(fctx, layer.ffn_up);
    }
}

static void save_llama_model_file(const char * filename, const char * fn_vocab_model, struct my_llama_model * model) {
    printf("%s: saving to %s\n", __func__, filename);
    struct gguf_context * fctx = gguf_init_empty();

    save_llama_model_gguf(fctx, fn_vocab_model, model);

    // write file
    const bool only_meta = false;
    gguf_write_to_file(fctx, filename, only_meta);
    gguf_free(fctx);
}

static void load_checkpoint_gguf(struct gguf_context * fctx, struct ggml_context * f_ggml_ctx, struct my_llama_model * model, struct train_state * train) {
    load_llama_model_gguf(fctx, f_ggml_ctx, model);
    if (load_train_state_gguf(fctx, f_ggml_ctx, train)) {
        std::string train_type = LLM_KV_TRAINING_TYPE_TRAIN_MODEL;
        GGUF_GET_KEY(fctx, train_type, gguf_get_val_str, GGUF_TYPE_STRING, false, LLM_KV_TRAINING_TYPE);
        GGML_ASSERT(train_type == LLM_KV_TRAINING_TYPE_TRAIN_MODEL);
    } else {
        printf("%s: loaded llama model as checkpoint\n", __func__);
    }
}
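// a checkpoint is a model plus optimizer/training state; a plain model GGUF
// (no training state) also loads, and training then starts fresh from it
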
static void save_checkpoint_gguf(struct gguf_context * fctx, const char * fn_vocab_model, struct my_llama_model * model, struct train_state * train) {
    gguf_set_val_str(fctx, LLM_KV_TRAINING_TYPE, LLM_KV_TRAINING_TYPE_TRAIN_MODEL);
    save_llama_model_gguf(fctx, fn_vocab_model, model);
    save_train_state_gguf(fctx, train);
}

static bool load_checkpoint_file(const char * filename, struct my_llama_model * model, struct train_state * train) {
    struct ggml_context * f_ggml_ctx;
    struct gguf_init_params params;
    params.no_alloc = false;
    params.ctx      = &f_ggml_ctx;
    struct gguf_context * fctx = gguf_init_from_file(filename, params);
    if (fctx == NULL) {
        return false;
    }

    load_checkpoint_gguf(fctx, f_ggml_ctx, model, train);

    return true;
}

static void save_checkpoint_file(const char * filename, const char * fn_vocab_model, struct my_llama_model * model, struct train_state * train) {
    printf("%s: saving to %s\n", __func__, filename);
    struct gguf_context * fctx = gguf_init_empty();

    save_checkpoint_gguf(fctx, fn_vocab_model, model, train);

    // write file
    const bool only_meta = false;
    gguf_write_to_file(fctx, filename, only_meta);
    gguf_free(fctx);
}

struct train_params {
    struct train_params_common common;

    const char * fn_vocab_model;
    const char * fn_model_out;

    bool only_write_model;

    int n_ctx;
    int n_embd;
    int n_head;
    int n_layer;
    int n_ff;

    float f_norm_rms_eps;
    float rope_freq_base;
    float rope_freq_scale;
};

static struct train_params get_default_train_params() {
    struct train_params params;
    params.common = get_default_train_params_common();
    params.fn_vocab_model = "ggml-vic7b-uncensored-q4_0.bin";
    params.fn_model_out   = "ggml-checkpoint-f32.bin";

    params.only_write_model = false;

    params.n_ctx   =   128;
    params.n_embd  =   256;
    params.n_head  =     8;
    params.n_layer =    16;
    params.n_ff    =   768;

    params.f_norm_rms_eps  = 1e-5f;
    params.rope_freq_base  = 10000.0f;
    params.rope_freq_scale = 1.0f;

    return params;
}

static void train_print_usage(int argc, char ** argv, const struct train_params * params) {
    fprintf(stderr, "usage: %s [options]\n", argv[0]);
    fprintf(stderr, "\n");
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h, --help                 show this help message and exit\n");

    fprintf(stderr, "  --vocab-model FNAME        model path from which to load vocab (default '%s')\n", params->fn_vocab_model);
    fprintf(stderr, "  --model-out FNAME          path to save ggml model (default '%s')\n", params->fn_model_out);
    fprintf(stderr, "  --only-write-model         only save llama model, don't do any training. use this if you only want to convert a checkpoint to a model.\n");
    fprintf(stderr, "  --embd N                   Embedding size used for new models (default %d)\n", params->n_embd);
    fprintf(stderr, "  --ff N                     Feedforward size used for new models. (default %d)\n", params->n_ff);
    fprintf(stderr, "  --head N                   Number of heads for new models (default %d)\n", params->n_head);
    fprintf(stderr, "  --layer N                  Number of layers for new models (default %d)\n", params->n_layer);
    fprintf(stderr, "  --norm-rms-eps F           RMS-Norm epsilon value (default %f)\n", params->f_norm_rms_eps);
    fprintf(stderr, "  --rope-freq-base F         Frequency base for ROPE (default %f)\n", params->rope_freq_base);
    fprintf(stderr, "  --rope-freq-scale F        Frequency scale for ROPE (default %f)\n", params->rope_freq_scale);

    print_common_train_usage(argc, argv, &params->common);
}
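// A typical invocation (illustrative paths and data; options not parsed in
// train_params_parse below, such as --ctx, --train-data, -t, -b, --seed and
// --adam-iter, come from the shared option parser in train.h):
//
//   ./train-text-from-scratch \
//       --vocab-model models/ggml-vocab-llama.gguf \
//       --ctx 64 --embd 256 --head 8 --layer 16 \
//       --checkpoint-in  chk-shakespeare-256x16-LATEST.gguf \
//       --checkpoint-out chk-shakespeare-256x16-ITERATION.gguf \
//       --model-out ggml-shakespeare-256x16-f32-ITERATION.gguf \
//       --train-data shakespeare.txt \
//       -t 6 -b 16 --seed 1 --adam-iter 256
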
static bool train_params_parse(int argc, char ** argv, struct train_params * params) {
    bool invalid_param = false;
    std::string arg;
    struct train_params default_params = get_default_train_params();
    const std::string arg_prefix = "--";

    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }

        if (consume_common_train_arg(argc, argv, &i, &params->common, &invalid_param)) {
            if (invalid_param) {
                break;
            } else if (params->common.print_usage) {
                train_print_usage(argc, argv, &default_params);
                exit(0);
            }
        } else if (arg == "--vocab-model") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->fn_vocab_model = argv[i];
        } else if (arg == "--model-out") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->fn_model_out = argv[i];
        } else if (arg == "--only-write-model") {
            params->only_write_model = true;
        } else if (arg == "--embd") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->n_embd = std::stoi(argv[i]);
        } else if (arg == "--ff") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->n_ff = std::stoi(argv[i]);
        } else if (arg == "--head") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->n_head = std::stoi(argv[i]);
        } else if (arg == "--layer") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->n_layer = std::stoi(argv[i]);
        } else if (arg == "--norm-rms-eps") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->f_norm_rms_eps = std::stof(argv[i]);
        } else if (arg == "--rope-freq-base") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->rope_freq_base = std::stof(argv[i]);
        } else if (arg == "--rope-freq-scale") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params->rope_freq_scale = std::stof(argv[i]);
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            train_print_usage(argc, argv, &default_params);
            exit(1);
        }
    }
    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        train_print_usage(argc, argv, &default_params);
        exit(1);
    }
    finish_processing_train_args(&params->common);
    return true;
}

struct save_train_files_data {
    const char            * fn_checkpoint_out;
    const char            * fn_model_out;
    const char            * fn_vocab_model;
    const char            * pattern_fn_it;
    const char            * fn_latest;
    struct my_llama_model * model;
};

static void save_train_files(void * vdata, struct train_state * train) {
    struct save_train_files_data * data = (struct save_train_files_data *) vdata;
    int64_t iter = train->opt->iter;

    if (strlen(data->fn_checkpoint_out) > 0) {
        save_checkpoint_file(get_train_filename(data->fn_checkpoint_out, data->pattern_fn_it, data->fn_latest, iter).c_str(), data->fn_vocab_model, data->model, train);
        save_checkpoint_file(get_train_filename(data->fn_checkpoint_out, data->pattern_fn_it, data->fn_latest, -1  ).c_str(), data->fn_vocab_model, data->model, train);
    }
    if (strlen(data->fn_model_out) > 0) {
        save_llama_model_file(get_train_filename(data->fn_model_out, data->pattern_fn_it, data->fn_latest, iter).c_str(), data->fn_vocab_model, data->model);
        save_llama_model_file(get_train_filename(data->fn_model_out, data->pattern_fn_it, data->fn_latest, -1  ).c_str(), data->fn_vocab_model, data->model);
    }
}
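// get_train_filename is a train.h helper; as used here it appears to replace
// the pattern_fn_it placeholder in the filename with the iteration number, or
// with the fn_latest string when iter is -1; hence the two writes above: one
// file per iteration plus a rolling "latest" copy
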
static int64_t get_parameter_count(struct my_llama_model * model) {
    int64_t nx = 0;
    nx += ggml_nelements(model->tok_embeddings);
    nx += ggml_nelements(model->norm);
    nx += ggml_nelements(model->output);

    for (uint32_t i = 0; i < model->layers.size(); ++i) {
        auto & layer = model->layers[i];
        nx += ggml_nelements(layer.attention_norm);
        nx += ggml_nelements(layer.wq);
        nx += ggml_nelements(layer.wk);
        nx += ggml_nelements(layer.wv);
        nx += ggml_nelements(layer.wo);
        nx += ggml_nelements(layer.ffn_norm);
        nx += ggml_nelements(layer.ffn_gate);
        nx += ggml_nelements(layer.ffn_down);
        nx += ggml_nelements(layer.ffn_up);
    }
    return nx;
}

int main(int argc, char ** argv) {
    struct train_params params = get_default_train_params();

    if (!train_params_parse(argc, argv, &params)) {
        return 1;
    }

    if (params.common.seed == LLAMA_DEFAULT_SEED) {
        params.common.seed = time(NULL);
    }
    printf("%s: seed: %u\n", __func__, params.common.seed);
    srand(params.common.seed);

    struct llama_model_params mparams = llama_model_default_params();
    mparams.vocab_only = true;

    struct llama_context_params cparams = llama_context_default_params();

    struct llama_model * lmodel = llama_load_model_from_file(params.fn_vocab_model, mparams);
    struct llama_context * lctx = llama_new_context_with_model(lmodel, cparams);

    struct my_llama_model model;
    model.hparams.n_vocab = llama_n_vocab(lmodel);
    model.hparams.n_ctx   = params.common.n_ctx;
    model.hparams.n_embd  = params.n_embd;
    model.hparams.n_head  = params.n_head;
    model.hparams.n_layer = params.n_layer;
    model.hparams.n_ff    = params.n_ff;
    // llama.cpp requires n_rot to be exactly n_embd / n_head
    model.hparams.n_rot   = model.hparams.n_embd / model.hparams.n_head;
    model.hparams.f_norm_rms_eps  = params.f_norm_rms_eps;
    model.hparams.rope_freq_base  = params.rope_freq_base;
    model.hparams.rope_freq_scale = params.rope_freq_scale;

    struct train_state      * train = init_train_state();
    struct ggml_opt_context * opt   = train->opt;

    // set opt params from command line
    opt->params = ggml_opt_default_params(GGML_OPT_ADAM);
    opt->params.print_forward_graph     = false;
    opt->params.print_backward_graph    = false;
    opt->params.graph_size              = LLAMA_TRAIN_MAX_NODES;
    opt->params.n_threads               = params.common.n_threads;
    opt->params.past                    = params.common.opt_past;
    opt->params.delta                   = params.common.opt_delta;
    opt->params.max_no_improvement      = params.common.opt_max_no_improvement;
    opt->params.n_gradient_accumulation = params.common.n_gradient_accumulation;
    opt->params.adam.n_iter             = params.common.adam_n_iter;
    opt->params.adam.sched              = 1.0f;
    opt->params.adam.alpha              = params.common.adam_alpha;
    opt->params.adam.decay              = params.common.adam_decay;
    opt->params.adam.decay_min_ndim     = params.common.adam_decay_min_ndim;
    opt->params.adam.beta1              = params.common.adam_beta1;
    opt->params.adam.beta2              = params.common.adam_beta2;
    opt->params.adam.gclip              = params.common.adam_gclip;
    opt->params.adam.eps_f              = params.common.adam_eps_f;
  826. printf("%s: init model\n", __func__);
  827. bool existed = load_checkpoint_file(params.common.fn_checkpoint_in, &model, train);
  828. if (existed) {
  829. // overwrite last n_ctx with user provided n_ctx
  830. if (params.common.custom_n_ctx) {
  831. model.hparams.n_ctx = params.common.n_ctx;
  832. }
  833. const bool opt_past_changed = opt->params.past != params.common.opt_past;
  834. if (opt_past_changed) {
  835. die("Optimizer parameter '--opt-past N' differs from checkpoint file. To use different value train from scratch with empty input checkpoint, e.g --checkpoint-in ''. Aborting");
  836. // need to discard previous optimizer past function value statistics and opt_init with new shapes
  837. // TODO
  838. }
  839. } else {
  840. init_model(&model);
  841. randomize_model(&model, params.common.seed, 0.0f, 1.0f, -1.0f, +1.0f);
  842. if (!params.only_write_model) {
  843. ggml_opt_init(opt->ctx, opt, opt->params, get_parameter_count(&model));
  844. }
  845. }
  846. opt->iter = train->train_its;
  847. print_params(&model.hparams);
  848. printf("%s: total train_iterations %llu\n", __func__, (long long unsigned) train->train_its);
  849. printf("%s: seen train_samples %llu\n", __func__, (long long unsigned) train->train_samples);
  850. printf("%s: seen train_tokens %llu\n", __func__, (long long unsigned) train->train_tokens);
  851. printf("%s: completed train_epochs %llu\n", __func__, (long long unsigned) train->train_epochs);
  852. printf("%s: model_size = %zu bytes (%.1f MB)\n", __func__, (ggml_used_mem(model.ctx) + ggml_backend_buffer_get_size(model.data)), (float) (ggml_used_mem(model.ctx) + ggml_backend_buffer_get_size(model.data)) / (1024.0f*1024.0f));
  853. if (params.only_write_model) {
  854. save_train_files_data save_data;
  855. save_data.fn_checkpoint_out = "";
  856. save_data.fn_model_out = params.fn_model_out;
  857. save_data.fn_vocab_model = params.fn_vocab_model;
  858. save_data.pattern_fn_it = params.common.pattern_fn_it;
  859. save_data.fn_latest = params.common.fn_latest;
  860. save_data.model = &model;
  861. save_train_files(&save_data, train);
  862. free_train_state(train);
  863. ggml_free(model.ctx);
  864. llama_free(lctx);
  865. llama_free_model(lmodel);
  866. return 0;
  867. }
  868. printf("%s: opt_size = %zu bytes (%.1f MB)\n", __func__, ggml_get_mem_size(opt->ctx), (float) ggml_get_mem_size(opt->ctx) / (1024.0f*1024.0f));
  869. printf("%s: opt iter %d\n", __func__, opt->iter);
  870. int n_tokens = model.hparams.n_ctx;
  871. int n_vocab = model.hparams.n_vocab;
  872. int n_batch = params.common.n_batch;
  873. // context for input tensors without their data
  874. struct ggml_init_params ctx_input_params = {
  875. ggml_tensor_overhead() * 2, // mem_size
  876. NULL, // mem_buffer
  877. true, // no_alloc
  878. };
  879. struct ggml_context * ctx_input = ggml_init(ctx_input_params);
  880. // the input tensors
  881. struct ggml_tensor * tokens_input = ggml_new_tensor_2d(ctx_input, GGML_TYPE_I32, n_tokens, n_batch);
  882. struct ggml_tensor * target_probs = ggml_new_tensor_3d(ctx_input, GGML_TYPE_F32, n_vocab, n_tokens, n_batch);
  883. // measure required memory for input tensors
  884. // allocate input tensors
  885. ggml_backend_buffer_t input_data = ggml_backend_alloc_ctx_tensors_from_buft(ctx_input, ggml_backend_cpu_buffer_type());
  886. size_t max_input_size = ggml_backend_buffer_get_size(input_data);
  887. printf("%s: input_size = %zu bytes (%.1f MB)\n", __func__, max_input_size, (float) max_input_size / (1024.0f*1024.0f));
    // context for compute tensors without their data
    const size_t estimated_compute_size_wo_data = (
            2*LLAMA_TRAIN_MAX_NODES*ggml_tensor_overhead() +
            (params.common.use_checkpointing ? 3 : 2)*(GGML_OBJECT_SIZE+ggml_graph_overhead_custom(LLAMA_TRAIN_MAX_NODES, true))
    );
    struct ggml_init_params ctx_compute_params = {
        estimated_compute_size_wo_data, // mem_size
        NULL,                           // mem_buffer
        true,                           // no_alloc
    };
    struct ggml_context * ctx_compute = NULL;

    struct ggml_tensor * loss   = NULL;
    struct ggml_tensor * logits = NULL;

    struct ggml_cgraph * gf     = NULL;
    struct ggml_cgraph * gb     = NULL;
    struct ggml_cgraph * gb_tmp = NULL;

    // measure required memory for compute tensors
    size_t best_compute_size = SIZE_MAX;
    enum ggml_cgraph_eval_order best_order = GGML_CGRAPH_EVAL_ORDER_COUNT;
    // find best evaluation order
    for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) {
        ctx_compute = ggml_init(ctx_compute_params);
        ggml_gallocr_t alloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type());
        gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
        gf->order = (enum ggml_cgraph_eval_order) order;
        gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
        gb_tmp = params.common.use_checkpointing
                ? ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true)
                : NULL;
        loss = llama_build_train_graphs(
            &model, alloc, ctx_compute,
            gf, gb, gb_tmp,
            &logits, tokens_input, target_probs,
            n_tokens, n_batch,
            params.common.use_flash,
            params.common.use_checkpointing,
            true
        );
        size_t max_compute_size = ggml_gallocr_get_buffer_size(alloc, 0); // FIXME: this will still allocate the buffer
        if (max_compute_size < best_compute_size) {
            best_compute_size = max_compute_size;
            best_order = gf->order;
        }
        ggml_free(ctx_compute);
    }
    size_t max_compute_size = best_compute_size;
    printf("%s: compute_size = %zu bytes (%.1f MB)\n", __func__, max_compute_size, (float) max_compute_size / (1024.0f*1024.0f));
    printf("%s: evaluation order = %s\n", __func__,
        (best_order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? "LEFT_TO_RIGHT" :
        (best_order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? "RIGHT_TO_LEFT" :
        "invalid");
    // allocate compute tensors
    ctx_compute = ggml_init(ctx_compute_params);
    ggml_gallocr_t alloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type());
    gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
    gf->order = best_order;
    gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
    gb_tmp = params.common.use_checkpointing
            ? ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true)
            : NULL;
    loss = llama_build_train_graphs(
        &model, alloc, ctx_compute,
        gf, gb, gb_tmp,
        &logits, tokens_input, target_probs,
        n_tokens, n_batch,
        params.common.use_flash,
        params.common.use_checkpointing,
        false
    );

    std::vector<llama_token> train_tokens;
    std::vector<size_t>      train_samples_begin;
    std::vector<size_t>      train_samples_size;
    printf("%s: tokenize training data\n", __func__);
    tokenize_file(lctx,
            params.common.fn_train_data,
            params.common.sample_start,
            params.common.include_sample_start,
            params.common.overlapping_samples,
            n_tokens,
            train_tokens,
            train_samples_begin,
            train_samples_size);
    GGML_ASSERT(train_samples_begin.size() == train_samples_size.size());

    printf("%s: number of training tokens: %zu\n", __func__, train_tokens.size());

    size_t shuffle_samples_hash = compute_samples_hash(params.common.fn_train_data, train_samples_begin.data(), train_samples_size.data(), train_samples_size.size());
    const bool changed_train_data = (shuffle_samples_hash != train->shuffle_samples_hash) || (train->shuffle_sample_count != train_samples_size.size());
    if (changed_train_data) {
        printf("%s: train data seems to have changed. restarting shuffled epoch.\n", __func__);
    }
    if (params.common.force_reshuffle) {
        printf("%s: forced reshuffling of data. restarting with newly shuffled epoch.\n", __func__);
    }
    if ((train->shuffle_rng_state_current == "") || changed_train_data || params.common.force_reshuffle) {
        train->shuffle_rng_state_current = mt19937_seed_to_state(params.common.seed);
        train->shuffle_sample_count = train_samples_size.size();
        train->shuffle_next_sample  = 0;
        train->shuffle_samples_hash = shuffle_samples_hash;
    }
    std::vector<size_t> train_shuffled_samples_offs;
    std::vector<size_t> train_shuffled_samples_begin;
    std::vector<size_t> train_shuffled_samples_size;
    train_shuffled_samples_offs.resize(train_samples_begin.size());
    train_shuffled_samples_begin.resize(train_samples_begin.size());
    train_shuffled_samples_size.resize(train_samples_size.size());
    train->shuffle_rng_state_next = shuffle_samples(
            train->shuffle_rng_state_current,
            train_shuffled_samples_offs.data(),
            train_shuffled_samples_begin.data(),
            train_shuffled_samples_size.data(),
            train_samples_begin.data(),
            train_samples_size.data(),
            train_samples_size.size());
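
    // the mt19937 RNG state is kept as a serialized string in the training
    // state, so a resumed run continues the same shuffled epoch
    // deterministically; the samples hash detects when the training data
    // itself has changed
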
    printf("%s: begin training\n", __func__);

    save_train_files_data save_data;
    save_data.fn_checkpoint_out = params.common.fn_checkpoint_out;
    save_data.fn_model_out      = params.fn_model_out;
    save_data.fn_vocab_model    = params.fn_vocab_model;
    save_data.pattern_fn_it     = params.common.pattern_fn_it;
    save_data.fn_latest         = params.common.fn_latest;
    save_data.model             = &model;

    struct train_opt_callback_data opt_cb_data;
    opt_cb_data.params                 = &params.common;
    opt_cb_data.train                  = train;
    opt_cb_data.save_cb                = &save_train_files;
    opt_cb_data.save_data              = &save_data;
    opt_cb_data.lctx                   = lctx;
    opt_cb_data.last_save_iter         = opt->iter;
    opt_cb_data.tokens_data            = train_tokens.data();
    opt_cb_data.tokens_size            = train_tokens.size();
    opt_cb_data.samples_begin          = train_samples_begin.data();
    opt_cb_data.samples_size           = train_samples_size.data();
    opt_cb_data.shuffled_samples_offs  = train_shuffled_samples_offs.data();
    opt_cb_data.shuffled_samples_begin = train_shuffled_samples_begin.data();
    opt_cb_data.shuffled_samples_size  = train_shuffled_samples_size.data();
    opt_cb_data.samples_count          = train_samples_size.size();
    opt_cb_data.tokens_input           = tokens_input;
    opt_cb_data.target_probs           = target_probs;
    opt_cb_data.first_iter             = opt->iter;
    opt_cb_data.first_epoch            = train->train_epochs;
    opt_cb_data.iter_at_last_epoch     = -1;
    opt_cb_data.last_time              = ggml_time_ms();
    opt_cb_data.millis_per_iter        = 0.0;

    // measure required memory for work buffer
    size_t max_work_size = ggml_graph_plan(gb, params.common.n_threads).work_size + GGML_OBJECT_SIZE;
    printf("%s: work_size = %zu bytes (%.1f MB)\n", __func__, max_work_size, (float) max_work_size / (1024.0f*1024.0f));

    // context for work buffer
    struct ggml_init_params ctx_work_params = {
        max_work_size, // mem_size
        NULL,          // mem_buffer
        false,         // no_alloc
    };
    struct ggml_context * ctx_work = ggml_init(ctx_work_params);

    int64_t t0 = ggml_time_ms();
    ggml_opt_resume_g(ctx_work, opt, loss, gf, gb, &train_opt_callback, (void *) &opt_cb_data);

    ggml_free(ctx_work);
    ggml_free(ctx_compute);
    ggml_free(ctx_input);

    int64_t t1 = ggml_time_ms();
    printf("%s: total training time: ", __func__);
    print_duration((double) (t1 - t0));
    printf("\n");

    int new_iters = opt->iter - opt_cb_data.last_save_iter;
    if (new_iters > 0) {
        train->train_its    += new_iters;
        train->train_tokens += new_iters * opt->params.n_gradient_accumulation * n_batch * n_tokens;

        save_train_files(&save_data, train);
        opt_cb_data.last_save_iter = opt->iter;
    }

    ggml_free(opt->ctx);
    free_train_state(train);
    ggml_free(model.ctx);
    llama_free(lctx);
    llama_free_model(lmodel);
    return 0;
}