ggml-opt.cpp

  1. #include "ggml-opt.h"
  2. #include "ggml.h"
  3. #include "ggml-alloc.h"
  4. #include "ggml-backend.h"
  5. #include "ggml-impl.h"
  6. #include <algorithm>
  7. #include <cmath>
  8. #include <cstdint>
  9. #include <cinttypes>
  10. #include <map>
  11. #include <random>
  12. #include <vector>

struct ggml_opt_dataset {
    struct ggml_context   * ctx    = nullptr;
    ggml_backend_buffer_t   buf    = nullptr;
    struct ggml_tensor    * data   = nullptr;
    struct ggml_tensor    * labels = nullptr;

    int64_t ndata       = -1;
    int64_t ndata_shard = -1;
    size_t  nbs_data    = -1;
    size_t  nbs_labels  = -1;

    std::vector<int64_t> permutation;
};

struct ggml_opt_context {
    ggml_backend_sched_t    backend_sched        = nullptr;
    ggml_cgraph           * allocated_graph      = nullptr;
    ggml_cgraph           * allocated_graph_copy = nullptr;
    struct ggml_context   * ctx_static           = nullptr;
    struct ggml_context   * ctx_cpu              = nullptr;
    struct ggml_context   * ctx_compute          = nullptr;
    struct ggml_context   * ctx_copy             = nullptr;
    ggml_backend_buffer_t   buf_static           = nullptr;
    ggml_backend_buffer_t   buf_cpu              = nullptr;
    std::mt19937 rng;

    enum ggml_opt_loss_type  loss_type;
    enum ggml_opt_build_type build_type;
    enum ggml_opt_build_type build_type_alloc;

    struct ggml_tensor * inputs   = nullptr;
    struct ggml_tensor * outputs  = nullptr;
    struct ggml_tensor * labels   = nullptr;

    struct ggml_tensor * loss     = nullptr;
    struct ggml_tensor * pred     = nullptr;
    struct ggml_tensor * ncorrect = nullptr;

    struct ggml_cgraph * gf      = nullptr;
    struct ggml_cgraph * gb_grad = nullptr;
    struct ggml_cgraph * gb_opt  = nullptr;

    bool static_graphs = false;
    bool eval_ready    = false;

    std::vector<struct ggml_tensor *> grad_accs;
    std::vector<struct ggml_tensor *> grad_m;
    std::vector<struct ggml_tensor *> grad_v;

    int64_t iter       = 1;
    int32_t opt_period = 1;
    int32_t opt_i      = 0;
    bool loss_per_datapoint = false;

    ggml_opt_get_optimizer_params get_opt_pars = nullptr;
    void * get_opt_pars_ud                     = nullptr;
    struct ggml_tensor * adamw_params          = nullptr;
};

struct ggml_opt_result {
    int64_t              ndata    = 0;
    std::vector<float>   loss;
    std::vector<int32_t> pred;
    int64_t              ncorrect = 0;

    int64_t opt_period         = -1;
    bool    loss_per_datapoint = false;
};

// ====== Dataset ======
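
// A dataset stores all datapoints in a single 2D tensor (ne_datapoint values per datapoint, ndata
// datapoints) plus an optional labels tensor with the same layout. Shuffling and batching operate
// on shards of ndata_shard consecutive datapoints; the permutation vector in ggml_opt_dataset
// holds one index per shard.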
ggml_opt_dataset_t ggml_opt_dataset_init(
        enum ggml_type type_data,
        enum ggml_type type_label,
        int64_t        ne_datapoint,
        int64_t        ne_label,
        int64_t        ndata,
        int64_t        ndata_shard) {
    GGML_ASSERT(ne_datapoint >  0);
    GGML_ASSERT(ne_label     >= 0);
    GGML_ASSERT(ndata        >  0);
    GGML_ASSERT(ndata_shard  >  0);

    ggml_opt_dataset_t result = new ggml_opt_dataset;
    result->ndata       = ndata;
    result->ndata_shard = ndata_shard;

    {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 2*ggml_tensor_overhead(),
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        result->ctx = ggml_init(params);
    }

    result->data = ggml_new_tensor_2d(result->ctx, type_data, ne_datapoint, ndata);
    result->nbs_data = ggml_nbytes(result->data) * ndata_shard/ndata;

    if (ne_label > 0) {
        result->labels = ggml_new_tensor_2d(result->ctx, type_label, ne_label, ndata);
        result->nbs_labels = ggml_nbytes(result->labels) * ndata_shard/ndata;
    } else {
        result->labels = nullptr;
        result->nbs_labels = 0;
    }

    result->buf = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx, ggml_backend_cpu_buffer_type());

    const int64_t nshards = ndata/ndata_shard;
    result->permutation.resize(nshards);
    for (int64_t i = 0; i < nshards; ++i) {
        result->permutation[i] = i;
    }
    return result;
}

void ggml_opt_dataset_free(ggml_opt_dataset_t dataset) {
    ggml_backend_buffer_free(dataset->buf);
    ggml_free(dataset->ctx);
    delete dataset;
}

int64_t ggml_opt_dataset_ndata(ggml_opt_dataset_t dataset) {
    return dataset->ndata;
}

struct ggml_tensor * ggml_opt_dataset_data(ggml_opt_dataset_t dataset) {
    return dataset->data;
}

struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset) {
    return dataset->labels;
}

void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata) {
    GGML_ASSERT(idata <= dataset->ndata);

    if (idata < 0) {
        std::shuffle(dataset->permutation.begin(), dataset->permutation.end(), opt_ctx->rng);
        return;
    }

    GGML_ASSERT(idata % dataset->ndata_shard == 0);
    const int64_t ishard_max = idata / dataset->ndata_shard;
    std::shuffle(dataset->permutation.begin(), dataset->permutation.begin() + ishard_max, opt_ctx->rng);
}
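
// Copy one batch worth of shards into preallocated batch tensors. Which shards end up in which
// batch is determined by the current permutation, so calling ggml_opt_dataset_shuffle between
// epochs changes how datapoints are grouped into batches.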
void ggml_opt_dataset_get_batch(ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, struct ggml_tensor * labels_batch, int64_t ibatch) {
    GGML_ASSERT(  data_batch && ggml_is_contiguous(data_batch));
    GGML_ASSERT(!labels_batch || ggml_is_contiguous(labels_batch));
    GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
    GGML_ASSERT(  data_batch->type == dataset->data->type);
    GGML_ASSERT(!labels_batch || labels_batch->type == dataset->labels->type);

    const size_t nb_data_batch = ggml_nbytes(data_batch);
    GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);
    const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;

    if (labels_batch) {
        const size_t nb_labels_batch = ggml_nbytes(labels_batch);
        GGML_ASSERT(nb_labels_batch == shards_per_batch*dataset->nbs_labels);
    }

    GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));

    for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
        const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];

        const char * ptr_data = (const char *) dataset->data->data + ishard*dataset->nbs_data;
        ggml_backend_tensor_set(data_batch, ptr_data, ishard_batch*dataset->nbs_data, dataset->nbs_data);

        if (!labels_batch) {
            continue;
        }

        const char * ptr_labels = (const char *) dataset->labels->data + ishard*dataset->nbs_labels;
        ggml_backend_tensor_set(labels_batch, ptr_labels, ishard_batch*dataset->nbs_labels, dataset->nbs_labels);
    }
}

void ggml_opt_dataset_get_batch_host(ggml_opt_dataset_t dataset, void * data_batch, size_t nb_data_batch, void * labels_batch, int64_t ibatch) {
    GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
    GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);

    const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;

    GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));

    for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
        const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];

        const char * ptr_data       = (const char *) dataset->data->data + ishard      *dataset->nbs_data;
        char       * ptr_data_batch = (char *)       data_batch          + ishard_batch*dataset->nbs_data;
        memcpy(ptr_data_batch, ptr_data, dataset->nbs_data);

        if (!labels_batch) {
            continue;
        }

        const char * ptr_labels       = (const char *) dataset->labels->data + ishard      *dataset->nbs_labels;
        char       * ptr_labels_batch = (char *)       labels_batch          + ishard_batch*dataset->nbs_labels;
        memcpy(ptr_labels_batch, ptr_labels, dataset->nbs_labels);
    }
}

// ====== Model / Context ======
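
// The defaults below (alpha = 1e-3, beta1 = 0.9, beta2 = 0.999, eps = 1e-8, no weight decay) can
// be overridden by passing a different callback, e.g. ggml_opt_get_constant_optimizer_params
// together with a user-provided ggml_opt_optimizer_params struct.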
struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata) {
    GGML_UNUSED(userdata);

    ggml_opt_optimizer_params result;

    result.adamw.alpha = 0.001f;
    result.adamw.beta1 = 0.9f;
    result.adamw.beta2 = 0.999f;
    result.adamw.eps   = 1e-8f;
    result.adamw.wd    = 0.0f;

    return result;
}

struct ggml_opt_optimizer_params ggml_opt_get_constant_optimizer_params(void * userdata) {
    return *((struct ggml_opt_optimizer_params *) userdata);
}

struct ggml_opt_params ggml_opt_default_params(
        ggml_backend_sched_t    backend_sched,
        enum ggml_opt_loss_type loss_type) {
    return {
        /*backend_sched   =*/ backend_sched,
        /*ctx_compute     =*/ nullptr,
        /*inputs          =*/ nullptr,
        /*logits          =*/ nullptr,
        /*loss_type       =*/ loss_type,
        /*build_type      =*/ GGML_OPT_BUILD_TYPE_OPT,
        /*opt_period      =*/ 1,
        /*get_opt_pars    =*/ ggml_opt_get_default_optimizer_params,
        /*get_opt_pars_ud =*/ nullptr,
    };
}
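
// map_tensor/dup_graph create a structural copy of a compute graph in a separate context so that
// the backend scheduler can allocate it without modifying the original graph. The copied tensors
// share data pointers and buffers with the originals; only the graph metadata is duplicated.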
static ggml_tensor * map_tensor(std::map<ggml_tensor *, ggml_tensor *> & tensor_map, ggml_context * ctx, ggml_tensor * tensor) {
    if (!tensor) {
        return nullptr;
    }

    if (tensor_map.find(tensor) != tensor_map.end()) {
        return tensor_map[tensor];
    }

    ggml_tensor * new_tensor = ggml_dup_tensor(ctx, tensor);
    tensor_map[tensor] = new_tensor;

    new_tensor->op = tensor->op;
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        new_tensor->nb[i] = tensor->nb[i];
    }
    new_tensor->flags = tensor->flags;
    memcpy(new_tensor->op_params, tensor->op_params, sizeof(tensor->op_params));
    strcpy(new_tensor->name, tensor->name);
    new_tensor->data      = tensor->data;
    new_tensor->buffer    = tensor->buffer;
    new_tensor->extra     = tensor->extra;
    new_tensor->view_offs = tensor->view_offs;
    new_tensor->view_src  = map_tensor(tensor_map, ctx, tensor->view_src);
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        new_tensor->src[i] = map_tensor(tensor_map, ctx, tensor->src[i]);
    }

    return new_tensor;
}

static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * src) {
    std::map<ggml_tensor *, ggml_tensor *> tensor_map;

    ggml_cgraph * dst = ggml_new_graph_custom(ctx, src->size, /*grads =*/ true);

    for (int i = 0; i < src->n_leafs; i++) {
        ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->leafs[i]));
    }
    GGML_ASSERT(dst->n_leafs == src->n_leafs);
    for (int i = 0; i < src->n_nodes; i++) {
        ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->nodes[i]));
    }
    GGML_ASSERT(dst->n_nodes == src->n_nodes);
    for (int i = 0; i < src->n_nodes; ++i) {
        const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]);
        const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]);

        GGML_ASSERT(igrad_src != GGML_HASHSET_FULL);
        GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src));
        GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL);
        GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst));

        dst->grads[igrad_dst]     = src->grads[igrad_src];
        dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src];
    }

    return dst;
}
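
// ggml_opt_build extends the forward graph gf with the loss for the configured loss type and,
// depending on build_type_alloc, with the backward pass (gb_grad) and the AdamW optimizer steps
// (gb_opt). Statically allocated tensors (gradient accumulators, momenta, labels, loss, pred,
// ncorrect) live in ctx_static; the shared AdamW parameter tensor lives in a small CPU-side context.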
static void ggml_opt_build(ggml_opt_context_t opt_ctx) {
    GGML_ASSERT(opt_ctx->ctx_compute && "no compute context set, either use static graphs or set one with ggml_opt_prepare_alloc");
    GGML_ASSERT((!opt_ctx->static_graphs || opt_ctx->inputs->data) && "when using static graphs the inputs must be allocated statically");

    const bool accumulate = opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_GRAD &&
        !(opt_ctx->static_graphs && opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->opt_period == 1);

    ggml_set_input(opt_ctx->inputs);
    ggml_set_output(opt_ctx->outputs);

    int n_param = 0;
    for (int i = 0; i < opt_ctx->gf->n_nodes; ++i) {
        const struct ggml_tensor * node = opt_ctx->gf->nodes[i];
        if (node->flags & GGML_TENSOR_FLAG_PARAM) {
            n_param++;
        }
        GGML_ASSERT(!(node->flags & GGML_TENSOR_FLAG_LOSS) && "support for extra loss terms not implemented");
    }

    if (!opt_ctx->ctx_static) {
        // The static context is used for:
        //   - gradients (1 per loss, 1 tensor per param if using gradient accumulation)
        //   - optimizer momenta (2 tensors per param)
        //   - labels (if using static graphs)
        //   - loss (if using static graphs, up to 5 tensors)
        //   - pred (if using static graphs)
        //   - ncorrect (if using static graphs, 2 tensors).
        constexpr size_t n_loss = 1;
        const size_t tensors_per_param = (accumulate ? 1 : 0) +
            (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT ? 2 : 0);
        const size_t tensors_const = opt_ctx->static_graphs ? 9 : 0;
        const size_t size_meta = (n_loss + tensors_per_param*n_param + tensors_const) * ggml_tensor_overhead();
        struct ggml_init_params params = {
            /*.mem_size   =*/ size_meta,
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        opt_ctx->ctx_static = ggml_init(params);
    }
    GGML_ASSERT(opt_ctx->build_type <= opt_ctx->build_type_alloc);

    {
        // The cpu context is allocated statically if using static graphs, dynamically otherwise.
        // It is used for:
        //   - optimizer parameters (1 shared for all optimizer invocations)
        const size_t size_meta = 1 * ggml_tensor_overhead();
        struct ggml_init_params params = {
            /*.mem_size   =*/ size_meta,
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        ggml_free(opt_ctx->ctx_cpu);
        opt_ctx->ctx_cpu = ggml_init(params);

        ggml_backend_buffer_free(opt_ctx->buf_cpu);
        opt_ctx->buf_cpu = nullptr;
    }

    struct ggml_context * ctx_results = opt_ctx->static_graphs ? opt_ctx->ctx_static : opt_ctx->ctx_compute;

    switch (opt_ctx->loss_type) {
        case GGML_OPT_LOSS_TYPE_MEAN: {
            opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->outputs);
            ggml_set_name(opt_ctx->loss, "loss_sum");
            const float scale = 1.0f / (opt_ctx->opt_period * ggml_nelements(opt_ctx->outputs));
            opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, scale);
            ggml_set_name(opt_ctx->loss, "loss_mean");
            opt_ctx->loss_per_datapoint = true;
            break;
        }
        case GGML_OPT_LOSS_TYPE_SUM: {
            opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->outputs);
            ggml_set_name(opt_ctx->loss, "loss_sum");
            opt_ctx->loss_per_datapoint = false;
            break;
        }
        case GGML_OPT_LOSS_TYPE_CROSS_ENTROPY: {
            opt_ctx->labels = ggml_dup_tensor(ctx_results, opt_ctx->outputs);
            ggml_set_input(opt_ctx->labels);
            ggml_set_name(opt_ctx->labels, "labels");
            opt_ctx->loss = ggml_cross_entropy_loss(ctx_results, opt_ctx->outputs, opt_ctx->labels);
            ggml_set_name(opt_ctx->loss, "loss_cross_entropy");
            if (opt_ctx->opt_period > 1) {
                opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, 1.0f / opt_ctx->opt_period);
                ggml_set_name(opt_ctx->loss, "loss_cross_entropy_scaled");
            }
            opt_ctx->loss_per_datapoint = true;
            break;
        }
        case GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR: {
            opt_ctx->labels = ggml_dup_tensor(ctx_results, opt_ctx->outputs);
            ggml_set_input(opt_ctx->labels);
            ggml_set_name(opt_ctx->labels, "labels");
            opt_ctx->loss = ggml_sub(ctx_results, opt_ctx->outputs, opt_ctx->labels);
            ggml_set_name(opt_ctx->loss, "loss_error");
            opt_ctx->loss = ggml_sqr(ctx_results, opt_ctx->loss);
            ggml_set_name(opt_ctx->loss, "loss_squared_error");
            opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->loss);
            ggml_set_name(opt_ctx->loss, "loss_sum_squared_error");
            const float scale = 1.0f / (opt_ctx->opt_period * ggml_nelements(opt_ctx->outputs));
            opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, scale);
            ggml_set_name(opt_ctx->loss, "loss_mean_squared_error");
            opt_ctx->loss_per_datapoint = true;
            break;
        }
    }
    ggml_set_output(opt_ctx->loss);
    ggml_set_loss(opt_ctx->loss);
    ggml_build_forward_expand(opt_ctx->gf, opt_ctx->loss);

    if (opt_ctx->loss_type == GGML_OPT_LOSS_TYPE_CROSS_ENTROPY) {
        opt_ctx->pred = ggml_argmax(ctx_results, opt_ctx->outputs);
        ggml_set_name(opt_ctx->pred, "pred");
        ggml_set_output(opt_ctx->pred);
        ggml_build_forward_expand(opt_ctx->gf, opt_ctx->pred);

        opt_ctx->ncorrect = ggml_count_equal(ctx_results, opt_ctx->pred, ggml_argmax(ctx_results, opt_ctx->labels));
        ggml_set_name(opt_ctx->ncorrect, "ncorrect");
        ggml_set_output(opt_ctx->ncorrect);
        ggml_build_forward_expand(opt_ctx->gf, opt_ctx->ncorrect);
    }

    if (opt_ctx->buf_static) {
        if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_FORWARD) {
            return;
        }
    } else if (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_FORWARD) {
        opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(
            opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
        return;
    }

    if (opt_ctx->grad_accs.empty()) {
        GGML_ASSERT(opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_GRAD);

        const int n_nodes = opt_ctx->gf->n_nodes;
        opt_ctx->grad_accs.resize(n_nodes);
        for (int i = 0; i < n_nodes; ++i) {
            ggml_tensor * node = opt_ctx->gf->nodes[i];
            if ((accumulate && (node->flags & GGML_TENSOR_FLAG_PARAM)) || (node->flags & GGML_TENSOR_FLAG_LOSS)) {
                opt_ctx->grad_accs[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
            } else {
                opt_ctx->grad_accs[i] = nullptr;
            }
        }

        if (opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_OPT) {
            opt_ctx->grad_m.resize(n_nodes);
            opt_ctx->grad_v.resize(n_nodes);
            for (int i = 0; i < n_nodes; ++i) {
                ggml_tensor * node = opt_ctx->gf->nodes[i];
                if (node->flags & GGML_TENSOR_FLAG_PARAM) {
                    opt_ctx->grad_m[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
                    opt_ctx->grad_v[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
                } else {
                    opt_ctx->grad_m[i] = nullptr;
                    opt_ctx->grad_v[i] = nullptr;
                }
            }
        }
    }

    // gb_grad == graph backward gradients, forward pass, then backward pass to calculate gradients.
    opt_ctx->gb_grad = ggml_graph_dup(opt_ctx->ctx_compute, opt_ctx->gf, /*force_grads =*/ true);
    ggml_build_backward_expand(opt_ctx->ctx_compute, opt_ctx->gb_grad, opt_ctx->grad_accs.data());

    if (opt_ctx->buf_static) {
        if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_GRAD) {
            return;
        }
    } else if (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_GRAD) {
        opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
        ggml_graph_reset(opt_ctx->gb_grad);
    }

    GGML_ASSERT(opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT);

    // gb_opt == graph backward optimize, forward pass, then backward pass to calculate gradients, then optimizer step.
    opt_ctx->gb_opt = ggml_graph_dup(opt_ctx->ctx_compute, opt_ctx->gb_grad, /*force_grads =*/ true);

    opt_ctx->adamw_params = ggml_new_tensor_1d(opt_ctx->ctx_cpu, GGML_TYPE_F32, 7);
    ggml_set_input(opt_ctx->adamw_params);
    ggml_set_name(opt_ctx->adamw_params, "adamw_params");

    for (int i = opt_ctx->gf->n_nodes-1; i >= 0; --i) {
        struct ggml_tensor * node = opt_ctx->gb_opt->nodes[i];
        struct ggml_tensor * grad = ggml_graph_get_grad(opt_ctx->gb_opt, node);

        if (grad && (node->flags & GGML_TENSOR_FLAG_PARAM)) {
            struct ggml_tensor * m        = opt_ctx->grad_m[i];
            struct ggml_tensor * v        = opt_ctx->grad_v[i];
            struct ggml_tensor * opt_step = ggml_opt_step_adamw(opt_ctx->ctx_compute, node, grad, m, v, opt_ctx->adamw_params);

            ggml_set_name(m,        (std::string("AdamW m for ")    + std::string(node->name)).c_str());
            ggml_set_name(v,        (std::string("AdamW v for ")    + std::string(node->name)).c_str());
            ggml_set_name(opt_step, (std::string("AdamW step for ") + std::string(node->name)).c_str());

            ggml_build_forward_expand(opt_ctx->gb_opt, opt_step);
        }
    }

    if (!opt_ctx->buf_static) {
        opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(
            opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
        ggml_graph_reset(opt_ctx->gb_opt);
    }

    opt_ctx->buf_cpu = ggml_backend_alloc_ctx_tensors_from_buft(opt_ctx->ctx_cpu, ggml_backend_cpu_buffer_type());
}
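
// If a compute context is given in the params the graphs are built once up front ("static graphs").
// Otherwise the caller must provide a fresh forward graph before each evaluation via
// ggml_opt_prepare_alloc, and the backward/optimizer graphs are rebuilt on every ggml_opt_alloc call.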
ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) {
    ggml_opt_context_t result = new struct ggml_opt_context;
    result->backend_sched    = params.backend_sched;
    result->ctx_compute      = params.ctx_compute;
    result->loss_type        = params.loss_type;
    result->build_type       = params.build_type;
    result->build_type_alloc = params.build_type;
    result->inputs           = params.inputs;
    result->outputs          = params.outputs;
    result->opt_period       = params.opt_period;
    result->get_opt_pars     = params.get_opt_pars;
    result->get_opt_pars_ud  = params.get_opt_pars_ud;

    GGML_ASSERT(result->opt_period >= 1);

    result->static_graphs = result->ctx_compute;

    if (!result->static_graphs) {
        GGML_ASSERT(!result->inputs);
        GGML_ASSERT(!result->outputs);
        return result;
    }

    GGML_ASSERT(result->inputs);
    GGML_ASSERT(result->outputs);

    result->gf = ggml_new_graph_custom(result->ctx_compute, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); // Forward pass.
    ggml_build_forward_expand(result->gf, result->outputs);

    ggml_opt_build(result);

    return result;
}

void ggml_opt_free(ggml_opt_context_t opt_ctx) {
    if (opt_ctx == nullptr) {
        return;
    }
    ggml_backend_buffer_free(opt_ctx->buf_static);
    ggml_backend_buffer_free(opt_ctx->buf_cpu);
    ggml_free(opt_ctx->ctx_static);
    ggml_free(opt_ctx->ctx_cpu);
    delete opt_ctx;
}

void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) {
    if (optimizer) {
        ggml_graph_reset(opt_ctx->gb_opt);
        opt_ctx->iter = 1;
    } else {
        ggml_graph_reset(opt_ctx->gb_grad);
    }
}

struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) {
    return opt_ctx->inputs;
}

struct ggml_tensor * ggml_opt_outputs(ggml_opt_context_t opt_ctx) {
    return opt_ctx->outputs;
}

struct ggml_tensor * ggml_opt_labels(ggml_opt_context_t opt_ctx) {
    return opt_ctx->labels;
}

struct ggml_tensor * ggml_opt_loss(ggml_opt_context_t opt_ctx) {
    return opt_ctx->loss;
}

struct ggml_tensor * ggml_opt_pred(ggml_opt_context_t opt_ctx) {
    return opt_ctx->pred;
}

struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx) {
    return opt_ctx->ncorrect;
}

struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node) {
    return ggml_graph_get_grad_acc(opt_ctx->gb_opt, node);
}

// ====== Optimization Result ======

ggml_opt_result_t ggml_opt_result_init() {
    return new ggml_opt_result;
}

void ggml_opt_result_free(ggml_opt_result_t result) {
    delete result;
}

void ggml_opt_result_reset(ggml_opt_result_t result) {
    result->ndata = 0;
    result->loss.clear();
    result->pred.clear();
    result->ncorrect = 0;
}

void ggml_opt_result_ndata(ggml_opt_result_t result, int64_t * ndata) {
    *ndata = result->ndata;
}
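
// The reported loss is the mean over physical batches if the loss is defined per datapoint and the
// sum otherwise. The uncertainty is the standard error of that estimate across batches (NAN if
// fewer than two batches have been accumulated).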
void ggml_opt_result_loss(ggml_opt_result_t result, double * loss, double * unc) {
    const int64_t nbatches = result->loss.size(); // Number of physical batches.

    if (nbatches == 0) {
        *loss = 0.0;
        if (unc) {
            *unc = NAN;
        }
        return;
    }

    double sum         = 0.0;
    double sum_squared = 0.0;

    for (const float & loss : result->loss) {
        // If the loss is per datapoint it was scaled by 1.0f/opt_period for each physical batch.
        const float loss_scaled = result->loss_per_datapoint ? loss*result->opt_period : loss;
        sum         += loss_scaled;
        sum_squared += loss_scaled*loss_scaled;
    }

    const double mean = sum/nbatches;
    *loss = result->loss_per_datapoint ? mean : sum;

    if (!unc) {
        return;
    }

    if (nbatches < 2) {
        *unc = NAN;
        return;
    }

    const double var_sum = sum_squared/nbatches - mean*mean; // variance without Bessel's correction (multiply by nbatches/(nbatches-1) to correct)
    *unc = result->loss_per_datapoint ? sqrt(var_sum / (nbatches - 1)) : sqrt(var_sum * nbatches/(nbatches - 1));
}

void ggml_opt_result_pred(ggml_opt_result_t result, int32_t * pred) {
    for (size_t i = 0; i < result->pred.size(); ++i) {
        pred[i] = result->pred[i];
    }
}

void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc) {
    *accuracy = result->ncorrect >= 0 ? double(result->ncorrect) / double(result->ndata) : NAN;

    if (!unc) {
        return;
    }

    *unc = result->ncorrect >= 0 && result->ndata >= 2 ?
        sqrt((*accuracy) * (1.0 - (*accuracy)) / double(result->ndata - 1)) : NAN;
}

// ====== Computation ======

void ggml_opt_prepare_alloc(
        ggml_opt_context_t    opt_ctx,
        struct ggml_context * ctx_compute,
        struct ggml_cgraph  * gf,
        struct ggml_tensor  * inputs,
        struct ggml_tensor  * outputs) {
    GGML_ASSERT(!opt_ctx->static_graphs);
    opt_ctx->ctx_compute = ctx_compute;
    opt_ctx->gf          = gf;
    opt_ctx->inputs      = inputs;
    opt_ctx->outputs     = outputs;
}
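
// Select and allocate the graph for the next evaluation: forward only, forward + backward, or
// forward + backward + optimizer step, depending on the backward flag and on the position within
// the current gradient accumulation period (opt_period).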
void ggml_opt_alloc(ggml_opt_context_t opt_ctx, bool backward) {
    GGML_ASSERT(!opt_ctx->eval_ready);
    if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->opt_period > 1 && opt_ctx->opt_i == 0) {
        ggml_graph_reset(opt_ctx->gb_grad);
    }
    if (backward) {
        const int32_t opt_i_next = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;
        opt_ctx->build_type = opt_i_next == 0 ? GGML_OPT_BUILD_TYPE_OPT : GGML_OPT_BUILD_TYPE_GRAD;
    } else {
        opt_ctx->build_type = GGML_OPT_BUILD_TYPE_FORWARD;
    }

    if (!opt_ctx->static_graphs) {
        ggml_opt_build(opt_ctx);
    }

    struct ggml_cgraph * graph = nullptr;
    switch (opt_ctx->build_type) {
        case GGML_OPT_BUILD_TYPE_FORWARD: {
            graph = opt_ctx->gf;
        } break;
        case GGML_OPT_BUILD_TYPE_GRAD: {
            graph = opt_ctx->gb_grad;
        } break;
        case GGML_OPT_BUILD_TYPE_OPT: {
            graph = opt_ctx->gb_opt;
        } break;
    }
    GGML_ASSERT(graph);

    if (opt_ctx->allocated_graph == graph) {
        opt_ctx->eval_ready = true;
        return;
    }

    ggml_backend_sched_reset(opt_ctx->backend_sched); // clear allocation of previous graph

    if (opt_ctx->static_graphs) {
        ggml_init_params params = {
            /*.mem_size   =*/ graph->size*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph->size, graph->grads),
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        ggml_free(opt_ctx->ctx_copy);
        opt_ctx->ctx_copy = ggml_init(params);

        opt_ctx->allocated_graph_copy = dup_graph(opt_ctx->ctx_copy, graph);
    } else {
        opt_ctx->allocated_graph_copy = graph;
    }

    ggml_backend_sched_alloc_graph(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
    opt_ctx->allocated_graph = graph;

    opt_ctx->eval_ready = true;
}
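
// When the optimizer graph is evaluated, the 7 floats of adamw_params are filled as
// [alpha, beta1, beta2, eps, wd, beta1h, beta2h], where beta1h/beta2h are the bias correction
// factors 1/(1 - beta^iter) consumed by ggml_opt_step_adamw.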
void ggml_opt_eval(ggml_opt_context_t opt_ctx, ggml_opt_result_t result) {
    GGML_ASSERT(opt_ctx->eval_ready);
    if (opt_ctx->allocated_graph == opt_ctx->gb_opt) {
        struct ggml_opt_optimizer_params opt_pars = opt_ctx->get_opt_pars(opt_ctx->get_opt_pars_ud);

        GGML_ASSERT(opt_pars.adamw.alpha >  0.0f);
        GGML_ASSERT(opt_pars.adamw.beta1 >= 0.0f);
        GGML_ASSERT(opt_pars.adamw.beta1 <= 1.0f);
        GGML_ASSERT(opt_pars.adamw.beta2 >= 0.0f);
        GGML_ASSERT(opt_pars.adamw.beta2 <= 1.0f);
        GGML_ASSERT(opt_pars.adamw.eps   >= 0.0f);
        GGML_ASSERT(opt_pars.adamw.wd    >= 0.0f);
        GGML_ASSERT(opt_pars.adamw.wd    <= 1.0f);

        // beta1, beta2 after applying warmup
        const float beta1h = 1.0f/(1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter));
        const float beta2h = 1.0f/(1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter));

        float * adamw_par_data = ggml_get_data_f32(opt_ctx->adamw_params);
        adamw_par_data[0] = opt_pars.adamw.alpha;
        adamw_par_data[1] = opt_pars.adamw.beta1;
        adamw_par_data[2] = opt_pars.adamw.beta2;
        adamw_par_data[3] = opt_pars.adamw.eps;
        adamw_par_data[4] = opt_pars.adamw.wd;
        adamw_par_data[5] = beta1h;
        adamw_par_data[6] = beta2h;
    }

    ggml_backend_sched_graph_compute(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
    opt_ctx->iter += opt_ctx->allocated_graph == opt_ctx->gb_opt;
    opt_ctx->opt_i = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;

    if (!opt_ctx->static_graphs) {
        opt_ctx->gf                   = nullptr;
        opt_ctx->gb_grad              = nullptr;
        opt_ctx->gb_opt               = nullptr;
        opt_ctx->allocated_graph      = nullptr;
        opt_ctx->allocated_graph_copy = nullptr;
    }

    opt_ctx->eval_ready = false;

    if (!result) {
        return;
    }

    if (result->ndata == 0) {
        result->loss_per_datapoint = opt_ctx->loss_per_datapoint;
        result->opt_period         = opt_ctx->opt_period;
    } else {
        GGML_ASSERT(result->loss_per_datapoint == opt_ctx->loss_per_datapoint);
        GGML_ASSERT(result->opt_period         == opt_ctx->opt_period);
    }

    const int64_t ndata = opt_ctx->outputs->ne[1];
    GGML_ASSERT(result->ndata == ndata*int64_t(result->loss.size()) && "varying batch size not supported");
    result->ndata += ndata;

    GGML_ASSERT(ggml_is_scalar(opt_ctx->loss));
    GGML_ASSERT(opt_ctx->loss->type == GGML_TYPE_F32);
    float loss;
    ggml_backend_tensor_get(opt_ctx->loss, &loss, 0, ggml_nbytes(opt_ctx->loss));
    result->loss.push_back(loss);

    if (opt_ctx->pred) {
        GGML_ASSERT(opt_ctx->pred->type == GGML_TYPE_I32);
        std::vector<int32_t> pred(ndata);
        ggml_backend_tensor_get(opt_ctx->pred, pred.data(), 0, ggml_nbytes(opt_ctx->pred));
        result->pred.insert(result->pred.end(), pred.begin(), pred.end());
    }

    if (!opt_ctx->ncorrect || result->ncorrect < 0) {
        result->ncorrect = -1;
        return;
    }

    GGML_ASSERT(ggml_is_scalar(opt_ctx->ncorrect));
    GGML_ASSERT(opt_ctx->ncorrect->type == GGML_TYPE_I64);
    int64_t ncorrect;
    ggml_backend_tensor_get(opt_ctx->ncorrect, &ncorrect, 0, ggml_nbytes(opt_ctx->ncorrect));
    result->ncorrect += ncorrect;
}

// ====== High-Level Functions ======
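
// Run one pass over the dataset: batches before idata_split are processed with the backward pass
// enabled (training), the remaining batches with the forward pass only (evaluation). The
// respective callback, if set, is invoked after every batch.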
void ggml_opt_epoch(
        ggml_opt_context_t      opt_ctx,
        ggml_opt_dataset_t      dataset,
        ggml_opt_result_t       result_train,
        ggml_opt_result_t       result_eval,
        int64_t                 idata_split,
        ggml_opt_epoch_callback callback_train,
        ggml_opt_epoch_callback callback_eval) {
    struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx);
    struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
    struct ggml_tensor * data   = ggml_opt_dataset_data(dataset);
    GGML_ASSERT(data->ne[0] == inputs->ne[0]);

    const int64_t ndata       = data->ne[1];
    const int64_t ndata_batch = inputs->ne[1];

    GGML_ASSERT(data->ne[1] % inputs->ne[1] == 0);
    const int64_t nbatches = ndata/ndata_batch;

    idata_split = idata_split < 0 ? ndata : idata_split;
    GGML_ASSERT(idata_split % ndata_batch == 0);
    const int64_t ibatch_split = idata_split / ndata_batch;

    int64_t ibatch = 0;
    int64_t t_loop_start = ggml_time_us();
    for (; ibatch < ibatch_split; ++ibatch) {
        ggml_opt_alloc(opt_ctx, /*backward =*/ true);
        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
        ggml_opt_eval(opt_ctx, result_train);
        if (callback_train) {
            callback_train(true, opt_ctx, dataset, result_train, ibatch+1, ibatch_split, t_loop_start);
        }
    }
    t_loop_start = ggml_time_us();
    for (; ibatch < nbatches; ++ibatch) {
        ggml_opt_alloc(opt_ctx, /*backward =*/ false);
        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
        ggml_opt_eval(opt_ctx, result_eval);
        if (callback_eval) {
            callback_eval(false, opt_ctx, dataset, result_eval, ibatch+1-ibatch_split, nbatches-ibatch_split, t_loop_start);
        }
    }
}

void ggml_opt_epoch_callback_progress_bar(
        bool               train,
        ggml_opt_context_t opt_ctx,
        ggml_opt_dataset_t dataset,
        ggml_opt_result_t  result,
        int64_t            ibatch,
        int64_t            ibatch_max,
        int64_t            t_start_us) {
    fprintf(stderr, "%s[", train ? "train: " : "val: ");

    // The progress bar consists of partially filled blocks, unicode has 8 separate fill levels.
    constexpr int64_t bar_length = 8;
    const int64_t ibatch8 = 8 * ibatch;
    for (int64_t j = 0; j < bar_length; ++j) {
        if (ibatch_max * (8*j + 8) / bar_length < ibatch8) {
            fprintf(stderr, "\u2588"); // full block
        } else if (ibatch_max * (8*j + 7) / bar_length < ibatch8) {
            fprintf(stderr, "\u2589"); // 7/8 filled
        } else if (ibatch_max * (8*j + 6) / bar_length < ibatch8) {
            fprintf(stderr, "\u258A"); // 6/8 filled
        } else if (ibatch_max * (8*j + 5) / bar_length < ibatch8) {
            fprintf(stderr, "\u258B"); // 5/8 filled
        } else if (ibatch_max * (8*j + 4) / bar_length < ibatch8) {
            fprintf(stderr, "\u258C"); // 4/8 filled
        } else if (ibatch_max * (8*j + 3) / bar_length < ibatch8) {
            fprintf(stderr, "\u258D"); // 3/8 filled
        } else if (ibatch_max * (8*j + 2) / bar_length < ibatch8) {
            fprintf(stderr, "\u258E"); // 2/8 filled
        } else if (ibatch_max * (8*j + 1) / bar_length < ibatch8) {
            fprintf(stderr, "\u258F"); // 1/8 filled
        } else {
            fprintf(stderr, " ");
        }
    }

    const int64_t batch_size = ggml_opt_inputs(opt_ctx)->ne[1];
    const int64_t idata      = ibatch*batch_size;
    const int64_t idata_max  = ibatch_max*batch_size;

    double loss;
    double loss_unc;
    ggml_opt_result_loss(result, &loss, &loss_unc);

    double accuracy;
    double accuracy_unc;
    ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc);

    const int64_t t_ibatch_us = ggml_time_us() - t_start_us;
    int64_t t_ibatch_s = t_ibatch_us / 1000000;
    const int64_t t_ibatch_h = t_ibatch_s / 3600;
    t_ibatch_s -= t_ibatch_h * 3600;
    const int64_t t_ibatch_m = t_ibatch_s / 60;
    t_ibatch_s -= t_ibatch_m * 60;

    const int64_t t_eta_us = t_ibatch_us * (ibatch_max - ibatch)/ibatch;
    int64_t t_eta_s = t_eta_us / 1000000;
    const int64_t t_eta_h = t_eta_s / 3600;
    t_eta_s -= t_eta_h * 3600;
    const int64_t t_eta_m = t_eta_s / 60;
    t_eta_s -= t_eta_m * 60;

    fprintf(stderr, "] data=%07" PRId64 "/%07" PRId64 " loss=%.5lf±%.5lf acc=%.2lf±%.2lf%% "
            "t=%02" PRId64 ":%02" PRId64 ":%02" PRId64 " ETA=%02" PRId64 ":%02" PRId64 ":%02" PRId64 " \r",
            idata, idata_max, loss, loss_unc, 100.0*accuracy, 100.0*accuracy_unc,
            t_ibatch_h, t_ibatch_m, t_ibatch_s, t_eta_h, t_eta_m, t_eta_s);
    if (ibatch == ibatch_max) {
        fprintf(stderr, "\n");
    }
    fflush(stderr);

    GGML_UNUSED(dataset);
}
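
// Convenience wrapper that trains a statically defined model for a fixed number of epochs with a
// train/validation split. nbatch_logical controls gradient accumulation: with nbatch_logical ==
// n*nbatch_physical, the gradients of n physical batches are accumulated per optimizer step.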
void ggml_opt_fit(
        ggml_backend_sched_t          backend_sched,
        ggml_context                * ctx_compute,
        ggml_tensor                 * inputs,
        ggml_tensor                 * outputs,
        ggml_opt_dataset_t            dataset,
        enum ggml_opt_loss_type       loss_type,
        ggml_opt_get_optimizer_params get_opt_pars,
        int64_t                       nepoch,
        int64_t                       nbatch_logical,
        float                         val_split,
        bool                          silent) {
    ggml_time_init();
    const int64_t t_start_us = ggml_time_us();

    const int64_t ndata           = ggml_opt_dataset_data(dataset)->ne[1];
    const int64_t nbatch_physical = inputs->ne[1];
    GGML_ASSERT(ndata          % nbatch_logical  == 0);
    GGML_ASSERT(nbatch_logical % nbatch_physical == 0);

    const int64_t opt_period       = nbatch_logical / nbatch_physical;
    const int64_t nbatches_logical = ndata / nbatch_logical;

    GGML_ASSERT(val_split >= 0.0f);
    GGML_ASSERT(val_split <  1.0f);
    const int64_t ibatch_split = int64_t(((1.0f - val_split) * nbatches_logical)) * opt_period; // train <-> val split index (physical)
    const int64_t idata_split  = ibatch_split * nbatch_physical;

    int64_t epoch = 1;

    ggml_opt_params params = ggml_opt_default_params(backend_sched, loss_type);
    params.ctx_compute     = ctx_compute;
    params.inputs          = inputs;
    params.outputs         = outputs;
    params.opt_period      = opt_period;
    params.get_opt_pars    = get_opt_pars;
    params.get_opt_pars_ud = &epoch;
    ggml_opt_context_t opt_ctx = ggml_opt_init(params);

    // Shuffling the data is generally useful but it only has an effect if not all data is used in a single batch.
    if (nbatch_logical < ndata) {
        ggml_opt_dataset_shuffle(opt_ctx, dataset, -1); // Shuffle all data (train + validation).
    }

    ggml_opt_result_t result_train = ggml_opt_result_init();
    ggml_opt_result_t result_val   = ggml_opt_result_init();

    ggml_opt_epoch_callback epoch_callback = silent ? nullptr : ggml_opt_epoch_callback_progress_bar;

    for (; epoch <= nepoch; ++epoch) {
        if (nbatch_logical < idata_split) {
            ggml_opt_dataset_shuffle(opt_ctx, dataset, idata_split);
        }

        ggml_opt_result_reset(result_train);
        ggml_opt_result_reset(result_val);

        if (!silent) {
            fprintf(stderr, "%s: epoch %04" PRId64 "/%04" PRId64 ":\n", __func__, epoch, nepoch);
        }
        ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split, epoch_callback, epoch_callback);
        if (!silent) {
            fprintf(stderr, "\n");
        }
    }

    if (!silent) {
        int64_t t_total_s = (ggml_time_us() - t_start_us) / 1000000;
        const int64_t t_total_h = t_total_s / 3600;
        t_total_s -= t_total_h * 3600;
        const int64_t t_total_m = t_total_s / 60;
        t_total_s -= t_total_m * 60;
        fprintf(stderr, "%s: training took %02" PRId64 ":%02" PRId64 ":%02" PRId64 "\n", __func__, t_total_h, t_total_m, t_total_s);
    }

    ggml_opt_free(opt_ctx);
    ggml_opt_result_free(result_train);
    ggml_opt_result_free(result_val);
}