// ggml-opt.cpp

#include "ggml-opt.h"

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-impl.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cinttypes>
#include <cstring> // memcpy, strcpy
#include <map>
#include <random>
#include <vector>

struct ggml_opt_dataset {
    struct ggml_context   * ctx    = nullptr;
    ggml_backend_buffer_t   buf    = nullptr;
    struct ggml_tensor    * data   = nullptr;
    struct ggml_tensor    * labels = nullptr;

    int64_t ndata       = -1;
    int64_t ndata_shard = -1;
    size_t  nbs_data    = -1;
    size_t  nbs_labels  = -1;

    std::vector<int64_t> permutation;
};

struct ggml_opt_context {
    ggml_backend_sched_t     backend_sched        = nullptr;
    ggml_cgraph            * allocated_graph      = nullptr;
    ggml_cgraph            * allocated_graph_copy = nullptr;
    struct ggml_context    * ctx_static           = nullptr;
    struct ggml_context    * ctx_cpu              = nullptr;
    struct ggml_context    * ctx_compute          = nullptr;
    struct ggml_context    * ctx_copy             = nullptr;
    ggml_backend_buffer_t    buf_static           = nullptr;
    ggml_backend_buffer_t    buf_cpu              = nullptr;
    std::mt19937             rng;
    enum ggml_opt_loss_type  loss_type;
    enum ggml_opt_build_type build_type;
    enum ggml_opt_build_type build_type_alloc;

    struct ggml_tensor * inputs   = nullptr;
    struct ggml_tensor * outputs  = nullptr;
    struct ggml_tensor * labels   = nullptr;

    struct ggml_tensor * loss     = nullptr;
    struct ggml_tensor * pred     = nullptr;
    struct ggml_tensor * ncorrect = nullptr;

    struct ggml_cgraph * gf      = nullptr;
    struct ggml_cgraph * gb_grad = nullptr;
    struct ggml_cgraph * gb_opt  = nullptr;
    bool static_graphs = false;
    bool eval_ready    = false;
    std::vector<struct ggml_tensor *> grad_accs;
    std::vector<struct ggml_tensor *> grad_m;
    std::vector<struct ggml_tensor *> grad_v;

    int64_t iter       = 1;
    int32_t opt_period = 1;
    int32_t opt_i      = 0;
    bool    loss_per_datapoint = false;

    ggml_opt_get_optimizer_params get_opt_pars = nullptr;
    void * get_opt_pars_ud = nullptr;
    struct ggml_tensor * opt_step_params = nullptr; // Stores output of get_opt_pars.

    enum ggml_opt_optimizer_type optimizer = GGML_OPT_OPTIMIZER_TYPE_ADAMW;
};

struct ggml_opt_result {
    int64_t              ndata = 0;
    std::vector<float>   loss;
    std::vector<int32_t> pred;
    int64_t              ncorrect = 0;

    int64_t opt_period         = -1;
    bool    loss_per_datapoint = false;
};

// ====== Dataset ======

ggml_opt_dataset_t ggml_opt_dataset_init(
        enum ggml_type type_data,
        enum ggml_type type_label,
        int64_t        ne_datapoint,
        int64_t        ne_label,
        int64_t        ndata,
        int64_t        ndata_shard) {
    GGML_ASSERT(ne_datapoint >  0);
    GGML_ASSERT(ne_label     >= 0);
    GGML_ASSERT(ndata        >  0);
    GGML_ASSERT(ndata_shard  >  0);

    ggml_opt_dataset_t result = new ggml_opt_dataset;
    result->ndata       = ndata;
    result->ndata_shard = ndata_shard;

    {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 2*ggml_tensor_overhead(),
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        result->ctx = ggml_init(params);
    }

    result->data = ggml_new_tensor_2d(result->ctx, type_data, ne_datapoint, ndata);
    result->nbs_data = ggml_nbytes(result->data) * ndata_shard/ndata;

    if (ne_label > 0) {
        result->labels = ggml_new_tensor_2d(result->ctx, type_label, ne_label, ndata);
        result->nbs_labels = ggml_nbytes(result->labels) * ndata_shard/ndata;
    } else {
        result->labels = nullptr;
        result->nbs_labels = 0;
    }

    result->buf = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx, ggml_backend_cpu_buffer_type());

    const int64_t nshards = ndata/ndata_shard;
    result->permutation.resize(nshards);
    for (int64_t i = 0; i < nshards; ++i) {
        result->permutation[i] = i;
    }
    return result;
}

void ggml_opt_dataset_free(ggml_opt_dataset_t dataset) {
    ggml_backend_buffer_free(dataset->buf);
    ggml_free(dataset->ctx);
    delete dataset;
}

int64_t ggml_opt_dataset_ndata(ggml_opt_dataset_t dataset) {
    return dataset->ndata;
}

struct ggml_tensor * ggml_opt_dataset_data(ggml_opt_dataset_t dataset) {
    return dataset->data;
}

struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset) {
    return dataset->labels;
}

void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata) {
    GGML_ASSERT(idata <= dataset->ndata);

    if (idata < 0) {
        std::shuffle(dataset->permutation.begin(), dataset->permutation.end(), opt_ctx->rng);
        return;
    }

    GGML_ASSERT(idata % dataset->ndata_shard == 0);
    const int64_t ishard_max = idata / dataset->ndata_shard;
    std::shuffle(dataset->permutation.begin(), dataset->permutation.begin() + ishard_max, opt_ctx->rng);
}

void ggml_opt_dataset_get_batch(ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, struct ggml_tensor * labels_batch, int64_t ibatch) {
    GGML_ASSERT(  data_batch && ggml_is_contiguous(data_batch));
    GGML_ASSERT(!labels_batch || ggml_is_contiguous(labels_batch));
    GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
    GGML_ASSERT(                   data_batch->type == dataset->data->type);
    GGML_ASSERT(!labels_batch || labels_batch->type == dataset->labels->type);

    const size_t nb_data_batch = ggml_nbytes(data_batch);
    GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);
    const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;

    if (labels_batch) {
        const size_t nb_labels_batch = ggml_nbytes(labels_batch);
        GGML_ASSERT(nb_labels_batch == shards_per_batch*dataset->nbs_labels);
    }

    GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));

    for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
        const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];

        const char * ptr_data = (const char *) dataset->data->data + ishard*dataset->nbs_data;
        ggml_backend_tensor_set(data_batch, ptr_data, ishard_batch*dataset->nbs_data, dataset->nbs_data);

        if (!labels_batch) {
            continue;
        }

        const char * ptr_labels = (const char *) dataset->labels->data + ishard*dataset->nbs_labels;
        ggml_backend_tensor_set(labels_batch, ptr_labels, ishard_batch*dataset->nbs_labels, dataset->nbs_labels);
    }
}

void ggml_opt_dataset_get_batch_host(ggml_opt_dataset_t dataset, void * data_batch, size_t nb_data_batch, void * labels_batch, int64_t ibatch) {
    GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
    GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);

    const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;

    GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));

    for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
        const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];

        const char * ptr_data       = (const char *) dataset->data->data + ishard      *dataset->nbs_data;
        char       * ptr_data_batch = (char *)       data_batch          + ishard_batch*dataset->nbs_data;
        memcpy(ptr_data_batch, ptr_data, dataset->nbs_data);

        if (!labels_batch) {
            continue;
        }

        const char * ptr_labels       = (const char *) dataset->labels->data + ishard      *dataset->nbs_labels;
        char       * ptr_labels_batch = (char *)       labels_batch          + ishard_batch*dataset->nbs_labels;
        memcpy(ptr_labels_batch, ptr_labels, dataset->nbs_labels);
    }
}

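// Usage sketch for the dataset API above (the sizes are hypothetical caller
// values, e.g. MNIST-like data with 784 features and 10 one-hot labels):
//
//     ggml_opt_dataset_t dataset = ggml_opt_dataset_init(
//         GGML_TYPE_F32, GGML_TYPE_F32, /*ne_datapoint =*/ 784, /*ne_label =*/ 10,
//         /*ndata =*/ 60000, /*ndata_shard =*/ 1);
//     float * data   = ggml_get_data_f32(ggml_opt_dataset_data(dataset));   // CPU buffer, host-accessible
//     float * labels = ggml_get_data_f32(ggml_opt_dataset_labels(dataset)); // CPU buffer, host-accessible
//     // ... fill data[i*784 + j] and labels[i*10 + k], then either batch manually with
//     // ggml_opt_dataset_get_batch() or hand the dataset to ggml_opt_epoch()/ggml_opt_fit() ...
//     ggml_opt_dataset_free(dataset);
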
// ====== Model / Context ======

struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata) {
    GGML_UNUSED(userdata);

    ggml_opt_optimizer_params result;

    result.adamw.alpha = 0.001f;
    result.adamw.beta1 = 0.9f;
    result.adamw.beta2 = 0.999f;
    result.adamw.eps   = 1e-8f;
    result.adamw.wd    = 0.0f;

    result.sgd.alpha = 1e-3f;
    result.sgd.wd    = 0.0f;

    return result;
}

struct ggml_opt_optimizer_params ggml_opt_get_constant_optimizer_params(void * userdata) {
    return *((struct ggml_opt_optimizer_params *) userdata);
}

struct ggml_opt_params ggml_opt_default_params(
        ggml_backend_sched_t    backend_sched,
        enum ggml_opt_loss_type loss_type) {
    return {
        /*backend_sched   =*/ backend_sched,
        /*ctx_compute     =*/ nullptr,
        /*inputs          =*/ nullptr,
        /*logits          =*/ nullptr,
        /*loss_type       =*/ loss_type,
        /*build_type      =*/ GGML_OPT_BUILD_TYPE_OPT,
        /*opt_period      =*/ 1,
        /*get_opt_pars    =*/ ggml_opt_get_default_optimizer_params,
        /*get_opt_pars_ud =*/ nullptr,
        /*optimizer       =*/ GGML_OPT_OPTIMIZER_TYPE_ADAMW,
    };
}

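// map_tensor/dup_graph make a shallow copy of a graph into ctx: the tensor metadata
// (op, strides, flags, op_params, name, view info, sources) is duplicated while the
// underlying data/buffer pointers are shared with the original tensors.
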
static ggml_tensor * map_tensor(std::map<ggml_tensor *, ggml_tensor *> & tensor_map, ggml_context * ctx, ggml_tensor * tensor) {
    if (!tensor) {
        return nullptr;
    }

    if (tensor_map.find(tensor) != tensor_map.end()) {
        return tensor_map[tensor];
    }

    ggml_tensor * new_tensor = ggml_dup_tensor(ctx, tensor);
    tensor_map[tensor] = new_tensor;

    new_tensor->op = tensor->op;
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        new_tensor->nb[i] = tensor->nb[i];
    }
    new_tensor->flags = tensor->flags;
    memcpy(new_tensor->op_params, tensor->op_params, sizeof(tensor->op_params));
    strcpy(new_tensor->name, tensor->name);
    new_tensor->data      = tensor->data;
    new_tensor->buffer    = tensor->buffer;
    new_tensor->extra     = tensor->extra;
    new_tensor->view_offs = tensor->view_offs;
    new_tensor->view_src  = map_tensor(tensor_map, ctx, tensor->view_src);
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        new_tensor->src[i] = map_tensor(tensor_map, ctx, tensor->src[i]);
    }

    return new_tensor;
}

static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * src) {
    std::map<ggml_tensor *, ggml_tensor *> tensor_map;

    ggml_cgraph * dst = ggml_new_graph_custom(ctx, src->size, /*grads =*/ true);

    for (int i = 0; i < src->n_leafs; i++) {
        ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->leafs[i]));
    }
    GGML_ASSERT(dst->n_leafs == src->n_leafs);
    for (int i = 0; i < src->n_nodes; i++) {
        ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->nodes[i]));
    }
    GGML_ASSERT(dst->n_nodes == src->n_nodes);
    for (int i = 0; i < src->n_nodes; ++i) {
        const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]);
        const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]);

        GGML_ASSERT(igrad_src != GGML_HASHSET_FULL);
        GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src));
        GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL);
        GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst));

        dst->grads[igrad_dst]     = src->grads[igrad_src];
        dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src];
    }

    return dst;
}

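// Builds the loss on top of the user's forward graph gf and, depending on
// build_type_alloc, also the backward graph (gb_grad) and the graph with the
// optimizer step appended (gb_opt), allocating the static tensors as needed.
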
static void ggml_opt_build(ggml_opt_context_t opt_ctx) {
    GGML_ASSERT(opt_ctx->ctx_compute && "no compute context set, either use static graphs or set one with ggml_opt_prepare_alloc");
    GGML_ASSERT((!opt_ctx->static_graphs || opt_ctx->inputs->data) && "when using static graphs the inputs must be allocated statically");

    const enum ggml_opt_optimizer_type optimizer = opt_ctx->optimizer;

    const bool accumulate = opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_GRAD &&
        !(opt_ctx->static_graphs && opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->opt_period == 1);

    const bool need_momenta = opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT &&
        opt_ctx->optimizer == GGML_OPT_OPTIMIZER_TYPE_ADAMW;

    ggml_set_input(opt_ctx->inputs);
    ggml_set_output(opt_ctx->outputs);

    int n_param = 0;
    for (int i = 0; i < opt_ctx->gf->n_nodes; ++i) {
        const struct ggml_tensor * node = opt_ctx->gf->nodes[i];
        if (node->flags & GGML_TENSOR_FLAG_PARAM) {
            n_param++;
        }
        GGML_ASSERT(!(node->flags & GGML_TENSOR_FLAG_LOSS) && "support for extra loss terms not implemented");
    }

    if (!opt_ctx->ctx_static) {
        // The static context is used for:
        //   - gradients (1 per loss, 1 tensor per param if using gradient accumulation)
        //   - optimizer momenta (2 tensors per param)
        //   - labels (if using static graphs)
        //   - loss (if using static graphs, up to 5 tensors)
        //   - pred (if using static graphs)
        //   - ncorrect (if using static graphs, 2 tensors).
        constexpr size_t n_loss = 1;
        const size_t tensors_per_param = (accumulate ? 1 : 0) + (need_momenta ? 2 : 0);
        const size_t tensors_const     = opt_ctx->static_graphs ? 9 : 0;
        const size_t size_meta = (n_loss + tensors_per_param*n_param + tensors_const) * ggml_tensor_overhead();
        struct ggml_init_params params = {
            /*.mem_size   =*/ size_meta,
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        opt_ctx->ctx_static = ggml_init(params);
    }
    GGML_ASSERT(opt_ctx->build_type <= opt_ctx->build_type_alloc);

    {
        // The cpu context is allocated statically if using static graphs, dynamically otherwise.
        // It is used for:
        //   - optimizer parameters (1 shared for all optimizer invocations)
        const size_t size_meta = 1 * ggml_tensor_overhead();
        struct ggml_init_params params = {
            /*.mem_size   =*/ size_meta,
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        ggml_free(opt_ctx->ctx_cpu);
        opt_ctx->ctx_cpu = ggml_init(params);

        ggml_backend_buffer_free(opt_ctx->buf_cpu);
        opt_ctx->buf_cpu = nullptr;
    }

    struct ggml_context * ctx_results = opt_ctx->static_graphs ? opt_ctx->ctx_static : opt_ctx->ctx_compute;

    switch (opt_ctx->loss_type) {
        case GGML_OPT_LOSS_TYPE_MEAN: {
            opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->outputs);
            ggml_set_name(opt_ctx->loss, "loss_sum");
            const float scale = 1.0f / (opt_ctx->opt_period * ggml_nelements(opt_ctx->outputs));
            opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, scale);
            ggml_set_name(opt_ctx->loss, "loss_mean");
            opt_ctx->loss_per_datapoint = true;
            break;
        }
        case GGML_OPT_LOSS_TYPE_SUM: {
            opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->outputs);
            ggml_set_name(opt_ctx->loss, "loss_sum");
            opt_ctx->loss_per_datapoint = false;
            break;
        }
        case GGML_OPT_LOSS_TYPE_CROSS_ENTROPY: {
            opt_ctx->labels = ggml_dup_tensor(ctx_results, opt_ctx->outputs);
            ggml_set_input(opt_ctx->labels);
            ggml_set_name(opt_ctx->labels, "labels");
            opt_ctx->loss = ggml_cross_entropy_loss(ctx_results, opt_ctx->outputs, opt_ctx->labels);
            ggml_set_name(opt_ctx->loss, "loss_cross_entropy");
            if (opt_ctx->opt_period > 1) {
                opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, 1.0f / opt_ctx->opt_period);
                ggml_set_name(opt_ctx->loss, "loss_cross_entropy_scaled");
            }
            opt_ctx->loss_per_datapoint = true;
            break;
        }
        case GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR: {
            opt_ctx->labels = ggml_dup_tensor(ctx_results, opt_ctx->outputs);
            ggml_set_input(opt_ctx->labels);
            ggml_set_name(opt_ctx->labels, "labels");
            opt_ctx->loss = ggml_sub(ctx_results, opt_ctx->outputs, opt_ctx->labels);
            ggml_set_name(opt_ctx->loss, "loss_error");
            opt_ctx->loss = ggml_sqr(ctx_results, opt_ctx->loss);
            ggml_set_name(opt_ctx->loss, "loss_squared_error");
            opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->loss);
            ggml_set_name(opt_ctx->loss, "loss_sum_squared_error");
            const float scale = 1.0f / (opt_ctx->opt_period * ggml_nelements(opt_ctx->outputs));
            opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, scale);
            ggml_set_name(opt_ctx->loss, "loss_mean_squared_error");
            opt_ctx->loss_per_datapoint = true;
            break;
        }
    }
    ggml_set_output(opt_ctx->loss);
    ggml_set_loss(opt_ctx->loss);
    ggml_build_forward_expand(opt_ctx->gf, opt_ctx->loss);

    if (opt_ctx->loss_type == GGML_OPT_LOSS_TYPE_CROSS_ENTROPY) {
        opt_ctx->pred = ggml_argmax(ctx_results, opt_ctx->outputs);
        ggml_set_name(opt_ctx->pred, "pred");
        ggml_set_output(opt_ctx->pred);
        ggml_build_forward_expand(opt_ctx->gf, opt_ctx->pred);

        opt_ctx->ncorrect = ggml_count_equal(ctx_results, opt_ctx->pred, ggml_argmax(ctx_results, opt_ctx->labels));
        ggml_set_name(opt_ctx->ncorrect, "ncorrect");
        ggml_set_output(opt_ctx->ncorrect);
        ggml_build_forward_expand(opt_ctx->gf, opt_ctx->ncorrect);
    }

    if (opt_ctx->buf_static) {
        if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_FORWARD) {
            return;
        }
    } else if (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_FORWARD) {
        opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(
            opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
        return;
    }

    if (opt_ctx->grad_accs.empty()) {
        GGML_ASSERT(opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_GRAD);

        const int n_nodes = opt_ctx->gf->n_nodes;
        opt_ctx->grad_accs.resize(n_nodes);
        for (int i = 0; i < n_nodes; ++i) {
            ggml_tensor * node = opt_ctx->gf->nodes[i];
            if ((accumulate && (node->flags & GGML_TENSOR_FLAG_PARAM)) || (node->flags & GGML_TENSOR_FLAG_LOSS)) {
                opt_ctx->grad_accs[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
            } else {
                opt_ctx->grad_accs[i] = nullptr;
            }
        }

        if (need_momenta && opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_OPT) {
            opt_ctx->grad_m.resize(n_nodes);
            opt_ctx->grad_v.resize(n_nodes);
            for (int i = 0; i < n_nodes; ++i) {
                ggml_tensor * node = opt_ctx->gf->nodes[i];
                if (node->flags & GGML_TENSOR_FLAG_PARAM) {
                    opt_ctx->grad_m[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
                    opt_ctx->grad_v[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
                } else {
                    opt_ctx->grad_m[i] = nullptr;
                    opt_ctx->grad_v[i] = nullptr;
                }
            }
        }
    }

    // gb_grad == graph backward gradients, forward pass, then backward pass to calculate gradients.
    opt_ctx->gb_grad = ggml_graph_dup(opt_ctx->ctx_compute, opt_ctx->gf, /*force_grads =*/ true);
    ggml_build_backward_expand(opt_ctx->ctx_compute, opt_ctx->gb_grad, opt_ctx->grad_accs.data());

    if (opt_ctx->buf_static) {
        if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_GRAD) {
            return;
        }
    } else if (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_GRAD) {
        opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
        ggml_graph_reset(opt_ctx->gb_grad);
    }

    GGML_ASSERT(opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT);

    // gb_opt == graph backward optimize, forward pass, then backward pass to calculate gradients, then optimizer step.
    opt_ctx->gb_opt = ggml_graph_dup(opt_ctx->ctx_compute, opt_ctx->gb_grad, /*force_grads =*/ true);

    opt_ctx->opt_step_params = ggml_new_tensor_1d(opt_ctx->ctx_cpu, GGML_TYPE_F32, need_momenta ? 7 : 2);
    ggml_tensor * adamw_params = opt_ctx->opt_step_params;
    ggml_set_input(adamw_params);
    const char * optimizer_name = ggml_opt_optimizer_name(opt_ctx->optimizer);
    ggml_format_name(adamw_params, "%s_params", optimizer_name);

    for (int i = opt_ctx->gf->n_nodes-1; i >= 0; --i) {
        struct ggml_tensor * node = opt_ctx->gb_opt->nodes[i];
        struct ggml_tensor * grad = ggml_graph_get_grad(opt_ctx->gb_opt, node);

        if (grad && (node->flags & GGML_TENSOR_FLAG_PARAM)) {
            struct ggml_tensor * m = nullptr;
            struct ggml_tensor * v = nullptr;
            if (need_momenta) {
                m = opt_ctx->grad_m[i];
                v = opt_ctx->grad_v[i];
                ggml_format_name(m, "AdamW m for %s", node->name);
                ggml_format_name(v, "AdamW v for %s", node->name);
            }
            struct ggml_tensor * opt_step;
            switch (optimizer) {
                case GGML_OPT_OPTIMIZER_TYPE_ADAMW:
                    opt_step = ggml_opt_step_adamw(opt_ctx->ctx_compute, node, grad, m, v, adamw_params);
                    break;
                case GGML_OPT_OPTIMIZER_TYPE_SGD:
                    opt_step = ggml_opt_step_sgd(opt_ctx->ctx_compute, node, grad, adamw_params);
                    break;
                default:
                    GGML_ABORT("fatal error");
            }
            ggml_format_name(opt_step, "%s step for %s", optimizer_name, node->name);
            ggml_build_forward_expand(opt_ctx->gb_opt, opt_step);
        }
    }

    if (!opt_ctx->buf_static) {
        opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(
            opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
        ggml_graph_reset(opt_ctx->gb_opt);
    }

    opt_ctx->buf_cpu = ggml_backend_alloc_ctx_tensors_from_buft(opt_ctx->ctx_cpu, ggml_backend_cpu_buffer_type());
}

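// If params.ctx_compute is set the graphs are built once here ("static graphs");
// otherwise the caller must provide a fresh forward graph via ggml_opt_prepare_alloc()
// before every ggml_opt_alloc()/ggml_opt_eval() pair.
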
ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) {
    ggml_opt_context_t result = new struct ggml_opt_context;
    result->backend_sched    = params.backend_sched;
    result->ctx_compute      = params.ctx_compute;
    result->loss_type        = params.loss_type;
    result->build_type       = params.build_type;
    result->build_type_alloc = params.build_type;
    result->inputs           = params.inputs;
    result->outputs          = params.outputs;
    result->opt_period       = params.opt_period;
    result->get_opt_pars     = params.get_opt_pars;
    result->get_opt_pars_ud  = params.get_opt_pars_ud;
    result->optimizer        = params.optimizer;

    GGML_ASSERT(result->opt_period >= 1);

    result->static_graphs = result->ctx_compute;

    if (!result->static_graphs) {
        GGML_ASSERT(!result->inputs);
        GGML_ASSERT(!result->outputs);
        return result;
    }

    GGML_ASSERT(result->inputs);
    GGML_ASSERT(result->outputs);

    result->gf = ggml_new_graph_custom(result->ctx_compute, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); // Forward pass.
    ggml_build_forward_expand(result->gf, result->outputs);

    ggml_opt_build(result);

    return result;
}

void ggml_opt_free(ggml_opt_context_t opt_ctx) {
    if (opt_ctx == nullptr) {
        return;
    }
    ggml_backend_buffer_free(opt_ctx->buf_static);
    ggml_backend_buffer_free(opt_ctx->buf_cpu);
    ggml_free(opt_ctx->ctx_static);
    ggml_free(opt_ctx->ctx_cpu);
    delete opt_ctx;
}

void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) {
    if (optimizer) {
        ggml_graph_reset(opt_ctx->gb_opt);
        opt_ctx->iter = 1;
    } else {
        ggml_graph_reset(opt_ctx->gb_grad);
    }
}

bool ggml_opt_static_graphs(ggml_opt_context_t opt_ctx) {
    return opt_ctx->static_graphs;
}

struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) {
    return opt_ctx->inputs;
}

struct ggml_tensor * ggml_opt_outputs(ggml_opt_context_t opt_ctx) {
    return opt_ctx->outputs;
}

struct ggml_tensor * ggml_opt_labels(ggml_opt_context_t opt_ctx) {
    return opt_ctx->labels;
}

struct ggml_tensor * ggml_opt_loss(ggml_opt_context_t opt_ctx) {
    return opt_ctx->loss;
}

struct ggml_tensor * ggml_opt_pred(ggml_opt_context_t opt_ctx) {
    return opt_ctx->pred;
}

struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx) {
    return opt_ctx->ncorrect;
}

struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node) {
    return ggml_graph_get_grad_acc(opt_ctx->gb_opt, node);
}

// ====== Optimization Result ======

ggml_opt_result_t ggml_opt_result_init() {
    return new ggml_opt_result;
}

void ggml_opt_result_free(ggml_opt_result_t result) {
    delete result;
}

void ggml_opt_result_reset(ggml_opt_result_t result) {
    result->ndata = 0;
    result->loss.clear();
    result->pred.clear();
    result->ncorrect = 0;
}

void ggml_opt_result_ndata(ggml_opt_result_t result, int64_t * ndata) {
    *ndata = result->ndata;
}

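// Reports the accumulated loss: the mean loss per datapoint (or the total sum for
// GGML_OPT_LOSS_TYPE_SUM) together with its standard error across physical batches
// (NAN if fewer than 2 batches were recorded).
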
void ggml_opt_result_loss(ggml_opt_result_t result, double * loss, double * unc) {
    const int64_t nbatches = result->loss.size(); // Number of physical batches.

    if (nbatches == 0) {
        *loss = 0.0;
        *unc  = NAN;
        return;
    }

    double sum         = 0.0;
    double sum_squared = 0.0;

    for (const float & loss : result->loss) {
        // If the loss is per datapoint it was scaled by 1.0f/opt_period for each physical batch.
        const float loss_scaled = result->loss_per_datapoint ? loss*result->opt_period : loss;
        sum         += loss_scaled;
        sum_squared += loss_scaled*loss_scaled;
    }

    const double mean = sum/nbatches;
    *loss = result->loss_per_datapoint ? mean : sum;

    if (!unc) {
        return;
    }

    if (nbatches < 2) {
        *unc = NAN;
        return;
    }

    const double var_sum = sum_squared/nbatches - mean*mean; // variance without Bessel's correction, i.e. nbatches/(nbatches-1)
    *unc = result->loss_per_datapoint ? sqrt(var_sum / (nbatches - 1)) : sqrt(var_sum * nbatches/(nbatches - 1));
}

void ggml_opt_result_pred(ggml_opt_result_t result, int32_t * pred) {
    for (size_t i = 0; i < result->pred.size(); ++i) {
        pred[i] = result->pred[i];
    }
}

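// Accuracy is ncorrect/ndata; the uncertainty is the standard error of a Bernoulli
// proportion, sqrt(p*(1-p)/(ndata-1)), or NAN if accuracy is not being tracked.
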
void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc) {
    *accuracy = result->ncorrect >= 0 ? double(result->ncorrect) / double(result->ndata) : NAN;

    if (!unc) {
        return;
    }

    *unc = result->ncorrect >= 0 && result->ndata >= 2 ?
        sqrt((*accuracy) * (1.0 - (*accuracy)) / double(result->ndata - 1)) : NAN;
}

// ====== Computation ======

void ggml_opt_prepare_alloc(
        ggml_opt_context_t    opt_ctx,
        struct ggml_context * ctx_compute,
        struct ggml_cgraph  * gf,
        struct ggml_tensor  * inputs,
        struct ggml_tensor  * outputs) {
    GGML_ASSERT(!opt_ctx->static_graphs);
    opt_ctx->ctx_compute = ctx_compute;
    opt_ctx->gf          = gf;
    opt_ctx->inputs      = inputs;
    opt_ctx->outputs     = outputs;
}

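// Selects which graph to run next (forward only, forward+backward, or
// forward+backward+optimizer step, depending on `backward` and opt_period) and
// allocates it on the backend scheduler. Must be called before ggml_opt_eval().
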
void ggml_opt_alloc(ggml_opt_context_t opt_ctx, bool backward) {
    GGML_ASSERT(!opt_ctx->eval_ready);
    if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->opt_period > 1 && opt_ctx->opt_i == 0) {
        ggml_graph_reset(opt_ctx->gb_grad);
    }
    if (backward) {
        const int32_t opt_i_next = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;
        opt_ctx->build_type = opt_i_next == 0 ? GGML_OPT_BUILD_TYPE_OPT : GGML_OPT_BUILD_TYPE_GRAD;
    } else {
        opt_ctx->build_type = GGML_OPT_BUILD_TYPE_FORWARD;
    }

    if (!opt_ctx->static_graphs) {
        ggml_opt_build(opt_ctx);
    }

    struct ggml_cgraph * graph = nullptr;
    switch (opt_ctx->build_type) {
        case GGML_OPT_BUILD_TYPE_FORWARD: {
            graph = opt_ctx->gf;
        } break;
        case GGML_OPT_BUILD_TYPE_GRAD: {
            graph = opt_ctx->gb_grad;
        } break;
        case GGML_OPT_BUILD_TYPE_OPT: {
            graph = opt_ctx->gb_opt;
        } break;
    }
    GGML_ASSERT(graph);

    if (opt_ctx->allocated_graph == graph) {
        opt_ctx->eval_ready = true;
        return;
    }

    ggml_backend_sched_reset(opt_ctx->backend_sched); // clear allocation of previous graph

    if (opt_ctx->static_graphs) {
        ggml_init_params params = {
            /*.mem_size   =*/ graph->size*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph->size, graph->grads),
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        ggml_free(opt_ctx->ctx_copy);
        opt_ctx->ctx_copy = ggml_init(params);

        opt_ctx->allocated_graph_copy = dup_graph(opt_ctx->ctx_copy, graph);
    } else {
        opt_ctx->allocated_graph_copy = graph;
    }

    ggml_backend_sched_alloc_graph(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
    opt_ctx->allocated_graph = graph;

    opt_ctx->eval_ready = true;
}

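// Writes the current optimizer parameters into the host-side opt_step_params tensor
// (only when the optimizer graph is about to run), computes the allocated graph, and
// accumulates loss/pred/ncorrect into `result` if one is given.
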
void ggml_opt_eval(ggml_opt_context_t opt_ctx, ggml_opt_result_t result) {
    GGML_ASSERT(opt_ctx->eval_ready);
    if (opt_ctx->allocated_graph == opt_ctx->gb_opt) {
        const ggml_opt_optimizer_params & opt_pars = opt_ctx->get_opt_pars(opt_ctx->get_opt_pars_ud);

        switch (opt_ctx->optimizer) {
            case GGML_OPT_OPTIMIZER_TYPE_ADAMW: {
                GGML_ASSERT(opt_pars.adamw.alpha >  0.0f);
                GGML_ASSERT(opt_pars.adamw.beta1 >= 0.0f);
                GGML_ASSERT(opt_pars.adamw.beta1 <= 1.0f);
                GGML_ASSERT(opt_pars.adamw.beta2 >= 0.0f);
                GGML_ASSERT(opt_pars.adamw.beta2 <= 1.0f);
                GGML_ASSERT(opt_pars.adamw.eps   >= 0.0f);
                GGML_ASSERT(opt_pars.adamw.wd    >= 0.0f);
                GGML_ASSERT(opt_pars.adamw.wd    <= 1.0f);

                // beta1, beta2 after applying warmup
                const float beta1h = 1.0f / (1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter));
                const float beta2h = 1.0f / (1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter));

                float * adamw_par_data = ggml_get_data_f32(opt_ctx->opt_step_params);
                adamw_par_data[0] = opt_pars.adamw.alpha;
                adamw_par_data[1] = opt_pars.adamw.beta1;
                adamw_par_data[2] = opt_pars.adamw.beta2;
                adamw_par_data[3] = opt_pars.adamw.eps;
                adamw_par_data[4] = opt_pars.adamw.wd;
                adamw_par_data[5] = beta1h;
                adamw_par_data[6] = beta2h;
            } break;
            case GGML_OPT_OPTIMIZER_TYPE_SGD: {
                GGML_ASSERT(opt_pars.sgd.alpha >  0.0f);
                GGML_ASSERT(opt_pars.sgd.wd    >= 0.0f);
                GGML_ASSERT(opt_pars.sgd.wd    <= 1.0f);
                float * sgd = ggml_get_data_f32(opt_ctx->opt_step_params);
                sgd[0] = opt_pars.sgd.alpha;
                sgd[1] = opt_pars.sgd.wd;
            } break;
            default:
                GGML_ABORT("fatal error");
        }
    }

    ggml_backend_sched_graph_compute(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
    opt_ctx->iter += opt_ctx->allocated_graph == opt_ctx->gb_opt;
    opt_ctx->opt_i = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;

    if (!opt_ctx->static_graphs) {
        opt_ctx->gf                   = nullptr;
        opt_ctx->gb_grad              = nullptr;
        opt_ctx->gb_opt               = nullptr;
        opt_ctx->allocated_graph      = nullptr;
        opt_ctx->allocated_graph_copy = nullptr;
    }

    opt_ctx->eval_ready = false;

    if (!result) {
        return;
    }

    if (result->ndata == 0) {
        result->loss_per_datapoint = opt_ctx->loss_per_datapoint;
        result->opt_period         = opt_ctx->opt_period;
    } else {
        GGML_ASSERT(result->loss_per_datapoint == opt_ctx->loss_per_datapoint);
        GGML_ASSERT(result->opt_period         == opt_ctx->opt_period);
    }

    const int64_t ndata = opt_ctx->outputs->ne[1];
    GGML_ASSERT(result->ndata == ndata*int64_t(result->loss.size()) && "varying batch size not supported");
    result->ndata += ndata;

    GGML_ASSERT(ggml_is_scalar(opt_ctx->loss));
    GGML_ASSERT(opt_ctx->loss->type == GGML_TYPE_F32);
    float loss;
    ggml_backend_tensor_get(opt_ctx->loss, &loss, 0, ggml_nbytes(opt_ctx->loss));
    result->loss.push_back(loss);

    if (opt_ctx->pred) {
        GGML_ASSERT(opt_ctx->pred->type == GGML_TYPE_I32);
        std::vector<int32_t> pred(ndata);
        ggml_backend_tensor_get(opt_ctx->pred, pred.data(), 0, ggml_nbytes(opt_ctx->pred));
        result->pred.insert(result->pred.end(), pred.begin(), pred.end());
    }

    if (!opt_ctx->ncorrect || result->ncorrect < 0) {
        result->ncorrect = -1;
        return;
    }

    GGML_ASSERT(ggml_is_scalar(opt_ctx->ncorrect));
    GGML_ASSERT(opt_ctx->ncorrect->type == GGML_TYPE_I64);
    int64_t ncorrect;
    ggml_backend_tensor_get(opt_ctx->ncorrect, &ncorrect, 0, ggml_nbytes(opt_ctx->ncorrect));
    result->ncorrect += ncorrect;
}

// ====== High-Level Functions ======

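// Runs one pass over the dataset: batches [0, idata_split) are processed with backward
// passes and accumulated into result_train, the remaining batches are evaluated
// forward-only into result_eval.
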
void ggml_opt_epoch(
        ggml_opt_context_t      opt_ctx,
        ggml_opt_dataset_t      dataset,
        ggml_opt_result_t       result_train,
        ggml_opt_result_t       result_eval,
        int64_t                 idata_split,
        ggml_opt_epoch_callback callback_train,
        ggml_opt_epoch_callback callback_eval) {
    GGML_ASSERT(ggml_opt_static_graphs(opt_ctx) && "ggml_opt_epoch requires static graphs");
    struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx);
    struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
    struct ggml_tensor * data   = ggml_opt_dataset_data(dataset);
    GGML_ASSERT(data->ne[0] == inputs->ne[0]);

    const int64_t ndata       = data->ne[1];
    const int64_t ndata_batch = inputs->ne[1];

    GGML_ASSERT(data->ne[1] % inputs->ne[1] == 0);
    const int64_t nbatches = ndata/ndata_batch;

    idata_split = idata_split < 0 ? ndata : idata_split;
    GGML_ASSERT(idata_split % ndata_batch == 0);
    const int64_t ibatch_split = idata_split / ndata_batch;

    int64_t ibatch = 0;
    int64_t t_loop_start = ggml_time_us();
    for (; ibatch < ibatch_split; ++ibatch) {
        ggml_opt_alloc(opt_ctx, /*backward =*/ true);
        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
        ggml_opt_eval(opt_ctx, result_train);
        if (callback_train) {
            callback_train(true, opt_ctx, dataset, result_train, ibatch+1, ibatch_split, t_loop_start);
        }
    }
    t_loop_start = ggml_time_us();
    for (; ibatch < nbatches; ++ibatch) {
        ggml_opt_alloc(opt_ctx, /*backward =*/ false);
        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
        ggml_opt_eval(opt_ctx, result_eval);
        if (callback_eval) {
            callback_eval(false, opt_ctx, dataset, result_eval, ibatch+1-ibatch_split, nbatches-ibatch_split, t_loop_start);
        }
    }
}

void ggml_opt_epoch_callback_progress_bar(
        bool               train,
        ggml_opt_context_t opt_ctx,
        ggml_opt_dataset_t dataset,
        ggml_opt_result_t  result,
        int64_t            ibatch,
        int64_t            ibatch_max,
        int64_t            t_start_us) {
    fprintf(stderr, "%s[", train ? "train: " : "val: ");

    // The progress bar consists of partially filled blocks, unicode has 8 separate fill levels.
    constexpr int64_t bar_length = 8;
    const int64_t ibatch8 = 8 * ibatch;
    for (int64_t j = 0; j < bar_length; ++j) {
        if (ibatch_max * (8*j + 8) / bar_length < ibatch8) {
            fprintf(stderr, "\u2588"); // full block
        } else if (ibatch_max * (8*j + 7) / bar_length < ibatch8) {
            fprintf(stderr, "\u2589"); // 7/8 filled
        } else if (ibatch_max * (8*j + 6) / bar_length < ibatch8) {
            fprintf(stderr, "\u258A"); // 6/8 filled
        } else if (ibatch_max * (8*j + 5) / bar_length < ibatch8) {
            fprintf(stderr, "\u258B"); // 5/8 filled
        } else if (ibatch_max * (8*j + 4) / bar_length < ibatch8) {
            fprintf(stderr, "\u258C"); // 4/8 filled
        } else if (ibatch_max * (8*j + 3) / bar_length < ibatch8) {
            fprintf(stderr, "\u258D"); // 3/8 filled
        } else if (ibatch_max * (8*j + 2) / bar_length < ibatch8) {
            fprintf(stderr, "\u258E"); // 2/8 filled
        } else if (ibatch_max * (8*j + 1) / bar_length < ibatch8) {
            fprintf(stderr, "\u258F"); // 1/8 filled
        } else {
            fprintf(stderr, " ");
        }
    }

    const int64_t batch_size = ggml_opt_inputs(opt_ctx)->ne[1];
    const int64_t idata      = ibatch*batch_size;
    const int64_t idata_max  = ibatch_max*batch_size;

    double loss;
    double loss_unc;
    ggml_opt_result_loss(result, &loss, &loss_unc);

    double accuracy;
    double accuracy_unc;
    ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc);

    const int64_t t_ibatch_us = ggml_time_us() - t_start_us;
    int64_t t_ibatch_s = t_ibatch_us / 1000000;
    const int64_t t_ibatch_h = t_ibatch_s / 3600;
    t_ibatch_s -= t_ibatch_h * 3600;
    const int64_t t_ibatch_m = t_ibatch_s / 60;
    t_ibatch_s -= t_ibatch_m * 60;

    const int64_t t_eta_us = t_ibatch_us * (ibatch_max - ibatch)/ibatch;
    int64_t t_eta_s = t_eta_us / 1000000;
    const int64_t t_eta_h = t_eta_s / 3600;
    t_eta_s -= t_eta_h * 3600;
    const int64_t t_eta_m = t_eta_s / 60;
    t_eta_s -= t_eta_m * 60;

    fprintf(stderr, "] data=%07" PRId64 "/%07" PRId64 " loss=%.5lf±%.5lf acc=%.2lf±%.2lf%% "
            "t=%02" PRId64 ":%02" PRId64 ":%02" PRId64 " ETA=%02" PRId64 ":%02" PRId64 ":%02" PRId64 " \r",
            idata, idata_max, loss, loss_unc, 100.0*accuracy, 100.0*accuracy_unc,
            t_ibatch_h, t_ibatch_m, t_ibatch_s, t_eta_h, t_eta_m, t_eta_s);
    if (ibatch == ibatch_max) {
        fprintf(stderr, "\n");
    }
    fflush(stderr);

    GGML_UNUSED(dataset);
}

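// Convenience training loop: trains for nepoch epochs over dataset, deriving the
// gradient accumulation period from nbatch_logical / inputs->ne[1] and holding out
// the last val_split fraction of the logical batches for validation.
// The get_opt_pars callback receives a pointer to the current epoch (int64_t) as
// its userdata.
//
// Usage sketch (tensor names and sizes are hypothetical; a real caller builds the
// model in ctx_compute and marks its weights with ggml_set_param beforehand):
//
//     // struct ggml_tensor * inputs  = ...;  // [ne_in,  nbatch_physical]
//     // struct ggml_tensor * outputs = ...;  // [ne_out, nbatch_physical], model logits
//     ggml_opt_fit(backend_sched, ctx_compute, inputs, outputs, dataset,
//         GGML_OPT_LOSS_TYPE_CROSS_ENTROPY, GGML_OPT_OPTIMIZER_TYPE_ADAMW,
//         ggml_opt_get_default_optimizer_params, /*nepoch =*/ 30,
//         /*nbatch_logical =*/ 500, /*val_split =*/ 0.05f, /*silent =*/ false);
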
void ggml_opt_fit(
        ggml_backend_sched_t          backend_sched,
        ggml_context                * ctx_compute,
        ggml_tensor                 * inputs,
        ggml_tensor                 * outputs,
        ggml_opt_dataset_t            dataset,
        enum ggml_opt_loss_type       loss_type,
        enum ggml_opt_optimizer_type  optimizer,
        ggml_opt_get_optimizer_params get_opt_pars,
        int64_t                       nepoch,
        int64_t                       nbatch_logical,
        float                         val_split,
        bool                          silent) {
    ggml_time_init();
    const int64_t t_start_us = ggml_time_us();

    const int64_t ndata           = ggml_opt_dataset_data(dataset)->ne[1];
    const int64_t nbatch_physical = inputs->ne[1];
    GGML_ASSERT(ndata          % nbatch_logical  == 0);
    GGML_ASSERT(nbatch_logical % nbatch_physical == 0);

    const int64_t opt_period       = nbatch_logical / nbatch_physical;
    const int64_t nbatches_logical = ndata / nbatch_logical;

    GGML_ASSERT(val_split >= 0.0f);
    GGML_ASSERT(val_split <  1.0f);
    const int64_t ibatch_split = int64_t(((1.0f - val_split) * nbatches_logical)) * opt_period; // train <-> val split index (physical)
    const int64_t idata_split  = ibatch_split * nbatch_physical;

    int64_t epoch = 1;

    ggml_opt_params params = ggml_opt_default_params(backend_sched, loss_type);
    params.ctx_compute     = ctx_compute;
    params.inputs          = inputs;
    params.outputs         = outputs;
    params.opt_period      = opt_period;
    params.get_opt_pars    = get_opt_pars;
    params.get_opt_pars_ud = &epoch;
    params.optimizer       = optimizer;
    ggml_opt_context_t opt_ctx = ggml_opt_init(params);

    // Shuffling the data is generally useful but there is only a point if not all data is used in a single batch.
    if (nbatch_logical < ndata) {
        ggml_opt_dataset_shuffle(opt_ctx, dataset, -1); // Shuffle all data (train + validation).
    }

    ggml_opt_result_t result_train = ggml_opt_result_init();
    ggml_opt_result_t result_val   = ggml_opt_result_init();

    ggml_opt_epoch_callback epoch_callback = silent ? nullptr : ggml_opt_epoch_callback_progress_bar;

    for (; epoch <= nepoch; ++epoch) {
        if (nbatch_logical < idata_split) {
            ggml_opt_dataset_shuffle(opt_ctx, dataset, idata_split);
        }

        ggml_opt_result_reset(result_train);
        ggml_opt_result_reset(result_val);

        if (!silent) {
            fprintf(stderr, "%s: epoch %04" PRId64 "/%04" PRId64 ":\n", __func__, epoch, nepoch);
        }
        ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split, epoch_callback, epoch_callback);
        if (!silent) {
            fprintf(stderr, "\n");
        }
    }

    if (!silent) {
        int64_t t_total_s = (ggml_time_us() - t_start_us) / 1000000;
        const int64_t t_total_h = t_total_s / 3600;
        t_total_s -= t_total_h * 3600;
        const int64_t t_total_m = t_total_s / 60;
        t_total_s -= t_total_m * 60;
        fprintf(stderr, "%s: training took %02" PRId64 ":%02" PRId64 ":%02" PRId64 "\n", __func__, t_total_h, t_total_m, t_total_s);
    }

    ggml_opt_free(opt_ctx);
    ggml_opt_result_free(result_train);
    ggml_opt_result_free(result_val);
}

enum ggml_opt_optimizer_type ggml_opt_context_optimizer_type(ggml_opt_context_t c) {
    return c->optimizer;
}

GGML_API const char * ggml_opt_optimizer_name(enum ggml_opt_optimizer_type o) {
    switch (o) {
        case GGML_OPT_OPTIMIZER_TYPE_ADAMW:
            return "adamw";
        case GGML_OPT_OPTIMIZER_TYPE_SGD:
            return "sgd";
        default:
            return "undefined";
    };
}