- #include "arg.h"
- #include "common.h"
- #include "log.h"
- #include "llama.h"
- #include <cmath>
- #include <cstdio>
- #include <cstring>
- #include <ctime>
- #include <vector>
- #if defined(_MSC_VER)
- #pragma warning(disable: 4244 4267) // possible loss of data
- #endif
- int main(int argc, char ** argv) {
- common_params params;
- params.escape = false;
- if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_FINETUNE)) {
- return 1;
- }
    if (params.use_mmap) {
        LOG_INF("%s: force disabling memory mapping because it would result in read-only pointers to the weights\n",
                __func__);
        params.use_mmap = false;
    }
    if (params.cache_type_k != GGML_TYPE_F32) {
        LOG_INF("%s: force changing k cache type to f32 due to a lack of f16 support for OUT_PROD\n", __func__);
        params.cache_type_k = GGML_TYPE_F32;
    }
    if (params.cache_type_v != GGML_TYPE_F32) {
        LOG_INF("%s: force changing v cache type to f32 due to a lack of f16 support for OUT_PROD\n", __func__);
        params.cache_type_v = GGML_TYPE_F32;
    }

    common_init();
    llama_backend_init();
    llama_numa_init(params.numa);

    // load the model and apply lora adapter, if any
    common_init_result llama_init = common_init_from_params(params);

    llama_model_ptr   & model = llama_init.model;
    llama_context_ptr & ctx   = llama_init.context;

    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return 1;
    }

    // print system information
    {
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
    }
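
    // tokenize the prompt/training text and build an optimization dataset
    // from the resulting token stream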
    std::vector<llama_token> tokens = common_tokenize(ctx.get(), params.prompt, true);
    ggml_opt_dataset_t dataset = common_opt_dataset_init(ctx.get(), tokens, llama_n_ctx(ctx.get()) / 2);

    struct lr_opt & lr = params.lr;
    LOG_INF("-optimizer %s -lr0 %.2g -wd %.2g -lr-min %.2g -min-epochs %.2g -epochs %d -period %.2g -val %.2g\n",
            ggml_opt_optimizer_name(params.optimizer), (double) lr.lr0, (double) lr.wd, (double) lr.lr_min,
            (double) lr.decay_epochs, (unsigned) lr.epochs, (double) params.n_batch / params.n_ubatch,
            (double) params.val_split);
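
    // optimization settings: train all model parameters and take the
    // learning-rate schedule from common_opt_lr_pars via params.lr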
    struct llama_opt_params lopt_params {
        /*n_ctx_train     =*/ 0,
        /*param_filter    =*/ llama_opt_param_filter_all,
        /*param_filter_ud =*/ nullptr,
        /*get_opt_pars    =*/ common_opt_lr_pars,
        /*get_opt_pars_ud =*/ &params.lr,
        /*optimizer_type  =*/ params.optimizer,
    };
    llama_opt_init(ctx.get(), model.get(), lopt_params);
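
    // the first (1 - val_split) fraction of the dataset is used for training,
    // the remainder for validation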
    const int64_t idata_split = ggml_opt_dataset_ndata(dataset) * (1.0f - params.val_split);

    ggml_opt_result_t result_train = ggml_opt_result_init();
    ggml_opt_result_t result_eval  = ggml_opt_result_init();
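
    // run the requested number of epochs: each epoch trains on the data before
    // idata_split and evaluates on the rest, reporting progress via the
    // built-in progress-bar callback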
    for (lr.epoch = 0; lr.epoch < lr.epochs; ++lr.epoch) {
        llama_opt_epoch(ctx.get(), dataset, result_train, result_eval, idata_split,
                        ggml_opt_epoch_callback_progress_bar, ggml_opt_epoch_callback_progress_bar);
        fprintf(stderr, "\n");

        ggml_opt_result_reset(result_train);
        ggml_opt_result_reset(result_eval);
    }
    ggml_opt_result_free(result_train);
    ggml_opt_result_free(result_eval);
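
    // write the finetuned model weights to the output file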
    llama_model_save_to_file(model.get(), params.out_file.c_str());

    llama_backend_free();

    return 0;
}
```