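// imatrix: collects "importance matrix" statistics for a model over a
// calibration text. For every weight matrix involved in a (possibly indirect)
// matrix multiplication, the sums of the squared activations seen by each of
// its input columns are accumulated and periodically written to disk. The
// resulting file can later be used to guide quantization.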
#include "common.h"
#include "llama.h"

#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <random>
#include <sstream>
#include <thread>
#include <mutex>
#include <vector>
#include <fstream>
#include <unordered_map>
#include <algorithm>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

struct Stats {
    std::vector<float> values;
    int                ncall = 0;
};

struct StatParams {
    std::string ofile                 = "imatrix.dat";
    int         n_output_frequency    = 10;
    int         verbosity             = 1;
    int         keep_every            = 0;
    bool        collect_output_weight = false;
};
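
// gathers the statistics through the ggml eval callback: for each matrix
// multiplication involving a model weight, the squared values of every input
// activation column are accumulated into m_stats[tensor name]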
class IMatrixCollector {
public:
    IMatrixCollector() = default;
    void set_parameters(StatParams&& params) { m_params = std::move(params); }
    bool collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data);
    void save_imatrix() const;
    bool load_imatrix(const char * file_name, bool add);
    static bool load_imatrix(const char * file_name, std::unordered_map<std::string, Stats>& imatrix);
private:
    std::unordered_map<std::string, Stats> m_stats;
    StatParams                             m_params;
    std::mutex                             m_mutex;
    int                                    m_last_call = 0;
    std::vector<float>                     m_src1_data;
    std::vector<int>                       m_ids; // the expert ids from ggml_mul_mat_id
    //
    void save_imatrix(const char * file_name) const;
    void keep_imatrix(int ncall) const;
};

// remove any prefix and suffix from the name
// CUDA0#blk.0.attn_k.weight#0 => blk.0.attn_k.weight
static std::string filter_tensor_name(const char * name) {
    std::string wname;
    const char * p = strchr(name, '#');
    if (p != NULL) {
        p = p + 1;
        const char * q = strchr(p, '#');
        if (q != NULL) {
            wname = std::string(p, q - p);
        } else {
            wname = p;
        }
    } else {
        wname = name;
    }
    return wname;
}

bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
    GGML_UNUSED(user_data);

    const struct ggml_tensor * src0 = t->src[0];
    const struct ggml_tensor * src1 = t->src[1];

    std::string wname = filter_tensor_name(src0->name);

    // when ask is true, the scheduler wants to know if we are interested in data from this tensor
    // if we return true, a follow-up call will be made with ask=false in which we can do the actual collection
    if (ask) {
        if (t->op == GGML_OP_MUL_MAT_ID) return true; // collect all indirect matrix multiplications
        if (t->op != GGML_OP_MUL_MAT) return false;
        if (src1->ne[1] < 16 || src1->type != GGML_TYPE_F32) return false;
        if (!(wname.substr(0, 4) == "blk." || (m_params.collect_output_weight && wname == "output.weight"))) return false;
        return true;
    }

    std::lock_guard<std::mutex> lock(m_mutex);

    // copy the data from the GPU memory if needed
    const bool is_host = ggml_backend_buffer_is_host(src1->buffer);

    if (!is_host) {
        m_src1_data.resize(ggml_nelements(src1));
        ggml_backend_tensor_get(src1, m_src1_data.data(), 0, ggml_nbytes(src1));
    }

    const float * data = is_host ? (const float *) src1->data : m_src1_data.data();

    if (t->op == GGML_OP_MUL_MAT_ID) {
        const int idx  = ((int32_t *) t->op_params)[0];
        const int n_as = ((int32_t *) t->op_params)[1];

        // the top-k selected expert ids are stored in the src0 tensor
        // for simplicity, always copy src0 to host, because it is small
        // take into account that src0 is not contiguous!
        GGML_ASSERT(src0->ne[1] == src1->ne[1]);
        GGML_ASSERT(n_as*ggml_nrows(src0)*sizeof(int) == GGML_PAD(ggml_nbytes(src0), n_as*sizeof(int)));
        m_ids.resize(ggml_nbytes(src0)/sizeof(int));
        ggml_backend_tensor_get(src0, m_ids.data(), 0, ggml_nbytes(src0));

        // loop over all possible experts, regardless if they are used or not in the batch
        // this is necessary to guarantee equal number of "ncall" for each tensor
        for (int ex = 0; ex < n_as; ++ex) {
            src0 = t->src[2 + ex];
            wname = filter_tensor_name(src0->name);
            auto & e = m_stats[wname];
            if (e.values.empty()) {
                e.values.resize(src1->ne[0], 0);
            }
            else if (e.values.size() != (size_t)src1->ne[0]) {
                fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]);
                exit(1); //GGML_ASSERT(false);
            }
            // NOTE: since we select top-k experts, the number of calls for the expert tensors will be k times larger
            //       using the following line, we can correct for that if needed
            //if (idx == t->src[0]->ne[0] - 1) ++e.ncall;
            ++e.ncall;
            if (m_params.verbosity > 1) {
                printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, wname.c_str(), ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type);
            }
            for (int row = 0; row < (int)src1->ne[1]; ++row) {
                const int excur = m_ids[row*n_as + idx];
                GGML_ASSERT(excur >= 0 && excur < n_as); // sanity check
                if (excur != ex) continue;
                const float * x = data + row * src1->ne[0];
                for (int j = 0; j < (int)src1->ne[0]; ++j) {
                    e.values[j] += x[j]*x[j];
                }
            }
            if (e.ncall > m_last_call) {
                m_last_call = e.ncall;
                if (m_last_call % m_params.n_output_frequency == 0) {
                    save_imatrix();
                }
                if (m_params.keep_every > 0 && m_last_call%m_params.keep_every == 0) {
                    keep_imatrix(m_last_call);
                }
            }
        }
    } else {
        auto & e = m_stats[wname];
        if (e.values.empty()) {
            e.values.resize(src1->ne[0], 0);
        }
        else if (e.values.size() != (size_t)src1->ne[0]) {
            fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]);
            exit(1); //GGML_ASSERT(false);
        }
        ++e.ncall;
        if (m_params.verbosity > 1) {
            printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, wname.c_str(), ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type);
        }
        for (int row = 0; row < (int)src1->ne[1]; ++row) {
            const float * x = data + row * src1->ne[0];
            for (int j = 0; j < (int)src1->ne[0]; ++j) {
                e.values[j] += x[j]*x[j];
            }
        }
        if (e.ncall > m_last_call) {
            m_last_call = e.ncall;
            if (m_last_call % m_params.n_output_frequency == 0) {
                save_imatrix();
            }
            if (m_params.keep_every > 0 && m_last_call%m_params.keep_every == 0) {
                keep_imatrix(m_last_call);
            }
        }
    }

    return true;
}

void IMatrixCollector::save_imatrix() const {
    save_imatrix(m_params.ofile.empty() ? "imatrix.dat" : m_params.ofile.c_str());
}

void IMatrixCollector::keep_imatrix(int ncall) const {
    auto file_name = m_params.ofile;
    if (file_name.empty()) file_name = "imatrix.dat";
    file_name += ".at_";
    file_name += std::to_string(ncall);
    save_imatrix(file_name.c_str());
}
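
// on-disk layout written by save_imatrix below (all counts are int32):
//   n_entries
//   per entry: name length, name bytes (no null terminator),
//              ncall, number of values, values as float32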
void IMatrixCollector::save_imatrix(const char * fname) const {
    std::ofstream out(fname, std::ios::binary);
    int n_entries = m_stats.size();
    out.write((const char *) &n_entries, sizeof(n_entries));
    for (auto & p : m_stats) {
        int len = p.first.size();
        out.write((const char *) &len, sizeof(len));
        out.write(p.first.c_str(), len);
        out.write((const char *) &p.second.ncall, sizeof(p.second.ncall));
        int nval = p.second.values.size();
        out.write((const char *) &nval, sizeof(nval));
        if (nval > 0) out.write((const char *) p.second.values.data(), nval*sizeof(float));
    }
    if (m_params.verbosity > 0) {
        fprintf(stderr, "\n%s: stored collected data after %d chunks in %s\n", __func__, m_last_call, fname);
    }
}

bool IMatrixCollector::load_imatrix(const char * imatrix_file, std::unordered_map<std::string, Stats>& imatrix_data) {
    std::ifstream in(imatrix_file, std::ios::binary);
    if (!in) {
        printf("%s: failed to open %s\n", __func__, imatrix_file);
        return false;
    }
    int n_entries;
    in.read((char *) &n_entries, sizeof(n_entries));
    if (in.fail() || n_entries < 1) {
        printf("%s: no data in file %s\n", __func__, imatrix_file);
        return false;
    }
    for (int i = 0; i < n_entries; ++i) {
        int len; in.read((char *) &len, sizeof(len));
        std::vector<char> name_as_vec(len+1);
        in.read((char *) name_as_vec.data(), len);
        if (in.fail()) {
            printf("%s: failed reading name for entry %d from %s\n", __func__, i+1, imatrix_file);
            return false;
        }
        name_as_vec[len] = 0;
        std::string name{name_as_vec.data()};
        auto & e = imatrix_data[std::move(name)];
        int ncall;
        in.read((char *) &ncall, sizeof(ncall));
        int nval;
        in.read((char *) &nval, sizeof(nval));
        if (in.fail() || nval < 1) {
            printf("%s: failed reading number of values for entry %d\n", __func__, i+1);
            imatrix_data = {};
            return false;
        }
        e.values.resize(nval);
        in.read((char *) e.values.data(), nval*sizeof(float));
        if (in.fail()) {
            printf("%s: failed reading data for entry %d\n", __func__, i+1);
            imatrix_data = {};
            return false;
        }
        e.ncall = ncall;
    }
    return true;
}

bool IMatrixCollector::load_imatrix(const char * file_name, bool add) {
    if (!add) {
        m_stats.clear();
    }
    return load_imatrix(file_name, m_stats);
}
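
// a single global collector instance: the eval callback passed to the backend
// scheduler is a plain function pointer, so this free function simply
// forwards to the collector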
static IMatrixCollector g_collector;

static bool ik_collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
    return g_collector.collect_imatrix(t, ask, user_data);
}
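
//
// helpers for the (optional) perplexity computation over the calibration text
//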

struct results_log_softmax {
    double log_softmax;
    float  logit;
    float  prob;
};

static std::vector<float> softmax(const std::vector<float> & logits) {
    std::vector<float> probs(logits.size());
    float max_logit = logits[0];
    for (float v : logits) {
        max_logit = std::max(max_logit, v);
    }
    double sum_exp = 0.0;
    for (size_t i = 0; i < logits.size(); i++) {
        // Subtract the maximum logit value from the current logit value for numerical stability
        const float logit = logits[i] - max_logit;
        const float exp_logit = expf(logit);
        sum_exp += exp_logit;
        probs[i] = exp_logit;
    }
    for (size_t i = 0; i < probs.size(); i++) {
        probs[i] /= sum_exp;
    }
    return probs;
}
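
// log(softmax(logits))[tok], using the max-subtraction trick for numerical
// stability:
//   log_softmax(tok) = logits[tok] - max - log(sum_i exp(logits[i] - max))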
static results_log_softmax log_softmax(int n_vocab, const float * logits, int tok) {
    float max_logit = logits[0];
    for (int i = 1; i < n_vocab; ++i) {
        max_logit = std::max(max_logit, logits[i]);
    }
    double sum_exp = 0.0;
    for (int i = 0; i < n_vocab; ++i) {
        sum_exp += expf(logits[i] - max_logit);
    }
    return {logits[tok] - max_logit - log(sum_exp), logits[tok], expf(logits[tok] - max_logit) / (float) sum_exp};
}
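
// accumulates the negative log-likelihood (and its square, for the error
// estimate) of n_token predictions; worker threads pull indices from a shared
// counter and merge their partial sums under the mutex when they finish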
static void process_logits(
    int n_vocab, const float * logits, const int * tokens, int n_token, std::vector<std::thread> & workers,
    double & nll, double & nll2, float * logit_history, float * prob_history
) {
    std::mutex mutex;
    int counter = 0;
    auto compute = [&mutex, &counter, &nll, &nll2, logit_history, prob_history, n_vocab, logits, tokens, n_token] () {
        double local_nll  = 0;
        double local_nll2 = 0;
        while (true) {
            std::unique_lock<std::mutex> lock(mutex);
            int i = counter++;
            if (i >= n_token) {
                nll += local_nll; nll2 += local_nll2;
                break;
            }
            lock.unlock();
            const results_log_softmax results = log_softmax(n_vocab, logits + i*n_vocab, tokens[i+1]);
            const double v = -results.log_softmax;
            local_nll += v;
            local_nll2 += v*v;
            logit_history[i] = results.logit;
            prob_history[i]  = results.prob;
        }
    };
    for (auto & w : workers) {
        w = std::thread(compute);
    }
    compute();
    for (auto & w : workers) {
        w.join();
    }
}
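
// runs the calibration text through the model in chunks of n_ctx tokens; the
// imatrix statistics are collected as a side effect of llama_decode via the
// eval callback; when compute_ppl is set, the perplexity over the second half
// of each chunk is also reported as a sanity check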
static bool compute_imatrix(llama_context * ctx, const gpt_params & params, bool compute_ppl, int from_chunk) {
    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
    const int n_ctx = llama_n_ctx(ctx);

    auto tim1 = std::chrono::high_resolution_clock::now();
    fprintf(stderr, "%s: tokenizing the input ..\n", __func__);

    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);

    auto tim2 = std::chrono::high_resolution_clock::now();
    fprintf(stderr, "%s: tokenization took %g ms\n", __func__, 1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());

    if (from_chunk > 0) {
        if (size_t((from_chunk + 2)*n_ctx) >= tokens.size()) {
            fprintf(stderr, "%s: there will not be enough tokens left after removing %d chunks\n", __func__, from_chunk);
            return false;
        }
        fprintf(stderr, "%s: removing initial %d chunks (%d tokens)\n", __func__, from_chunk, from_chunk*n_ctx);
        tokens.erase(tokens.begin(), tokens.begin() + from_chunk*n_ctx);
    }

    if (int(tokens.size()) < 2*n_ctx) {
        fprintf(stderr, "%s: you need at least %d tokens for a context of %d tokens\n", __func__, 2*n_ctx, n_ctx);
        fprintf(stderr, "%s: the data file you provided tokenizes to only %zu tokens\n", __func__, tokens.size());
        return false;
    }

    std::vector<float> logit_history;
    std::vector<float> prob_history;

    if (compute_ppl) {
        logit_history.resize(tokens.size());
        prob_history.resize(tokens.size());
    }

    const int n_chunk_max = tokens.size() / n_ctx;

    const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
    const int n_vocab = llama_n_vocab(llama_get_model(ctx));
    const int n_batch = params.n_batch;

    int count = 0;
    double nll  = 0.0;
    double nll2 = 0.0;

    fprintf(stderr, "%s: computing over %d chunks with batch_size %d\n", __func__, n_chunk, n_batch);

    std::vector<std::thread> workers(std::thread::hardware_concurrency() - 1);

    const int num_batches = (n_ctx + n_batch - 1) / n_batch;

    std::vector<float> logits;
    if (compute_ppl && num_batches > 1) {
        logits.reserve((size_t)n_ctx * n_vocab);
    }

    for (int i = 0; i < n_chunk; ++i) {
        const int start =     i * n_ctx;
        const int end   = start + n_ctx;

        const auto t_start = std::chrono::high_resolution_clock::now();

        // clear the KV cache
        llama_kv_cache_clear(ctx);

        for (int j = 0; j < num_batches; ++j) {
            const int batch_start = start + j * n_batch;
            const int batch_size  = std::min(end - batch_start, n_batch);

            // save original token and restore it after eval
            const auto token_org = tokens[batch_start];

            // add BOS token for the first batch of each chunk
            if (add_bos && j == 0) {
                tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
            }

            if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return false;
            }

            // restore the original token in case it was set to BOS
            tokens[batch_start] = token_org;

            if (compute_ppl && num_batches > 1) {
                const auto * batch_logits = llama_get_logits(ctx);
                logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
            }
        }

        const auto t_end = std::chrono::high_resolution_clock::now();

        if (i == 0) {
            const float t_total = std::chrono::duration<float>(t_end - t_start).count();
            fprintf(stderr, "%s: %.2f seconds per pass - ETA ", __func__, t_total);
            int total_seconds = (int)(t_total * n_chunk);
            if (total_seconds >= 60*60) {
                fprintf(stderr, "%d hours ", total_seconds / (60*60));
                total_seconds = total_seconds % (60*60);
            }
            fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
        }

        if (compute_ppl) {
            const int first = n_ctx/2;
            const auto all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
            process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
                    workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
            count += n_ctx - first - 1;
            printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
            fflush(stdout);

            logits.clear();
        }
    }
    printf("\n");

    if (compute_ppl) {
        nll2 /= count;
        nll /= count;
        const double ppl = exp(nll);
        nll2 -= nll * nll;
        if (nll2 > 0) {
            nll2 = sqrt(nll2/(count-1));
            printf("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl);
        } else {
            printf("Unexpected negative standard deviation of log(prob)\n");
        }
    }

    return true;
}
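
// command-line options handled directly by this tool; anything not listed
// here is forwarded to gpt_params_parse (e.g. -m, -f, -c):
//
//   -o,     --output-file       file to store the result in (default: imatrix.dat)
//   -ofreq, --output-frequency  save the result every N chunks (default: 10)
//   -ow,    --output-weight     0/1: also collect data for output.weight (default: 0)
//           --verbosity         verbosity level (default: 1)
//           --no-ppl            do not compute perplexity while collecting
//           --keep-imatrix      additionally keep a snapshot every N chunks
//           --continue-from     resume from a previously saved imatrix file
//           --combine           combine a comma-separated list of imatrix files and exit
//           --from-chunk        skip the first N chunks of the input
//
// hypothetical example invocation (model and data paths are placeholders):
//
//   ./imatrix -m model.gguf -f calibration.txt -o imatrix.dat --keep-imatrix 50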
int main(int argc, char ** argv) {
    StatParams sparams;
    std::string prev_result_file;
    std::string combine_files;
    bool compute_ppl = true;
    int  from_chunk  = 0;
    std::vector<char*> args;
    args.push_back(argv[0]);
    int iarg = 1;
    for (; iarg < argc-1; ++iarg) {
        std::string arg{argv[iarg]};
        if (arg == "-o" || arg == "--output-file") {
            sparams.ofile = argv[++iarg];
        }
        else if (arg == "-ofreq" || arg == "--output-frequency") {
            sparams.n_output_frequency = std::stoi(argv[++iarg]);
        }
        else if (arg == "-ow" || arg == "--output-weight") {
            sparams.collect_output_weight = std::stoi(argv[++iarg]);
        }
        else if (arg == "--verbosity") {
            sparams.verbosity = std::stoi(argv[++iarg]);
        } else if (arg == "--no-ppl") {
            compute_ppl = false;
        } else if (arg == "--keep-imatrix") {
            sparams.keep_every = std::stoi(argv[++iarg]);
        } else if (arg == "--continue-from") {
            prev_result_file = argv[++iarg];
        } else if (arg == "--combine") {
            combine_files = argv[++iarg];
        }
        else if (arg == "--from-chunk") {
            from_chunk = std::stoi(argv[++iarg]);
        } else {
            args.push_back(argv[iarg]);
        }
    }
    // the loop above stops one argument early so that options taking a value
    // cannot read past argv; handle a possible trailing flag here
    if (iarg < argc) {
        std::string arg{argv[iarg]};
        if (arg == "--no-ppl") {
            compute_ppl = false;
        } else {
            args.push_back(argv[iarg]);
        }
    }

    g_collector.set_parameters(std::move(sparams));

    if (!combine_files.empty()) {
        std::vector<std::string> files;
        size_t pos = 0;
        while (true) {
            auto new_pos = combine_files.find(',', pos);
            if (new_pos != std::string::npos) {
                files.emplace_back(combine_files.substr(pos, new_pos - pos));
                pos = new_pos + 1;
            } else {
                files.emplace_back(combine_files.substr(pos));
                break;
            }
        }
        if (files.size() < 2) {
            fprintf(stderr, "You must provide at least two comma-separated files to use --combine\n");
            return 1;
        }
        printf("Combining the following %d files\n", int(files.size()));
        for (auto & file : files) {
            printf("    %s\n", file.c_str());
            if (!g_collector.load_imatrix(file.c_str(), true)) {
                fprintf(stderr, "Failed to load %s\n", file.c_str());
                return 1;
            }
        }
        g_collector.save_imatrix();
        return 0;
    }

    if (!prev_result_file.empty()) {
        if (!g_collector.load_imatrix(prev_result_file.c_str(), false)) {
            fprintf(stderr, "=============== Failed to load %s\n", prev_result_file.c_str());
            return 1;
        }
    }

    gpt_params params;
    params.n_batch = 512;
    if (!gpt_params_parse(args.size(), args.data(), params)) {
        return 1;
    }

    params.logits_all = true;
    params.n_batch = std::min(params.n_batch, params.n_ctx);

    print_build_info();

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %u\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model_params mparams = llama_model_params_from_gpt_params(params);

    llama_model * model = llama_load_model_from_file(params.model.c_str(), mparams);
    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    llama_context_params cparams = llama_context_params_from_gpt_params(params);

    // pass the callback to the backend scheduler
    // it will be executed for each node during the graph computation
    cparams.cb_eval = ik_collect_imatrix;
    cparams.cb_eval_user_data = NULL;

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: unable to create context\n", __func__);
        return 1;
    }

    const int n_ctx_train = llama_n_ctx_train(model);
    if (params.n_ctx > n_ctx_train) {
        fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
                __func__, n_ctx_train, params.n_ctx);
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s\n", get_system_info(params).c_str());
    }

    bool OK = compute_imatrix(ctx, params, compute_ppl, from_chunk);
    if (!OK) {
        return 1;
    }

    g_collector.save_imatrix();

    llama_print_timings(ctx);

    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}